1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 /*
29 * This module holds the global variables and machine independent functions
30 * used for the kernel SMP support.
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/ktr.h>
37 #include <sys/proc.h>
38 #include <sys/bus.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/pcpu.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/sysctl.h>
46
47 #include <machine/cpu.h>
48 #include <machine/pcb.h>
49 #include <machine/smp.h>
50
51 #include "opt_sched.h"
52
53 MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
54
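/*
 * Hand out cpu_group structures from a single, lazily allocated array that
 * is sized for the worst case ((mp_maxid + 1) * MAX_CACHE_LEVELS + 1
 * entries).  Callers receive consecutive slices; the memory is never freed.
 */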
55 struct cpu_group *
56 smp_topo_alloc(u_int count)
57 {
58 static struct cpu_group *group = NULL;
59 static u_int index;
60 u_int curr;
61
62 if (group == NULL) {
63 group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
64 sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
65 }
66 curr = index;
67 index += count;
68 return (&group[curr]);
69 }
70
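/*
 * Build the simplest possible topology: one flat group containing every
 * CPU, with no cache or SMT sharing recorded.
 */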
71 struct cpu_group *
72 smp_topo_none(void)
73 {
74 struct cpu_group *top;
75
76 top = smp_topo_alloc(1);
77 top->cg_parent = NULL;
78 top->cg_child = NULL;
79 top->cg_mask = all_cpus;
80 top->cg_count = mp_ncpus;
81 top->cg_children = 0;
82 top->cg_level = CG_SHARE_NONE;
83 top->cg_flags = 0;
84
85 return (top);
86 }
87
88 #ifdef SMP
89
90 volatile cpuset_t stopped_cpus;
91 volatile cpuset_t started_cpus;
92 volatile cpuset_t suspended_cpus;
93 cpuset_t hlt_cpus_mask;
94 cpuset_t logical_cpus_mask;
95
96 void (*cpustop_restartfunc)(void);
97 #endif
98
99 static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);
100
101 /* This is used in modules that need to work in both SMP and UP. */
102 cpuset_t all_cpus;
103
104 int mp_ncpus;
105 /* export this for libkvm consumers. */
106 int mp_maxcpus = MAXCPU;
107
108 volatile int smp_started;
109 u_int mp_maxid;
110
111 /* Array of CPU contexts saved during a panic. */
112 struct pcb *stoppcbs;
113
114 static SYSCTL_NODE(_kern, OID_AUTO, smp,
115 CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL,
116 "Kernel SMP");
117
118 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
119 "Max CPU ID.");
120
121 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
122 0, "Max number of CPUs that the system was compiled for.");
123
124 SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE,
125 NULL, 0, sysctl_kern_smp_active, "I",
126 "Indicates system is running in SMP mode");
127
128 int smp_disabled = 0; /* has smp been disabled? */
129 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
130 &smp_disabled, 0, "SMP has been disabled from the loader");
131
132 int smp_cpus = 1; /* how many CPUs running */
133 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
134 "Number of CPUs online");
135
136 int smp_threads_per_core = 1; /* how many SMT threads are running per core */
137 SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD,
138 &smp_threads_per_core, 0, "Number of SMT threads online per core");
139
140 int mp_ncores = -1; /* how many physical cores running */
141 SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0,
142 "Number of physical cores online");
143
144 int smp_topology = 0; /* Which topology we're using. */
145 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
146 "Topology override setting; 0 is default provided by hardware.");
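/*
 * For example, setting the loader tunable kern.smp.topology=2 forces the
 * flat topology produced by smp_topo_none(), regardless of what the
 * hardware reports; see the switch in smp_topo() for the other values.
 */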
147
148 #ifdef SMP
149 /* Variables needed for SMP rendezvous. */
150 static volatile int smp_rv_ncpus;
151 static void (*volatile smp_rv_setup_func)(void *arg);
152 static void (*volatile smp_rv_action_func)(void *arg);
153 static void (*volatile smp_rv_teardown_func)(void *arg);
154 static void *volatile smp_rv_func_arg;
155 static volatile int smp_rv_waiters[4];
156
157 /*
158 * Shared mutex to restrict busywaits between smp_rendezvous() and
159 * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these
160 * functions trigger at once and cause multiple CPUs to busywait with
161 * interrupts disabled.
162 */
163 struct mtx smp_ipi_mtx;
164
165 /*
166 * Let the MD SMP code initialize mp_maxid very early if it can.
167 */
168 static void
169 mp_setmaxid(void *dummy)
170 {
171
172 cpu_mp_setmaxid();
173
174 KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
175 KASSERT(mp_ncpus > 1 || mp_maxid == 0,
176 ("%s: one CPU but mp_maxid is not zero", __func__));
177 KASSERT(mp_maxid >= mp_ncpus - 1,
178 ("%s: counters out of sync: max %d, count %d", __func__,
179 mp_maxid, mp_ncpus));
180
181 cpusetsizemin = howmany(mp_maxid + 1, NBBY);
182 }
183 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
184
185 /*
186 * Call the MD SMP initialization code.
187 */
188 static void
189 mp_start(void *dummy)
190 {
191
192 mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
193
194 /* Probe for MP hardware. */
195 if (smp_disabled != 0 || cpu_mp_probe() == 0) {
196 mp_ncores = 1;
197 mp_ncpus = 1;
198 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
199 return;
200 }
201
202 cpu_mp_start();
203 printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
204 mp_ncpus);
205
206 /* Provide a default for most architectures that don't have SMT/HTT. */
207 if (mp_ncores < 0)
208 mp_ncores = mp_ncpus;
209
210 stoppcbs = mallocarray(mp_maxid + 1, sizeof(struct pcb), M_DEVBUF,
211 M_WAITOK | M_ZERO);
212
213 cpu_mp_announce();
214 }
215 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
216
217 void
218 forward_signal(struct thread *td)
219 {
220 int id;
221
222 /*
223 * signotify() has already set TDA_AST and TDA_SIG on td_ast for
224 * this thread, so all we need to do is poke it if it is currently
225 * executing so that it executes ast().
226 */
227 THREAD_LOCK_ASSERT(td, MA_OWNED);
228 KASSERT(TD_IS_RUNNING(td),
229 ("forward_signal: thread is not TDS_RUNNING"));
230
231 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
232
233 if (!smp_started || cold || KERNEL_PANICKED())
234 return;
235
236 /* No need to IPI ourself. */
237 if (td == curthread)
238 return;
239
240 id = td->td_oncpu;
241 if (id == NOCPU)
242 return;
243 ipi_cpu(id, IPI_AST);
244 }
245
246 /*
247 * When called, the executing CPU will send an IPI to all other CPUs
248 * requesting that they halt execution.
249 *
250 * Usually (but not necessarily) called with 'other_cpus' as its arg.
251 *
252 * - Signals all CPUs in map to stop.
253 * - Waits for each to stop.
254 *
255 * Returns:
256 * -1: error
257 * 0: NA
258 * 1: ok
259 *
260 */
261 #if defined(__amd64__) || defined(__i386__)
262 #define X86 1
263 #else
264 #define X86 0
265 #endif
266 static int
267 generic_stop_cpus(cpuset_t map, u_int type)
268 {
269 #ifdef KTR
270 char cpusetbuf[CPUSETBUFSIZ];
271 #endif
272 static volatile u_int stopping_cpu = NOCPU;
273 int i;
274 volatile cpuset_t *cpus;
275
276 KASSERT(
277 type == IPI_STOP || type == IPI_STOP_HARD
278 #if X86
279 || type == IPI_SUSPEND || type == IPI_OFF
280 #endif
281 , ("%s: invalid stop type", __func__));
282
283 if (!smp_started)
284 return (0);
285
286 CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
287 cpusetobj_strprint(cpusetbuf, &map), type);
288
289 #if X86
290 /*
291 * When suspending, ensure there are no IPIs in progress.
292 * IPIs that have been issued, but not yet delivered (e.g.
293 * not pending on a vCPU when running under virtualization)
294 * will be lost, violating FreeBSD's assumption of reliable
295 * IPI delivery.
296 */
297 if (type == IPI_SUSPEND || type == IPI_OFF)
298 mtx_lock_spin(&smp_ipi_mtx);
299 #endif
300
301 #if X86
302 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
303 #endif
304 if (stopping_cpu != PCPU_GET(cpuid))
305 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
306 PCPU_GET(cpuid)) == 0)
307 while (stopping_cpu != NOCPU)
308 cpu_spinwait(); /* spin */
309
310 /* send the stop IPI to all CPUs in map */
311 ipi_selected(map, type);
312 #if X86
313 }
314 #endif
315
316 #if X86
317 if (type == IPI_SUSPEND || type == IPI_OFF)
318 cpus = &suspended_cpus;
319 else
320 #endif
321 cpus = &stopped_cpus;
322
323 i = 0;
324 while (!CPU_SUBSET(cpus, &map)) {
325 /* spin */
326 cpu_spinwait();
327 i++;
328 if (i == 100000000) {
329 printf("timeout stopping cpus\n");
330 break;
331 }
332 }
333
334 #if X86
335 if (type == IPI_SUSPEND || type == IPI_OFF)
336 mtx_unlock_spin(&smp_ipi_mtx);
337 #endif
338
339 stopping_cpu = NOCPU;
340 return (1);
341 }
342
343 int
344 stop_cpus(cpuset_t map)
345 {
346
347 return (generic_stop_cpus(map, IPI_STOP));
348 }
349
350 int
351 stop_cpus_hard(cpuset_t map)
352 {
353
354 return (generic_stop_cpus(map, IPI_STOP_HARD));
355 }
356
357 #if X86
358 int
359 suspend_cpus(cpuset_t map)
360 {
361
362 return (generic_stop_cpus(map, IPI_SUSPEND));
363 }
364
365 int
366 offline_cpus(cpuset_t map)
367 {
368
369 return (generic_stop_cpus(map, IPI_OFF));
370 }
371 #endif
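/*
 * Typical (hypothetical) pairing of the stop/restart primitives, e.g. from
 * a debugger-style code path; all names below are illustrative:
 *
 *	cpuset_t mask;
 *
 *	mask = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &mask);
 *	stop_cpus_hard(mask);
 *	... inspect or patch global state ...
 *	restart_cpus(stopped_cpus);
 */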
372
373 /*
374 * Called by a CPU to restart stopped CPUs.
375 *
376 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
377 *
378 * - Signals all CPUs in map to restart.
379 * - Waits for each to restart.
380 *
381 * Returns:
382 * -1: error
383 * 0: NA
384 * 1: ok
385 */
386 static int
387 generic_restart_cpus(cpuset_t map, u_int type)
388 {
389 #ifdef KTR
390 char cpusetbuf[CPUSETBUFSIZ];
391 #endif
392 volatile cpuset_t *cpus;
393
394 #if X86
395 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
396 || type == IPI_SUSPEND, ("%s: invalid stop type", __func__));
397
398 if (!smp_started)
399 return (0);
400
401 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
402
403 if (type == IPI_SUSPEND)
404 cpus = &resuming_cpus;
405 else
406 cpus = &stopped_cpus;
407
408 /* signal other cpus to restart */
409 if (type == IPI_SUSPEND)
410 CPU_COPY_STORE_REL(&map, &toresume_cpus);
411 else
412 CPU_COPY_STORE_REL(&map, &started_cpus);
413
414 /*
415 * Wake up any CPUs stopped with MWAIT. From MI code we can't tell if
416 * MONITOR/MWAIT is enabled, but the potentially redundant writes are
417 * relatively inexpensive.
418 */
419 if (type == IPI_STOP) {
420 struct monitorbuf *mb;
421 u_int id;
422
423 CPU_FOREACH(id) {
424 if (!CPU_ISSET(id, &map))
425 continue;
426
427 mb = &pcpu_find(id)->pc_monitorbuf;
428 atomic_store_int(&mb->stop_state,
429 MONITOR_STOPSTATE_RUNNING);
430 }
431 }
432
433 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
434 /* wait for each to clear its bit */
435 while (CPU_OVERLAP(cpus, &map))
436 cpu_spinwait();
437 }
438 #else /* !X86 */
439 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
440 ("%s: invalid stop type", __func__));
441
442 if (!smp_started)
443 return (0);
444
445 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
446
447 cpus = &stopped_cpus;
448
449 /* signal other cpus to restart */
450 CPU_COPY_STORE_REL(&map, &started_cpus);
451
452 /* wait for each to clear its bit */
453 while (CPU_OVERLAP(cpus, &map))
454 cpu_spinwait();
455 #endif
456 return (1);
457 }
458
459 int
460 restart_cpus(cpuset_t map)
461 {
462
463 return (generic_restart_cpus(map, IPI_STOP));
464 }
465
466 #if X86
467 int
468 resume_cpus(cpuset_t map)
469 {
470
471 return (generic_restart_cpus(map, IPI_SUSPEND));
472 }
473 #endif
474 #undef X86
475
476 /*
477 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
478 * (if specified), rendezvous, execute the action function (if specified),
479 * rendezvous again, execute the teardown function (if specified), and then
480 * resume.
481 *
482 * Note that the supplied external functions _must_ be reentrant and aware
483 * that they are running in parallel and in an unknown lock context.
484 */
485 void
486 smp_rendezvous_action(void)
487 {
488 struct thread *td;
489 void *local_func_arg;
490 void (*local_setup_func)(void*);
491 void (*local_action_func)(void*);
492 void (*local_teardown_func)(void*);
493 #ifdef INVARIANTS
494 int owepreempt;
495 #endif
496
497 /* Ensure we have up-to-date values. */
498 atomic_add_acq_int(&smp_rv_waiters[0], 1);
499 while (smp_rv_waiters[0] < smp_rv_ncpus)
500 cpu_spinwait();
501
502 /* Fetch rendezvous parameters after acquire barrier. */
503 local_func_arg = smp_rv_func_arg;
504 local_setup_func = smp_rv_setup_func;
505 local_action_func = smp_rv_action_func;
506 local_teardown_func = smp_rv_teardown_func;
507
508 /*
509 * Use a nested critical section to prevent any preemptions
510 * from occurring during a rendezvous action routine.
511 * Specifically, if a rendezvous handler is invoked via an IPI
512 * and the interrupted thread was in the critical_exit()
513 * function after setting td_critnest to 0 but before
514 * performing a deferred preemption, this routine can be
515 * invoked with td_critnest set to 0 and td_owepreempt true.
516 * In that case, a critical_exit() during the rendezvous
517 * action would trigger a preemption which is not permitted in
518 * a rendezvous action. To fix this, wrap all of the
519 * rendezvous action handlers in a critical section. We
520 * cannot use a regular critical section however as having
521 * critical_exit() preempt from this routine would also be
522 * problematic (the preemption must not occur before the IPI
523 * has been acknowledged via an EOI). Instead, we
524 * intentionally ignore td_owepreempt when leaving the
525 * critical section. This should be harmless because we do
526 * not permit rendezvous action routines to schedule threads,
527 * and thus td_owepreempt should never transition from 0 to 1
528 * during this routine.
529 */
530 td = curthread;
531 td->td_critnest++;
532 #ifdef INVARIANTS
533 owepreempt = td->td_owepreempt;
534 #endif
535
536 /*
537 * If requested, run a setup function before the main action
538 * function. Ensure all CPUs have completed the setup
539 * function before moving on to the action function.
540 */
541 if (local_setup_func != smp_no_rendezvous_barrier) {
542 if (local_setup_func != NULL)
543 local_setup_func(local_func_arg);
544 atomic_add_int(&smp_rv_waiters[1], 1);
545 while (smp_rv_waiters[1] < smp_rv_ncpus)
546 cpu_spinwait();
547 }
548
549 if (local_action_func != NULL)
550 local_action_func(local_func_arg);
551
552 if (local_teardown_func != smp_no_rendezvous_barrier) {
553 /*
554 * Signal that the main action has been completed. If a
555 * full exit rendezvous is requested, then all CPUs will
556 * wait here until all CPUs have finished the main action.
557 */
558 atomic_add_int(&smp_rv_waiters[2], 1);
559 while (smp_rv_waiters[2] < smp_rv_ncpus)
560 cpu_spinwait();
561
562 if (local_teardown_func != NULL)
563 local_teardown_func(local_func_arg);
564 }
565
566 /*
567 * Signal that the rendezvous is fully completed by this CPU.
568 * This means that no member of smp_rv_* pseudo-structure will be
569 * accessed by this target CPU after this point; in particular,
570 * memory pointed by smp_rv_func_arg.
571 *
572 * The release semantic ensures that all accesses performed by
573 * the current CPU are visible when smp_rendezvous_cpus()
574 * returns, by synchronizing with the
575 * atomic_load_acq_int(&smp_rv_waiters[3]).
576 */
577 atomic_add_rel_int(&smp_rv_waiters[3], 1);
578
579 td->td_critnest--;
580 KASSERT(owepreempt == td->td_owepreempt,
581 ("rendezvous action changed td_owepreempt"));
582 }
583
584 void
585 smp_rendezvous_cpus(cpuset_t map,
586 void (* setup_func)(void *),
587 void (* action_func)(void *),
588 void (* teardown_func)(void *),
589 void *arg)
590 {
591 int curcpumap, ncpus = 0;
592
593 /* See comments in the !SMP case. */
594 if (!smp_started) {
595 spinlock_enter();
596 if (setup_func != NULL)
597 setup_func(arg);
598 if (action_func != NULL)
599 action_func(arg);
600 if (teardown_func != NULL)
601 teardown_func(arg);
602 spinlock_exit();
603 return;
604 }
605
606 /*
607 * Make sure we come here with interrupts enabled. Otherwise we
608 * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI.
609 */
610 MPASS(curthread->td_md.md_spinlock_count == 0);
611
612 CPU_AND(&map, &map, &all_cpus);
613 ncpus = CPU_COUNT(&map);
614 if (ncpus == 0)
615 panic("ncpus is 0 with non-zero map");
616
617 mtx_lock_spin(&smp_ipi_mtx);
618
619 /* Pass rendezvous parameters via global variables. */
620 smp_rv_ncpus = ncpus;
621 smp_rv_setup_func = setup_func;
622 smp_rv_action_func = action_func;
623 smp_rv_teardown_func = teardown_func;
624 smp_rv_func_arg = arg;
625 smp_rv_waiters[1] = 0;
626 smp_rv_waiters[2] = 0;
627 smp_rv_waiters[3] = 0;
628 atomic_store_rel_int(&smp_rv_waiters[0], 0);
629
630 /*
631 * Signal other processors, which will enter the IPI with
632 * interrupts off.
633 */
634 curcpumap = CPU_ISSET(curcpu, &map);
635 CPU_CLR(curcpu, &map);
636 ipi_selected(map, IPI_RENDEZVOUS);
637
638 /* Check if the current CPU is in the map */
639 if (curcpumap != 0)
640 smp_rendezvous_action();
641
642 /*
643 * Ensure that the master CPU waits for all the other
644 * CPUs to finish the rendezvous, so that smp_rv_*
645 * pseudo-structure and the arg are guaranteed to not
646 * be in use.
647 *
648 * Load acquire synchronizes with the release add in
649 * smp_rendezvous_action(), which ensures that our caller sees
650 * all memory actions done by the called functions on other
651 * CPUs.
652 */
653 while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
654 cpu_spinwait();
655
656 mtx_unlock_spin(&smp_ipi_mtx);
657 }
658
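/*
 * Convenience wrapper: rendezvous with a single CPU.
 */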
659 void
660 smp_rendezvous_cpu(u_int cpuid,
661 void (* setup_func)(void *),
662 void (* action_func)(void *),
663 void (* teardown_func)(void *),
664 void *arg)
665 {
666 cpuset_t set;
667
668 CPU_SETOF(cpuid, &set);
669 smp_rendezvous_cpus(set, setup_func, action_func, teardown_func, arg);
670 }
671
672 void
673 smp_rendezvous(void (* setup_func)(void *),
674 void (* action_func)(void *),
675 void (* teardown_func)(void *),
676 void *arg)
677 {
678 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
679 }
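/*
 * Minimal usage sketch (hypothetical caller, not part of this file): run an
 * action on every CPU with no setup or teardown barrier.  "example_action"
 * and "example_counter" are illustrative names only.
 *
 *	static u_int example_counter;
 *
 *	static void
 *	example_action(void *arg)
 *	{
 *		atomic_add_int(arg, 1);
 *	}
 *
 *	smp_rendezvous(smp_no_rendezvous_barrier, example_action,
 *	    smp_no_rendezvous_barrier, &example_counter);
 */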
680
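/*
 * Recursively record the lowest and highest CPU IDs present in each group's
 * cg_mask so consumers can walk the cg_first..cg_last range directly.
 */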
681 static void
682 smp_topo_fill(struct cpu_group *cg)
683 {
684 int c;
685
686 for (c = 0; c < cg->cg_children; c++)
687 smp_topo_fill(&cg->cg_child[c]);
688 cg->cg_first = CPU_FFS(&cg->cg_mask) - 1;
689 cg->cg_last = CPU_FLS(&cg->cg_mask) - 1;
690 }
691
692 struct cpu_group *
693 smp_topo(void)
694 {
695 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
696 static struct cpu_group *top = NULL;
697
698 /*
699 * The first call to smp_topo() is guaranteed to occur
700 * during the kernel boot while we are still single-threaded.
701 */
702 if (top != NULL)
703 return (top);
704
705 /*
706 * Check for a fake topology request for debugging purposes.
707 */
708 switch (smp_topology) {
709 case 1:
710 /* Dual core with no sharing. */
711 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
712 break;
713 case 2:
714 /* No topology, all cpus are equal. */
715 top = smp_topo_none();
716 break;
717 case 3:
718 /* Dual core with shared L2. */
719 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
720 break;
721 case 4:
722 /* quad core, shared l3 among each package, private l2. */
723 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
724 break;
725 case 5:
726 /* quad core, 2 dualcore parts on each package share l2. */
727 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
728 break;
729 case 6:
730 /* Single-core 2xHTT */
731 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
732 break;
733 case 7:
734 /* quad core with a shared l3, 8 threads sharing L2. */
735 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
736 CG_FLAG_SMT);
737 break;
738 default:
739 /* Default, ask the system what it wants. */
740 top = cpu_topo();
741 break;
742 }
743 /*
744 * Verify the returned topology.
745 */
746 if (top->cg_count != mp_ncpus)
747 panic("Built bad topology at %p. CPU count %d != %d",
748 top, top->cg_count, mp_ncpus);
749 if (CPU_CMP(&top->cg_mask, &all_cpus))
750 panic("Built bad topology at %p. CPU mask (%s) != (%s)",
751 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
752 cpusetobj_strprint(cpusetbuf2, &all_cpus));
753
754 /*
755 * Collapse nonsense levels that may be created out of convenience by
756 * the MD layers. They cause extra work in the search functions.
757 */
758 while (top->cg_children == 1) {
759 top = &top->cg_child[0];
760 top->cg_parent = NULL;
761 }
762 smp_topo_fill(top);
763 return (top);
764 }
765
766 static int
767 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
768 int count, int flags, int start)
769 {
770 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
771 cpuset_t mask;
772 int i;
773
774 CPU_ZERO(&mask);
775 for (i = 0; i < count; i++, start++)
776 CPU_SET(start, &mask);
777 child->cg_parent = parent;
778 child->cg_child = NULL;
779 child->cg_children = 0;
780 child->cg_level = share;
781 child->cg_count = count;
782 child->cg_flags = flags;
783 child->cg_mask = mask;
784 parent->cg_children++;
785 for (; parent != NULL; parent = parent->cg_parent) {
786 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
787 panic("Duplicate children in %p. mask (%s) child (%s)",
788 parent,
789 cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
790 cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
791 CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
792 parent->cg_count += child->cg_count;
793 }
794
795 return (start);
796 }
797
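/*
 * Build a two-tier topology: a root group with mp_ncpus / count children,
 * each child covering 'count' consecutive CPUs that share the given cache
 * level and flags.
 */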
798 struct cpu_group *
799 smp_topo_1level(int share, int count, int flags)
800 {
801 struct cpu_group *child;
802 struct cpu_group *top;
803 int packages;
804 int cpu;
805 int i;
806
807 cpu = 0;
808 packages = mp_ncpus / count;
809 top = smp_topo_alloc(1 + packages);
810 top->cg_child = child = top + 1;
811 top->cg_level = CG_SHARE_NONE;
812 for (i = 0; i < packages; i++, child++)
813 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
814 return (top);
815 }
816
817 struct cpu_group *
818 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
819 int l1flags)
820 {
821 struct cpu_group *top;
822 struct cpu_group *l1g;
823 struct cpu_group *l2g;
824 int cpu;
825 int i;
826 int j;
827
828 cpu = 0;
829 top = smp_topo_alloc(1 + mp_ncpus / (l2count * l1count) +
830 mp_ncpus / l1count);
831 l2g = top + 1;
832 top->cg_child = l2g;
833 top->cg_level = CG_SHARE_NONE;
834 top->cg_children = mp_ncpus / (l2count * l1count);
835 l1g = l2g + top->cg_children;
836 for (i = 0; i < top->cg_children; i++, l2g++) {
837 l2g->cg_parent = top;
838 l2g->cg_child = l1g;
839 l2g->cg_level = l2share;
840 for (j = 0; j < l2count; j++, l1g++)
841 cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
842 l1flags, cpu);
843 }
844 return (top);
845 }
846
847 struct cpu_group *
848 smp_topo_find(struct cpu_group *top, int cpu)
849 {
850 struct cpu_group *cg;
851 cpuset_t mask;
852 int children;
853 int i;
854
855 CPU_SETOF(cpu, &mask);
856 cg = top;
857 for (;;) {
858 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
859 return (NULL);
860 if (cg->cg_children == 0)
861 return (cg);
862 children = cg->cg_children;
863 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
864 if (CPU_OVERLAP(&cg->cg_mask, &mask))
865 break;
866 }
867 return (NULL);
868 }
869 #else /* !SMP */
870
871 void
872 smp_rendezvous_cpus(cpuset_t map,
873 void (*setup_func)(void *),
874 void (*action_func)(void *),
875 void (*teardown_func)(void *),
876 void *arg)
877 {
878 /*
879 * In the !SMP case we just need to ensure the same initial conditions
880 * as the SMP case.
881 */
882 spinlock_enter();
883 if (setup_func != NULL)
884 setup_func(arg);
885 if (action_func != NULL)
886 action_func(arg);
887 if (teardown_func != NULL)
888 teardown_func(arg);
889 spinlock_exit();
890 }
891
892 void
893 smp_rendezvous(void (*setup_func)(void *),
894 void (*action_func)(void *),
895 void (*teardown_func)(void *),
896 void *arg)
897 {
898
899 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
900 arg);
901 }
902
903 struct cpu_group *
904 smp_topo(void)
905 {
906 static struct cpu_group *top = NULL;
907
908 if (top != NULL)
909 return (top);
910
911 top = smp_topo_none();
912 return (top);
913 }
914
915 /*
916 * Provide dummy SMP support for UP kernels. Modules that need to use SMP
917 * APIs will still work using this dummy support.
918 */
919 static void
920 mp_setvariables_for_up(void *dummy)
921 {
922 mp_ncpus = 1;
923 mp_ncores = 1;
924 mp_maxid = PCPU_GET(cpuid);
925 CPU_SETOF(mp_maxid, &all_cpus);
926 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
927 }
928 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
929 mp_setvariables_for_up, NULL);
930 #endif /* SMP */
931
932 void
933 smp_no_rendezvous_barrier(void *dummy)
934 {
935 #ifdef SMP
936 KASSERT((!smp_started),("smp_no_rendezvous called and smp is started"));
937 #endif
938 }
939
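/*
 * Variant of smp_rendezvous_cpus() that repeats the rendezvous until every
 * CPU in the map has acknowledged completion by calling
 * smp_rendezvous_cpus_done() on the shared argument.  Between attempts,
 * wait_func() is invoked for each CPU that has not yet acknowledged.
 */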
940 void
941 smp_rendezvous_cpus_retry(cpuset_t map,
942 void (* setup_func)(void *),
943 void (* action_func)(void *),
944 void (* teardown_func)(void *),
945 void (* wait_func)(void *, int),
946 struct smp_rendezvous_cpus_retry_arg *arg)
947 {
948 int cpu;
949
950 CPU_COPY(&map, &arg->cpus);
951
952 /*
953 * Only one CPU to execute on.
954 */
955 if (!smp_started) {
956 spinlock_enter();
957 if (setup_func != NULL)
958 setup_func(arg);
959 if (action_func != NULL)
960 action_func(arg);
961 if (teardown_func != NULL)
962 teardown_func(arg);
963 spinlock_exit();
964 return;
965 }
966
967 /*
968 * Execute an action on all specified CPUs while retrying until they
969 * all acknowledge completion.
970 */
971 for (;;) {
972 smp_rendezvous_cpus(
973 arg->cpus,
974 setup_func,
975 action_func,
976 teardown_func,
977 arg);
978
979 if (CPU_EMPTY(&arg->cpus))
980 break;
981
982 CPU_FOREACH(cpu) {
983 if (!CPU_ISSET(cpu, &arg->cpus))
984 continue;
985 wait_func(arg, cpu);
986 }
987 }
988 }
989
990 void
991 smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
992 {
993
994 CPU_CLR_ATOMIC(curcpu, &arg->cpus);
995 }
996
997 /*
998 * If (prio & PDROP) == 0:
999 * Wait for specified idle threads to switch once. This ensures that even
1000 * preempted threads have cycled through the switch function once,
1001 * exiting their codepaths. This allows us to change global pointers
1002 * with no other synchronization.
1003 * If (prio & PDROP) != 0:
1004 * Force the specified CPUs to switch context at least once.
1005 */
1006 int
1007 quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
1008 {
1009 struct pcpu *pcpu;
1010 u_int *gen;
1011 int error;
1012 int cpu;
1013
1014 error = 0;
1015 if ((prio & PDROP) == 0) {
1016 gen = mallocarray(mp_maxid + 1, sizeof(u_int), M_TEMP,
1017 M_WAITOK);
1018 for (cpu = 0; cpu <= mp_maxid; cpu++) {
1019 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
1020 continue;
1021 pcpu = pcpu_find(cpu);
1022 gen[cpu] = pcpu->pc_idlethread->td_generation;
1023 }
1024 }
1025 for (cpu = 0; cpu <= mp_maxid; cpu++) {
1026 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
1027 continue;
1028 pcpu = pcpu_find(cpu);
1029 thread_lock(curthread);
1030 sched_bind(curthread, cpu);
1031 thread_unlock(curthread);
1032 if ((prio & PDROP) != 0)
1033 continue;
1034 while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
1035 error = tsleep(quiesce_cpus, prio & ~PDROP, wmesg, 1);
1036 if (error != EWOULDBLOCK)
1037 goto out;
1038 error = 0;
1039 }
1040 }
1041 out:
1042 thread_lock(curthread);
1043 sched_unbind(curthread);
1044 thread_unlock(curthread);
1045 if ((prio & PDROP) == 0)
1046 free(gen, M_TEMP);
1047
1048 return (error);
1049 }
1050
1051 int
1052 quiesce_all_cpus(const char *wmesg, int prio)
1053 {
1054
1055 return quiesce_cpus(all_cpus, wmesg, prio);
1056 }
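/*
 * Typical use (hypothetical module code, all names illustrative): after
 * swinging a global function pointer, wait for every CPU to pass through
 * the scheduler before freeing the old target.
 *
 *	old = handler;
 *	handler = new_handler;
 *	quiesce_all_cpus("qwait", 0);
 *	free(old, M_EXAMPLE);
 */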
1057
1058 /*
1059 * Observe all CPUs not executing in a critical section.
1060 * We are not in one ourselves, so the check for the current CPU is safe.
1061 * If the observed thread changes to something else, we know the critical
1062 * section was exited as well.
1063 */
1064 void
1065 quiesce_all_critical(void)
1066 {
1067 struct thread *td, *newtd;
1068 struct pcpu *pcpu;
1069 int cpu;
1070
1071 MPASS(curthread->td_critnest == 0);
1072
1073 CPU_FOREACH(cpu) {
1074 pcpu = cpuid_to_pcpu[cpu];
1075 td = pcpu->pc_curthread;
1076 for (;;) {
1077 if (td->td_critnest == 0)
1078 break;
1079 cpu_spinwait();
1080 newtd = (struct thread *)
1081 atomic_load_acq_ptr((void *)&pcpu->pc_curthread);
1082 if (td != newtd)
1083 break;
1084 }
1085 }
1086 }
1087
1088 static void
1089 cpus_fence_seq_cst_issue(void *arg __unused)
1090 {
1091
1092 atomic_thread_fence_seq_cst();
1093 }
1094
1095 /*
1096 * Send an IPI forcing a sequentially consistent fence.
1097 *
1098 * Allows replacement of an explicit fence with a compiler barrier.
1099 * Trades a speedup during normal execution for a significant slowdown when
1100 * the barrier is needed.
1101 */
1102 void
1103 cpus_fence_seq_cst(void)
1104 {
1105
1106 #ifdef SMP
1107 smp_rendezvous(
1108 smp_no_rendezvous_barrier,
1109 cpus_fence_seq_cst_issue,
1110 smp_no_rendezvous_barrier,
1111 NULL
1112 );
1113 #else
1114 cpus_fence_seq_cst_issue(NULL);
1115 #endif
1116 }
1117
1118 /* Extra care is taken with this sysctl because the data type is volatile */
1119 static int
1120 sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
1121 {
1122 int error, active;
1123
1124 active = smp_started;
1125 error = SYSCTL_OUT(req, &active, sizeof(active));
1126 return (error);
1127 }
1128
1129 #ifdef SMP
1130 void
1131 topo_init_node(struct topo_node *node)
1132 {
1133
1134 bzero(node, sizeof(*node));
1135 TAILQ_INIT(&node->children);
1136 }
1137
1138 void
1139 topo_init_root(struct topo_node *root)
1140 {
1141
1142 topo_init_node(root);
1143 root->type = TOPO_TYPE_SYSTEM;
1144 }
1145
1146 /*
1147 * Add a child node with the given ID under the given parent.
1148 * If a child with that ID already exists, return it instead of adding one.
1149 */
1150 struct topo_node *
1151 topo_add_node_by_hwid(struct topo_node *parent, int hwid,
1152 topo_node_type type, uintptr_t subtype)
1153 {
1154 struct topo_node *node;
1155
1156 TAILQ_FOREACH_REVERSE(node, &parent->children,
1157 topo_children, siblings) {
1158 if (node->hwid == hwid
1159 && node->type == type && node->subtype == subtype) {
1160 return (node);
1161 }
1162 }
1163
1164 node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
1165 topo_init_node(node);
1166 node->parent = parent;
1167 node->hwid = hwid;
1168 node->type = type;
1169 node->subtype = subtype;
1170 TAILQ_INSERT_TAIL(&parent->children, node, siblings);
1171 parent->nchildren++;
1172
1173 return (node);
1174 }
1175
1176 /*
1177 * Find a child node with the given ID under the given parent.
1178 */
1179 struct topo_node *
1180 topo_find_node_by_hwid(struct topo_node *parent, int hwid,
1181 topo_node_type type, uintptr_t subtype)
1182 {
1183
1184 struct topo_node *node;
1185
1186 TAILQ_FOREACH(node, &parent->children, siblings) {
1187 if (node->hwid == hwid
1188 && node->type == type && node->subtype == subtype) {
1189 return (node);
1190 }
1191 }
1192
1193 return (NULL);
1194 }
1195
1196 /*
1197 * Given a node change the order of its parent's child nodes such
1198 * that the node becomes the first child while preserving the cyclic
1199 * order of the children. In other words, the given node is promoted
1200 * by rotation.
1201 */
1202 void
1203 topo_promote_child(struct topo_node *child)
1204 {
1205 struct topo_node *next;
1206 struct topo_node *node;
1207 struct topo_node *parent;
1208
1209 parent = child->parent;
1210 next = TAILQ_NEXT(child, siblings);
1211 TAILQ_REMOVE(&parent->children, child, siblings);
1212 TAILQ_INSERT_HEAD(&parent->children, child, siblings);
1213
1214 while (next != NULL) {
1215 node = next;
1216 next = TAILQ_NEXT(node, siblings);
1217 TAILQ_REMOVE(&parent->children, node, siblings);
1218 TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
1219 child = node;
1220 }
1221 }
1222
1223 /*
1224 * Iterate to the next node in the depth-first search (traversal) of
1225 * the topology tree.
1226 */
1227 struct topo_node *
1228 topo_next_node(struct topo_node *top, struct topo_node *node)
1229 {
1230 struct topo_node *next;
1231
1232 if ((next = TAILQ_FIRST(&node->children)) != NULL)
1233 return (next);
1234
1235 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1236 return (next);
1237
1238 while (node != top && (node = node->parent) != top)
1239 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1240 return (next);
1241
1242 return (NULL);
1243 }
1244
1245 /*
1246 * Iterate to the next node in the depth-first search of the topology tree,
1247 * but without descending below the current node.
1248 */
1249 struct topo_node *
1250 topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
1251 {
1252 struct topo_node *next;
1253
1254 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1255 return (next);
1256
1257 while (node != top && (node = node->parent) != top)
1258 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1259 return (next);
1260
1261 return (NULL);
1262 }
1263
1264 /*
1265 * Assign the given ID to the given topology node that represents a logical
1266 * processor.
1267 */
1268 void
1269 topo_set_pu_id(struct topo_node *node, cpuid_t id)
1270 {
1271
1272 KASSERT(node->type == TOPO_TYPE_PU,
1273 ("topo_set_pu_id: wrong node type: %u", node->type));
1274 KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
1275 ("topo_set_pu_id: cpuset already not empty"));
1276 node->id = id;
1277 CPU_SET(id, &node->cpuset);
1278 node->cpu_count = 1;
1279 node->subtype = 1;
1280
1281 while ((node = node->parent) != NULL) {
1282 KASSERT(!CPU_ISSET(id, &node->cpuset),
1283 ("logical ID %u is already set in node %p", id, node));
1284 CPU_SET(id, &node->cpuset);
1285 node->cpu_count++;
1286 }
1287 }
1288
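/*
 * Per-level matching rules used by topo_analyze_table(): each analysis
 * level corresponds to one topo_node type, and the cachegroup level
 * additionally requires an L3 cache subtype.
 */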
1289 static struct topology_spec {
1290 topo_node_type type;
1291 bool match_subtype;
1292 uintptr_t subtype;
1293 } topology_level_table[TOPO_LEVEL_COUNT] = {
1294 [TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
1295 [TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
1296 [TOPO_LEVEL_CACHEGROUP] = {
1297 .type = TOPO_TYPE_CACHE,
1298 .match_subtype = true,
1299 .subtype = CG_SHARE_L3,
1300 },
1301 [TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
1302 [TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
1303 };
1304
1305 static bool
1306 topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
1307 struct topo_analysis *results)
1308 {
1309 struct topology_spec *spec;
1310 struct topo_node *node;
1311 int count;
1312
1313 if (level >= TOPO_LEVEL_COUNT)
1314 return (true);
1315
1316 spec = &topology_level_table[level];
1317 count = 0;
1318 node = topo_next_node(root, root);
1319
1320 while (node != NULL) {
1321 if (node->type != spec->type ||
1322 (spec->match_subtype && node->subtype != spec->subtype)) {
1323 node = topo_next_node(root, node);
1324 continue;
1325 }
1326 if (!all && CPU_EMPTY(&node->cpuset)) {
1327 node = topo_next_nonchild_node(root, node);
1328 continue;
1329 }
1330
1331 count++;
1332
1333 if (!topo_analyze_table(node, all, level + 1, results))
1334 return (false);
1335
1336 node = topo_next_nonchild_node(root, node);
1337 }
1338
1339 /* No explicit subgroups is essentially one subgroup. */
1340 if (count == 0) {
1341 count = 1;
1342
1343 if (!topo_analyze_table(root, all, level + 1, results))
1344 return (false);
1345 }
1346
1347 if (results->entities[level] == -1)
1348 results->entities[level] = count;
1349 else if (results->entities[level] != count)
1350 return (false);
1351
1352 return (true);
1353 }
1354
1355 /*
1356 * Check if the topology is uniform, that is, each package has the same number
1357 * of cores in it and each core has the same number of threads (logical
1358 * processors) in it. If so, calculate the number of packages, the number of
1359 * groups per package, the number of cachegroups per group, and the number of
1360 * logical processors per cachegroup.  The 'all' parameter tells whether to
1361 * include administratively disabled logical processors in the analysis.
1362 */
1363 int
1364 topo_analyze(struct topo_node *topo_root, int all,
1365 struct topo_analysis *results)
1366 {
1367
1368 results->entities[TOPO_LEVEL_PKG] = -1;
1369 results->entities[TOPO_LEVEL_CORE] = -1;
1370 results->entities[TOPO_LEVEL_THREAD] = -1;
1371 results->entities[TOPO_LEVEL_GROUP] = -1;
1372 results->entities[TOPO_LEVEL_CACHEGROUP] = -1;
1373
1374 if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
1375 return (0);
1376
1377 KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
1378 ("bug in topology or analysis"));
1379
1380 return (1);
1381 }
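/*
 * Sketch of a hypothetical MD caller (names illustrative): use the analysis
 * to decide whether an SMT-aware cpu_group tree is worth building.
 *
 *	struct topo_analysis ta;
 *
 *	if (topo_analyze(root, 1, &ta) &&
 *	    ta.entities[TOPO_LEVEL_THREAD] > 1)
 *		... build a topology with CG_FLAG_SMT leaves ...
 */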
1382
1383 #endif /* SMP */
1384