/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_atpic.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_platform.h"
#ifdef __i386__
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/pmckern.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/trap.h>
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif
#include <x86/acpica_machdep.h>
#include <x86/ifunc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

#include <contrib/dev/acpica/include/acpi.h>

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

#ifdef SMP
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

char bootmethod[16];
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

struct msr_op_arg {
	u_int msr;
	int op;
	uint64_t arg1;
	uint64_t *res;
	bool safe;
	int error;
};

static void
x86_msr_op_one_safe(struct msr_op_arg *a)
{
	uint64_t v;
	int error;

	error = 0;
	switch (a->op) {
	case MSR_OP_ANDNOT:
		error = rdmsr_safe(a->msr, &v);
		if (error != 0) {
			atomic_cmpset_int(&a->error, 0, error);
			break;
		}
		if (a->res != NULL)
			atomic_store_64(a->res, v);
		v &= ~a->arg1;
		error = wrmsr_safe(a->msr, v);
		if (error != 0)
			atomic_cmpset_int(&a->error, 0, error);
		break;
	case MSR_OP_OR:
		error = rdmsr_safe(a->msr, &v);
		if (error != 0) {
			atomic_cmpset_int(&a->error, 0, error);
			break;
		}
		if (a->res != NULL)
			atomic_store_64(a->res, v);
		v |= a->arg1;
		error = wrmsr_safe(a->msr, v);
		if (error != 0)
			atomic_cmpset_int(&a->error, 0, error);
		break;
	case MSR_OP_WRITE:
		error = wrmsr_safe(a->msr, a->arg1);
		if (error != 0)
			atomic_cmpset_int(&a->error, 0, error);
		break;
	case MSR_OP_READ:
		error = rdmsr_safe(a->msr, &v);
		if (error == 0) {
			if (a->res != NULL)
				atomic_store_64(a->res, v);
		} else {
			atomic_cmpset_int(&a->error, 0, error);
		}
		break;
	}
}

static void
x86_msr_op_one_unsafe(struct msr_op_arg *a)
{
	uint64_t v;

	switch (a->op) {
	case MSR_OP_ANDNOT:
		v = rdmsr(a->msr);
		if (a->res != NULL)
			atomic_store_64(a->res, v);
		v &= ~a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_OR:
		v = rdmsr(a->msr);
		if (a->res != NULL)
			atomic_store_64(a->res, v);
		v |= a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_WRITE:
		wrmsr(a->msr, a->arg1);
		break;
	case MSR_OP_READ:
		v = rdmsr(a->msr);
		if (a->res != NULL)
			atomic_store_64(a->res, v);
		break;
	default:
		__assert_unreachable();
	}
}

static void
x86_msr_op_one(void *arg)
{
	struct msr_op_arg *a;

	a = arg;
	if (a->safe)
		x86_msr_op_one_safe(a);
	else
		x86_msr_op_one_unsafe(a);
}

#define	MSR_OP_EXMODE_MASK	0xf0000000
#define	MSR_OP_OP_MASK		0x000000ff
#define	MSR_OP_GET_CPUID(x)	\
    (((x) & ~(MSR_OP_EXMODE_MASK | MSR_OP_SAFE)) >> 8)

/*
 * Utility function to wrap common MSR accesses.
 *
 * The msr argument specifies the MSR number to operate on.
 * arg1 is an optional additional argument, needed by the modifying ops.
 *
 * res is the location where the value read from the MSR is placed. It is
 * the value that was initially read from the MSR, before applying the
 * specified operation. Can be NULL if the value is not needed. If the
 * op is executed on more than one CPU, it is unspecified on which CPU
 * the value was read.
 *
 * op encoding combines the target/mode specification and the requested
 * operation, all or-ed together.
 *
 * MSR accesses are executed with interrupts disabled.
 *
 * The following targets can be specified:
 * MSR_OP_LOCAL			execute on current CPU.
 * MSR_OP_SCHED_ALL		execute on all CPUs, by migrating
 *				the current thread to them in sequence.
 * MSR_OP_SCHED_ALL | MSR_OP_SAFE	execute on all CPUs by migrating,
 *				using safe MSR access.
 * MSR_OP_SCHED_ONE		execute on specified CPU, migrate
 *				curthread to it.
 * MSR_OP_SCHED_ONE | MSR_OP_SAFE	safely execute on specified CPU,
 *				migrate curthread to it.
 * MSR_OP_RENDEZVOUS_ALL	execute on all CPUs in interrupt
 *				context.
 * MSR_OP_RENDEZVOUS_ONE	execute on specified CPU in interrupt
 *				context.
 * If a _ONE target is specified, 'or' the op value with MSR_OP_CPUID(cpuid)
 * to name the target CPU. _SAFE variants might return EFAULT if access to
 * the MSR faulted with #GP. Non-_SAFE variants most likely panic or reboot
 * the machine if the MSR is not present or the access is not tolerated by
 * the hardware.
 *
 * The following operations can be specified:
 * MSR_OP_ANDNOT	*res = v = *msr; *msr = v & ~arg1
 * MSR_OP_OR		*res = v = *msr; *msr = v | arg1
 * MSR_OP_READ		*res = *msr
 * MSR_OP_WRITE		*msr = arg1
 */
int
x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
{
	struct thread *td;
	struct msr_op_arg a;
	cpuset_t set;
	register_t flags;
	u_int exmode;
	int bound_cpu, cpu, i, is_bound;

	exmode = op & MSR_OP_EXMODE_MASK;
	a.op = op & MSR_OP_OP_MASK;
	a.msr = msr;
	a.safe = (op & MSR_OP_SAFE) != 0;
	a.arg1 = arg1;
	a.res = res;
	a.error = 0;

	switch (exmode) {
	case MSR_OP_LOCAL:
		flags = intr_disable();
		x86_msr_op_one(&a);
		intr_restore(flags);
		break;
	case MSR_OP_SCHED_ALL:
		td = curthread;
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		CPU_FOREACH(i) {
			sched_bind(td, i);
			x86_msr_op_one(&a);
		}
		if (is_bound)
			sched_bind(td, bound_cpu);
		else
			sched_unbind(td);
		thread_unlock(td);
		break;
	case MSR_OP_SCHED_ONE:
		td = curthread;
		cpu = MSR_OP_GET_CPUID(op);
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		if (!is_bound || bound_cpu != cpu)
			sched_bind(td, cpu);
		x86_msr_op_one(&a);
		if (is_bound) {
			if (bound_cpu != cpu)
				sched_bind(td, bound_cpu);
		} else {
			sched_unbind(td);
		}
		thread_unlock(td);
		break;
	case MSR_OP_RENDEZVOUS_ALL:
		smp_rendezvous(smp_no_rendezvous_barrier,
		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
		break;
	case MSR_OP_RENDEZVOUS_ONE:
		cpu = MSR_OP_GET_CPUID(op);
		CPU_SETOF(cpu, &set);
		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
		break;
	default:
		__assert_unreachable();
	}
	return (a.error);
}
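
/*
 * Usage sketch (editor's illustration, deliberately compiled out): the
 * MSR number, target CPU, and bit below are placeholders; only the
 * x86_msr_op() calling convention itself comes from the code above.
 */
#if 0
static void
x86_msr_op_example(void)
{
	uint64_t val;
	int error;

	/* Safely read a placeholder MSR on CPU 2; EFAULT if it #GPs. */
	error = x86_msr_op(0x123, MSR_OP_READ | MSR_OP_SCHED_ONE |
	    MSR_OP_SAFE | MSR_OP_CPUID(2), 0, &val);
	if (error != 0)
		printf("MSR read faulted: %d\n", error);

	/* OR a placeholder bit into the MSR on every CPU, via rendezvous. */
	(void)x86_msr_op(0x123, MSR_OP_OR | MSR_OP_RENDEZVOUS_ALL, 0x1, NULL);
}
#endif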

/*
 * Per-CPU errata workarounds, initialized automatically in cpu_idle_tun()
 * below.
 */
bool mwait_cpustop_broken = false;
SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
    &mwait_cpustop_broken, 0,
    "Cannot reliably wake MONITOR/MWAIT CPUs without interrupts");

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

void
acpi_cpu_c1(void)
{

	__asm __volatile("sti; hlt");
}

/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling. The caller is responsible for
 * enabling interrupts.
 */
void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
	int *state;
	uint64_t v;

	/*
	 * A comment in a Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled. All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled. Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB. Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = &PCPU_PTR(monitorbuf)->idle_state;
	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	atomic_store_int(state, STATE_MWAIT);
	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
		v = rdmsr(MSR_IA32_SPEC_CTRL);
		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
	} else {
		v = 0;
	}
	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);

	/*
	 * SSB cannot be disabled while we sleep, or rather, if it was
	 * disabled, the sysctl thread will bind to our cpu to tweak
	 * the MSR.
	 */
	if (v != 0)
		wrmsr(MSR_IA32_SPEC_CTRL, v);

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	atomic_store_int(state, STATE_RUNNING);
}

/* Get the current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt_start, acnt_end, mcnt_start, mcnt_end, perf;
	register_t reg;
	int error = 0;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
#ifdef __i386__
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);
#endif

	/*
	 * If the TSC is P-state invariant and the APERF/MPERF MSRs do not
	 * exist, the DELAY(9)-based calibration below fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		mcnt_start = rdmsr(MSR_MPERF);
		acnt_start = rdmsr(MSR_APERF);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt_end = rdmsr(MSR_MPERF);
		acnt_end = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		if (mcnt_end == mcnt_start) {
			tsc_perf_stat = 0;
			error = EOPNOTSUPP;
			goto err;
		}
		perf = 1000 * (acnt_end - acnt_start) /
		    (mcnt_end - mcnt_start);
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

err:
#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (error);
}
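
/*
 * Worked example of the estimate above (editor's illustration, numbers
 * made up): if DELAY(1000) lets an invariant 2 GHz TSC advance by
 * 2,000,000 ticks while APERF and MPERF advance by 1,500,000 and
 * 2,000,000 respectively, then perf = 1000 * 1500000 / 2000000 = 750 and
 * *rate = 2000000 * 750 = 1,500,000,000, i.e. an effective 1.5 GHz core
 * clock even though the TSC itself ticks at 2 GHz.
 */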

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
	int b;

	disable_intr();
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif
#ifdef __i386__
	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}
#endif
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9. Bit 2 forces a system reset when it
	 * transitions from 0 to 1. Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset. We try a "hard" reset. The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2. The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92. Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1. We are careful to only
	 * preserve bit 1 while setting bit 0. We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while (1)
		;
}

#ifdef SMP
static void
cpu_reset_proxy(void)
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		ia32_pause(); /* Wait for other cpu to see that we've started */

	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset(void)
{
#ifdef SMP
	struct monitorbuf *mb;
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_ANDNOT(&map, &map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			CPU_SETOF(0, &started_cpus);
			mb = &pcpu_find(0)->pc_monitorbuf;
			atomic_store_int(&mb->stop_state,
			    MONITOR_STOPSTATE_RUNNING);

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
				ia32_pause();
				cnt++;	/* Wait for BSP to announce restart */
			}
			if (cpu_reset_proxy_active == 0) {
				printf("cpu_reset: Failed to restart BSP\n");
			} else {
				cpu_reset_proxy_active = 2;
				while (1)
					ia32_pause();
				/* NOTREACHED */
			}
		}
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

bool
cpu_mwait_usable(void)
{

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
}

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */

int cpu_amdc1e_bug = 0;		/* AMD C1E APIC workaround required. */

static int	idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

static bool
cpu_idle_enter(int *statep, int newstate)
{
	KASSERT(atomic_load_int(statep) == STATE_RUNNING,
	    ("%s: state %d", __func__, atomic_load_int(statep)));

	/*
	 * A fence is needed to prevent reordering of the load in
	 * sched_runnable() with this store to the idle state word. Without it,
	 * cpu_idle_wakeup() can observe the state as STATE_RUNNING after having
	 * added load to the queue, and elide an IPI. Then, sched_runnable()
	 * can observe tdq_load == 0, so the CPU ends up idling with pending
	 * work. tdq_notify() similarly ensures that a prior update to tdq_load
	 * is visible before calling cpu_idle_wakeup().
	 */
	atomic_store_int(statep, newstate);
	atomic_thread_fence_seq_cst();

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption. If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found. Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti. This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt. Finally, note that
	 * this works on x86 because sti sets IF immediately but
	 * interrupt delivery is deferred until the following
	 * instruction executes, so a pending interrupt is acknowledged
	 * by hlt and wakes the CPU instead of being taken in the
	 * sti/hlt window.
	 */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		atomic_store_int(statep, STATE_RUNNING);
		return (false);
	} else {
		return (true);
	}
}

static void
cpu_idle_exit(int *statep)
{
	atomic_store_int(statep, STATE_RUNNING);
}

static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_SLEEPING)) {
		if (cpu_idle_hook)
			cpu_idle_hook(sbt);
		else
			acpi_cpu_c1();
		cpu_idle_exit(state);
	}
}

static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_SLEEPING)) {
		acpi_cpu_c1();
		atomic_store_int(state, STATE_RUNNING);
	}
}

static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_MWAIT)) {
		cpu_monitor(state, 0, 0);
		if (atomic_load_int(state) == STATE_MWAIT)
			__asm __volatile("sti; mwait" : : "a" (MWAIT_C1),
			    "c" (0));
		else
			enable_intr();
		cpu_idle_exit(state);
	}
}

static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_RUNNING);

	/*
	 * The sched_runnable() call is racy, but since it sits in a
	 * loop, missing it once has little impact, if any (and it is
	 * much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
	uint64_t msr;
	sbintime_t sbt = -1;

	CTR1(KTR_SPARE2, "cpu_idle(%d)", busy);

	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
			    AMDK8_C1EONCMPHALT));
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy);
}

static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");

int
cpu_idle_wakeup(int cpu)
{
	struct monitorbuf *mb;
	int *state;

	mb = &pcpu_find(cpu)->pc_monitorbuf;
	state = &mb->idle_state;
	switch (atomic_load_int(state)) {
	case STATE_SLEEPING:
		return (0);
	case STATE_MWAIT:
		atomic_store_int(state, STATE_RUNNING);
		return (cpu_idle_apl31_workaround ? 0 : 1);
	case STATE_RUNNING:
		return (1);
	default:
		panic("bad monitor state");
		return (1);
	}
}

/*
 * Ordered by speed/power consumption.
 */
static const struct {
	void	*id_fn;
	const char *id_name;
	int	id_cpuid2_flag;
} idle_tbl[] = {
	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
	    .id_cpuid2_flag = CPUID2_MON },
	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, idle_sysctl_available, "A",
    "list of available idle functions");

static bool
cpu_idle_selector(const char *new_idle_name)
{
	int i;

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, new_idle_name))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		if (bootverbose)
			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
		return (true);
	}
	return (false);
}

static int
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	const char *p;
	int error, i;

	p = "unknown";
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (cpu_idle_selector(buf) ? 0 : EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
    0, 0, cpu_idle_sysctl, "A",
    "currently selected idle function");

static void
cpu_idle_tun(void *unused __unused)
{
	char tunvar[16];

	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
		cpu_idle_selector(tunvar);
	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
		/* Ryzen errata 1057 and 1109. */
		cpu_idle_selector("hlt");
		idle_mwait = 0;
		mwait_cpustop_broken = true;
	}

	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x5c) {
		/*
		 * Apollo Lake errata APL31 (public errata APL30).
		 * Stores to the armed address range may not trigger
		 * MWAIT to resume execution. OS needs to use
		 * interrupts to wake processors from MWAIT-induced
		 * sleep states.
		 */
		cpu_idle_apl31_workaround = 1;
		mwait_cpustop_broken = true;
	}
	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
}
SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);

static int panic_on_nmi = 0xff;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    &panic_on_nmi, 0,
    "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");
int (*apei_nmi)(void);

void
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
{
	bool claimed = false;

#ifdef DEV_ISA
	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err)) {
		claimed = true;
		if ((panic_on_nmi & 1) != 0)
			panic("NMI indicates hardware failure");
	}
#endif /* DEV_ISA */

	/* ACPI Platform Error Interfaces callback. */
	if (apei_nmi != NULL && (*apei_nmi)())
		claimed = true;

	/*
	 * NMIs can be useful for debugging. They can be hooked up to a
	 * pushbutton, usually on an ISA, PCI, or PCIe card. They can also be
	 * generated by an IPMI BMC, either manually or in response to a
	 * watchdog timeout. For example, see the "power diag" command in
	 * ports/sysutils/ipmitool. They can also be generated by a
	 * hypervisor; see "bhyvectl --inject-nmi".
	 */

#ifdef KDB
	if (!claimed && (panic_on_nmi & 2) != 0) {
		if (debugger_on_panic) {
			printf("NMI/cpu%d ... going to debugger\n", cpu);
			claimed = kdb_trap(type, 0, frame);
		}
	}
#endif /* KDB */

	if (!claimed && panic_on_nmi != 0)
		panic("NMI");
}

/*
 * Dynamically registered NMI handlers.
 */
struct nmi_handler {
	int running;
	int (*func)(struct trapframe *);
	struct nmi_handler *next;
};
static struct nmi_handler *nmi_handlers_head = NULL;
MALLOC_DEFINE(M_NMI, "NMI handlers",
    "List entries for dynamically registered NMI handlers");

void
nmi_register_handler(int (*handler)(struct trapframe *))
{
	struct nmi_handler *hp;
	int (*hpf)(struct trapframe *);

	hp = (struct nmi_handler *)atomic_load_acq_ptr(
	    (uintptr_t *)&nmi_handlers_head);
	while (hp != NULL) {
		hpf = hp->func;
		MPASS(hpf != handler);
		if (hpf == NULL &&
		    atomic_cmpset_ptr((volatile uintptr_t *)&hp->func,
		    (uintptr_t)NULL, (uintptr_t)handler) != 0) {
			hp->running = 0;
			return;
		}
		hp = (struct nmi_handler *)atomic_load_acq_ptr(
		    (uintptr_t *)&hp->next);
	}
	hp = malloc(sizeof(struct nmi_handler), M_NMI, M_WAITOK | M_ZERO);
	hp->func = handler;
	hp->next = nmi_handlers_head;
	while (atomic_fcmpset_rel_ptr(
	    (volatile uintptr_t *)&nmi_handlers_head,
	    (uintptr_t *)&hp->next, (uintptr_t)hp) == 0)
		;
}

void
nmi_remove_handler(int (*handler)(struct trapframe *))
{
	struct nmi_handler *hp;

	hp = (struct nmi_handler *)atomic_load_acq_ptr(
	    (uintptr_t *)&nmi_handlers_head);
	while (hp != NULL) {
		if (hp->func == handler) {
			hp->func = NULL;
			/* Wait for the handler to exit before returning. */
			while (atomic_load_int(&hp->running) != 0)
				cpu_spinwait();
			return;
		}
		hp = (struct nmi_handler *)atomic_load_acq_ptr(
		    (uintptr_t *)&hp->next);
	}

	panic("%s: attempting to remove an unregistered NMI handler %p\n",
	    __func__, handler);
}
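
/*
 * Registration sketch (editor's illustration, deliberately compiled out):
 * a handler returns nonzero to claim the NMI, which stops the scan in
 * nmi_handle_intr() below. The example_device_*() helpers are
 * hypothetical; only the register/remove interface comes from this file.
 */
#if 0
static int
example_nmi_handler(struct trapframe *frame)
{

	if (!example_device_raised_nmi())
		return (0);	/* Not ours; let other handlers look. */
	example_device_ack_nmi();
	return (1);		/* Claimed. */
}

static void
example_nmi_attach(void)
{

	nmi_register_handler(example_nmi_handler);
}

static void
example_nmi_detach(void)
{

	/* Returns only once no CPU is still running the handler. */
	nmi_remove_handler(example_nmi_handler);
}
#endif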

void
nmi_handle_intr(struct trapframe *frame)
{
	int (*func)(struct trapframe *);
	struct nmi_handler *hp;
	int rv;
	bool handled;

#ifdef SMP
	/* Handler for NMI IPIs used for stopping CPUs. */
	if (ipi_nmi_handler() == 0)
		return;
#endif
	handled = false;
	hp = (struct nmi_handler *)atomic_load_acq_ptr(
	    (uintptr_t *)&nmi_handlers_head);
	while (!handled && hp != NULL) {
		func = hp->func;
		if (func != NULL) {
			atomic_add_int(&hp->running, 1);
			rv = func(frame);
			atomic_subtract_int(&hp->running, 1);
			if (rv != 0) {
				handled = true;
				break;
			}
		}
		hp = (struct nmi_handler *)atomic_load_acq_ptr(
		    (uintptr_t *)&hp->next);
	}
	if (handled)
		return;
#ifdef SMP
	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(T_NMI, frame);
		return;
	}
#endif
	nmi_call_kdb(PCPU_GET(cpuid), T_NMI, frame);
}

static int hw_ibrs_active;
int hw_ibrs_ibpb_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
    &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");

void
hw_ibrs_recalculate(bool for_all_cpus)
{
	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
		    (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
		    IA32_SPEC_CTRL_IBRS, NULL);
		hw_ibrs_active = hw_ibrs_disable == 0;
		hw_ibrs_ibpb_active = 0;
	} else {
		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
	}
}

static int
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate(true);
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

int hw_ssb_active;
int hw_ssb_disable;

SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    &hw_ssb_active, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
    &hw_ssb_active, 0, "Speculative Store Bypass Disable active");

static void
hw_ssb_set(bool enable, bool for_all_cpus)
{

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
		hw_ssb_active = 0;
		return;
	}
	hw_ssb_active = enable;
	x86_msr_op(MSR_IA32_SPEC_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
	    IA32_SPEC_CTRL_SSBD, NULL);
}

void
hw_ssb_recalculate(bool all_cpus)
{

	switch (hw_ssb_disable) {
	default:
		hw_ssb_disable = 0;
		/* FALLTHROUGH */
	case 0: /* off */
		hw_ssb_set(false, all_cpus);
		break;
	case 1: /* on */
		hw_ssb_set(true, all_cpus);
		break;
	case 2: /* auto */
		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
		    false : true, all_cpus);
		break;
	}
}

static int
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ssb_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ssb_disable = val;
	hw_ssb_recalculate(true);
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

int hw_mds_disable;

/*
 * Handler for Microarchitectural Data Sampling issues. Really not a
 * pointer to a C function: on amd64 the code must not change any CPU
 * architectural state except possibly %rflags. Also, it is always
 * called with interrupts disabled.
 */
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
void (*mds_handler)(void) = mds_handler_void;

static int
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if (mds_handler == mds_handler_void)
		state = "inactive";
	else if (mds_handler == mds_handler_verw)
		state = "VERW";
	else if (mds_handler == mds_handler_ivb)
		state = "software IvyBridge";
	else if (mds_handler == mds_handler_bdw)
		state = "software Broadwell";
	else if (mds_handler == mds_handler_skl_sse)
		state = "software Skylake SSE";
	else if (mds_handler == mds_handler_skl_avx)
		state = "software Skylake AVX";
	else if (mds_handler == mds_handler_skl_avx512)
		state = "software Skylake AVX512";
	else if (mds_handler == mds_handler_silvermont)
		state = "software Silvermont";
	else
		state = "unknown";
	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");

void
hw_mds_recalculate(void)
{
	struct pcpu *pc;
	vm_offset_t b64;
	u_long xcr0;
	int i;

	/*
	 * Allow the user to force the VERW variant even if MD_CLEAR is
	 * not reported. For instance, a hypervisor might unknowingly
	 * filter the cap out.
	 * For similar reasons, and for testing, allow enabling the
	 * mitigation even when the MDS_NO cap is set.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
	    hw_mds_disable == 3)) {
		mds_handler = mds_handler_void;
	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
	    hw_mds_disable == 3) || hw_mds_disable == 1) {
		mds_handler = mds_handler_verw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Nehalem, SandyBridge, IvyBridge
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_ivb;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Haswell, Broadwell
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_bdw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
	    CPUID_STEPPING) <= 5) ||
	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
	    CPUID_STEPPING) <= 0xb) ||
	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
	    CPUID_STEPPING) <= 0xc)) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
		 * CascadeLake
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(6 * 1024,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				b64 = (vm_offset_t)malloc_domainset(64 + 63,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
				bzero(pc->pc_mds_buf64, 64);
			}
		}
		xcr0 = rxcr(0);
		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
			mds_handler = mds_handler_skl_avx512;
		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
		    (cpu_feature2 & CPUID2_AVX) != 0)
			mds_handler = mds_handler_skl_avx;
		else
			mds_handler = mds_handler_skl_sse;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
		/* Silvermont, Airmont */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL)
				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
		}
		mds_handler = mds_handler_silvermont;
	} else {
		hw_mds_disable = 0;
		mds_handler = mds_handler_void;
	}
}

static void
hw_mds_recalculate_boot(void *arg __unused)
{

	hw_mds_recalculate();
}
SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);

static int
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_mds_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 0 || val > 3)
		return (EINVAL);
	hw_mds_disable = val;
	hw_mds_recalculate();
	return (0);
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

/*
 * Intel Transactional Memory Asynchronous Abort Mitigation
 * CVE-2019-11135
 */
int x86_taa_enable;
int x86_taa_state;
enum {
	TAA_NONE	= 0,	/* No mitigation enabled */
	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
	TAA_VERW	= 2,	/* Use VERW mitigation */
	TAA_AUTO	= 3,	/* Automatically select the mitigation */

	/* The states below are not selectable by the operator */

	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
	TAA_NOT_PRESENT	= 5	/* TSX is not present */
};

static void
taa_set(bool enable, bool all)
{

	x86_msr_op(MSR_IA32_TSX_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
	    NULL);
}

void
x86_taa_recalculate(void)
{
	static int taa_saved_mds_disable = 0;
	int taa_need = 0, taa_state = 0;
	int mds_disable = 0, need_mds_recalc = 0;

	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
		/* TSX is not present */
		x86_taa_state = TAA_NOT_PRESENT;
		return;
	}

	/* Check to see what mitigation options the CPU gives us */
	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
		/* CPU is not susceptible to TAA */
		taa_need = TAA_TAA_UC;
	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
		/*
		 * CPU can turn off TSX. This is the next best option
		 * if the TAA_NO hardware mitigation isn't present
		 */
		taa_need = TAA_TSX_DISABLE;
	} else {
		/* No TSX/TAA specific remedies are available. */
		if (x86_taa_enable == TAA_TSX_DISABLE) {
			if (bootverbose)
				printf("TSX control not available\n");
			return;
		} else
			taa_need = TAA_VERW;
	}

	/* Can we automatically take action, or are we being forced? */
	if (x86_taa_enable == TAA_AUTO)
		taa_state = taa_need;
	else
		taa_state = x86_taa_enable;

	/* No state change, nothing to do */
	if (taa_state == x86_taa_state) {
		if (bootverbose)
			printf("No TSX change made\n");
		return;
	}

	/* Does the MSR need to be turned on or off? */
	if (taa_state == TAA_TSX_DISABLE)
		taa_set(true, true);
	else if (x86_taa_state == TAA_TSX_DISABLE)
		taa_set(false, true);

	/* Does MDS need to be set to turn on VERW? */
	if (taa_state == TAA_VERW) {
		taa_saved_mds_disable = hw_mds_disable;
		mds_disable = hw_mds_disable = 1;
		need_mds_recalc = 1;
	} else if (x86_taa_state == TAA_VERW) {
		mds_disable = hw_mds_disable = taa_saved_mds_disable;
		need_mds_recalc = 1;
	}
	if (need_mds_recalc) {
		hw_mds_recalculate();
		if (mds_disable != hw_mds_disable) {
			if (bootverbose)
				printf("Cannot change MDS state for TAA\n");
			/* Don't update our state */
			return;
		}
	}

	x86_taa_state = taa_state;
	return;
}

static void
taa_recalculate_boot(void *arg __unused)
{

	x86_taa_recalculate();
}
SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TSX Asynchronous Abort Mitigation");

static int
sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_taa_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < TAA_NONE || val > TAA_AUTO)
		return (EINVAL);
	x86_taa_enable = val;
	x86_taa_recalculate();
	return (0);
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_handler, "I",
    "TAA Mitigation enablement control "
    "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");

static int
sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	switch (x86_taa_state) {
	case TAA_NONE:
		state = "inactive";
		break;
	case TAA_TSX_DISABLE:
		state = "TSX disabled";
		break;
	case TAA_VERW:
		state = "VERW";
		break;
	case TAA_TAA_UC:
		state = "Mitigated in microcode";
		break;
	case TAA_NOT_PRESENT:
		state = "TSX not present";
		break;
	default:
		state = "unknown";
	}

	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_state_handler, "A",
    "TAA Mitigation state");

int __read_frequently cpu_flush_rsb_ctxsw;
SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
    "Flush Return Stack Buffer on context switch");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "MCU Optimization, disable RDSEED mitigation");

int x86_rngds_mitg_enable = 1;
void
x86_rngds_mitg_recalculate(bool all_cpus)
{
	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
		return;
	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_RNGDS_MITG_DIS, NULL);
}

static int
sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_rngds_mitg_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	x86_rngds_mitg_enable = val;
	x86_rngds_mitg_recalculate(true);
	return (0);
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_mitg_enable_handler, "I",
    "MCU Optimization, disabling RDSEED mitigation control "
    "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");

static int
sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
		state = "Not applicable";
	} else if (x86_rngds_mitg_enable == 0) {
		state = "RDSEED not serialized";
	} else {
		state = "Mitigated";
	}
	return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_state_handler, "A",
    "MCU Optimization state");

/*
 * Zenbleed.
 *
 * No corresponding errata is publicly listed. AMD has issued a security
 * bulletin (AMD-SB-7008), entitled "Cross-Process Information Leak". This
 * document lists (as of August 2023) platform firmware's availability target
 * dates, with most being November/December 2023. It will then be up to
 * motherboard manufacturers to produce corresponding BIOS updates, which will
 * happen with an inevitable lag. Additionally, for a variety of reasons,
 * operators might not be able to apply them everywhere. On the side of
 * standalone CPU microcodes, no plans for availability have been published so
 * far. However, a developer appearing to be an AMD employee has hardcoded in
 * Linux revision numbers of future microcodes that are presumed to fix the
 * vulnerability.
 *
 * Given the stability issues encountered with early microcode releases for
 * Rome (the only microcode publicly released so far) and the absence of
 * official communication on standalone CPU microcodes, we have opted instead
 * for matching by default all AMD Zen2 processors which, according to the
 * vulnerability's discoverer, are all affected (see
 * https://lock.cmpxchg8b.com/zenbleed.html). This policy, also adopted by
 * OpenBSD, may be overridden using the tunable/sysctl
 * 'machdep.mitigations.zenbleed.enable'. We might revise it later depending
 * on official statements, microcode updates' public availability and
 * community assessment that they actually fix the vulnerability without any
 * instability side effects.
 */

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, zenbleed,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Zenbleed OS-triggered prevention (via chicken bit)");

/* 2 is auto, see below. */
int zenbleed_enable = 2;

void
zenbleed_sanitize_enable(void)
{
	/* Default to auto (2). */
	if (zenbleed_enable < 0 || zenbleed_enable > 2)
		zenbleed_enable = 2;
}

static bool
zenbleed_chicken_bit_applicable(void)
{
	/* Concerns only bare-metal AMD Zen2 processors. */
	return (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x17 &&
	    CPUID_TO_MODEL(cpu_id) >= 0x30 &&
	    vm_guest == VM_GUEST_NO);
}

static bool
zenbleed_chicken_bit_should_enable(void)
{
	/*
	 * Obey tunable/sysctl.
	 *
	 * As explained above, currently, the automatic setting (2) and the
	 * "on" one (1) have the same effect. In the future, we might
	 * additionally check for specific microcode revisions as part of the
	 * automatic determination.
	 */
	return (zenbleed_enable != 0);
}

void
zenbleed_check_and_apply(bool all_cpus)
{
	bool set;

	if (!zenbleed_chicken_bit_applicable())
		return;

	set = zenbleed_chicken_bit_should_enable();

	x86_msr_op(MSR_DE_CFG,
	    (set ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    DE_CFG_ZEN2_FP_BACKUP_FIX_BIT, NULL);
}

static int
sysctl_zenbleed_enable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = zenbleed_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	zenbleed_enable = val;
	zenbleed_sanitize_enable();
	zenbleed_check_and_apply(true);
	return (0);
}
SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_zenbleed_enable_handler, "I",
    "Enable Zenbleed OS-triggered mitigation (chicken bit) "
    "(0: Force disable, 1: Force enable, 2: Automatic determination)");

static int
sysctl_zenbleed_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if (!zenbleed_chicken_bit_applicable())
		state = "Not applicable";
	else if (zenbleed_chicken_bit_should_enable())
		state = "Mitigation enabled";
	else
		state = "Mitigation disabled";
	return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_zenbleed_state_handler, "A",
    "Zenbleed OS-triggered mitigation (chicken bit) state");

/*
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed
 * without rescheduling on the same core.
 */
bool
disable_wp(void)
{
	u_int cr0;

	cr0 = rcr0();
	if ((cr0 & CR0_WP) == 0)
		return (false);
	load_cr0(cr0 & ~CR0_WP);
	return (true);
}

void
restore_wp(bool old_wp)
{

	if (old_wp)
		load_cr0(rcr0() | CR0_WP);
}
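
/*
 * Usage sketch (editor's illustration, deliberately compiled out): a
 * hypothetical kernel text patch wrapped in a critical section so the
 * disable_wp()/restore_wp() pair runs without rescheduling, per the
 * contract documented above.
 */
#if 0
static void
example_patch_text(uint8_t *addr, uint8_t newbyte)
{
	bool wp;

	critical_enter();	/* No rescheduling across the CR0.WP window. */
	wp = disable_wp();
	*addr = newbyte;
	restore_wp(wp);
	critical_exit();
}
#endif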

bool
acpi_get_fadt_bootflags(uint16_t *flagsp)
{
#ifdef DEV_ACPI
	ACPI_TABLE_FADT *fadt;
	vm_paddr_t physaddr;

	physaddr = acpi_find_table(ACPI_SIG_FADT);
	if (physaddr == 0)
		return (false);
	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
	if (fadt == NULL)
		return (false);
	*flagsp = fadt->BootFlags;
	acpi_unmap_table(fadt);
	return (true);
#else
	return (false);
#endif
}

DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
{
	bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
	    cpu_vendor_id == CPU_VENDOR_HYGON;

	if ((amd_feature & AMDID_RDTSCP) != 0)
		return (rdtscp);
	else if ((cpu_feature & CPUID_SSE2) != 0)
		return (cpu_is_amd ? rdtsc_ordered_mfence :
		    rdtsc_ordered_lfence);
	else
		return (rdtsc);
}
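
/*
 * Usage sketch (editor's illustration, deliberately compiled out): the
 * ifunc above is resolved once at boot, so callers of rdtsc_ordered()
 * get the strongest serialized TSC read the CPU supports with no
 * per-call dispatch. Cycle counts are meaningful only relative to each
 * other.
 */
#if 0
static uint64_t
example_measure_cycles(void (*fn)(void))
{
	uint64_t start, end;

	start = rdtsc_ordered();
	fn();
	end = rdtsc_ordered();
	return (end - start);
}
#endif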