xref: /src/sys/x86/x86/cpu_machdep.c (revision cb81a9c18db93a2046c47b0c7dc0bd6adcdd2495)
1 /*-
2  * Copyright (c) 2003 Peter Wemm.
3  * Copyright (c) 1992 Terrence R. Lambert.
4  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * William Jolitz.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  */
38 
39 #include <sys/cdefs.h>
40 #include "opt_acpi.h"
41 #include "opt_atpic.h"
42 #include "opt_cpu.h"
43 #include "opt_ddb.h"
44 #include "opt_inet.h"
45 #include "opt_isa.h"
46 #include "opt_kdb.h"
47 #include "opt_kstack_pages.h"
48 #include "opt_maxmem.h"
49 #include "opt_platform.h"
50 #ifdef __i386__
51 #include "opt_apic.h"
52 #endif
53 
54 #include <sys/param.h>
55 #include <sys/proc.h>
56 #include <sys/systm.h>
57 #include <sys/bus.h>
58 #include <sys/cpu.h>
59 #include <sys/domainset.h>
60 #include <sys/kdb.h>
61 #include <sys/kernel.h>
62 #include <sys/ktr.h>
63 #include <sys/lock.h>
64 #include <sys/malloc.h>
65 #include <sys/mutex.h>
66 #include <sys/pcpu.h>
67 #include <sys/pmckern.h>
68 #include <sys/rwlock.h>
69 #include <sys/sched.h>
70 #include <sys/smp.h>
71 #include <sys/sysctl.h>
72 
73 #include <machine/clock.h>
74 #include <machine/cpu.h>
75 #include <machine/cpufunc.h>
76 #include <machine/cputypes.h>
77 #include <machine/specialreg.h>
78 #include <machine/md_var.h>
79 #include <machine/trap.h>
80 #include <machine/tss.h>
81 #ifdef SMP
82 #include <machine/smp.h>
83 #endif
84 #ifdef CPU_ELAN
85 #include <machine/elan_mmcr.h>
86 #endif
87 #include <x86/acpica_machdep.h>
88 #include <x86/ifunc.h>
89 
90 #include <vm/vm.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_param.h>
98 
99 #include <isa/isareg.h>
100 
101 #include <contrib/dev/acpica/include/acpi.h>
102 
103 #define	STATE_RUNNING	0x0
104 #define	STATE_MWAIT	0x1
105 #define	STATE_SLEEPING	0x2
106 
107 #ifdef SMP
108 static u_int	cpu_reset_proxyid;
109 static volatile u_int	cpu_reset_proxy_active;
110 #endif
111 
112 char bootmethod[16];
113 SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
114     "System firmware boot method");
115 
116 struct msr_op_arg {
117 	u_int msr;
118 	int op;
119 	uint64_t arg1;
120 	uint64_t *res;
121 	bool safe;
122 	int error;
123 };
124 
125 static void
126 x86_msr_op_one_safe(struct msr_op_arg *a)
127 {
128 	uint64_t v;
129 	int error;
130 
131 	error = 0;
132 	switch (a->op) {
133 	case MSR_OP_ANDNOT:
134 		error = rdmsr_safe(a->msr, &v);
135 		if (error != 0) {
136 			atomic_cmpset_int(&a->error, 0, error);
137 			break;
138 		}
139 		if (a->res != NULL)
140 			atomic_store_64(a->res, v);
141 		v &= ~a->arg1;
142 		error = wrmsr_safe(a->msr, v);
143 		if (error != 0)
144 			atomic_cmpset_int(&a->error, 0, error);
145 		break;
146 	case MSR_OP_OR:
147 		error = rdmsr_safe(a->msr, &v);
148 		if (error != 0) {
149 			atomic_cmpset_int(&a->error, 0, error);
150 			break;
151 		}
152 		if (a->res != NULL)
153 			atomic_store_64(a->res, v);
154 		v |= a->arg1;
155 		error = wrmsr_safe(a->msr, v);
156 		if (error != 0)
157 			atomic_cmpset_int(&a->error, 0, error);
158 		break;
159 	case MSR_OP_WRITE:
160 		error = wrmsr_safe(a->msr, a->arg1);
161 		if (error != 0)
162 			atomic_cmpset_int(&a->error, 0, error);
163 		break;
164 	case MSR_OP_READ:
165 		error = rdmsr_safe(a->msr, &v);
166 		if (error == 0) {
167 			if (a->res != NULL)
168 				atomic_store_64(a->res, v);
169 		} else {
170 			atomic_cmpset_int(&a->error, 0, error);
171 		}
172 		break;
173 	}
174 }
175 
176 static void
177 x86_msr_op_one_unsafe(struct msr_op_arg *a)
178 {
179 	uint64_t v;
180 
181 	switch (a->op) {
182 	case MSR_OP_ANDNOT:
183 		v = rdmsr(a->msr);
184 		if (a->res != NULL)
185 			atomic_store_64(a->res, v);
186 		v &= ~a->arg1;
187 		wrmsr(a->msr, v);
188 		break;
189 	case MSR_OP_OR:
190 		v = rdmsr(a->msr);
191 		if (a->res != NULL)
192 			atomic_store_64(a->res, v);
193 		v |= a->arg1;
194 		wrmsr(a->msr, v);
195 		break;
196 	case MSR_OP_WRITE:
197 		wrmsr(a->msr, a->arg1);
198 		break;
199 	case MSR_OP_READ:
200 		v = rdmsr(a->msr);
201 		if (a->res != NULL)
202 			atomic_store_64(a->res, v);
203 		break;
204 	default:
205 		__assert_unreachable();
206 	}
207 }
208 
209 static void
210 x86_msr_op_one(void *arg)
211 {
212 	struct msr_op_arg *a;
213 
214 	a = arg;
215 	if (a->safe)
216 		x86_msr_op_one_safe(a);
217 	else
218 		x86_msr_op_one_unsafe(a);
219 }
220 
221 #define	MSR_OP_EXMODE_MASK	0xf0000000
222 #define	MSR_OP_OP_MASK		0x000000ff
223 #define	MSR_OP_GET_CPUID(x) \
224     (((x) & ~(MSR_OP_EXMODE_MASK | MSR_OP_SAFE)) >> 8)
225 
226 /*
227  * Utility function to wrap common MSR accesses.
228  *
229  * The msr argument specifies the MSR number to operate on.
230  * arg1 is an optional additional argument which is needed by
231  * modifying ops.
232  *
233  * res is the location where the value read from MSR is placed.  It is
234  * the value that was initially read from the MSR, before applying the
235  * specified operation.  Can be NULL if the value is not needed.  If
236  * the op is executed on more than one CPU, it is unspecified on which
237  * CPU the value was read.
238  *
239  * op encoding combines the target/mode specification and the requested
240  * operation, all or-ed together.
241  *
242  * MSR accesses are executed with interrupts disabled.
243  *
244  * The following targets can be specified:
245  * MSR_OP_LOCAL				execute on current CPU.
246  * MSR_OP_SCHED_ALL			execute on all CPUs, by migrating
247  *					the current thread to them in sequence.
248  * MSR_OP_SCHED_ALL | MSR_OP_SAFE	execute on all CPUs by migrating, using
249  *					safe MSR access.
250  * MSR_OP_SCHED_ONE			execute on specified CPU, migrate
251  *					curthread to it.
252  * MSR_OP_SCHED_ONE | MSR_OP_SAFE	safely execute on specified CPU,
253  *					migrate curthread to it.
254  * MSR_OP_RENDEZVOUS_ALL		execute on all CPUs in interrupt
255  *					context.
256  * MSR_OP_RENDEZVOUS_ONE		execute on specified CPU in interrupt
257  *					context.
258  * If a _ONE target is specified, 'or' the op value with MSR_OP_CPUID(cpuid)
259  * to name the target CPU.  _SAFE variants might return EFAULT if access to
260  * MSR faulted with #GP.  Non-_SAFE variants most likely panic or reboot
261  * the machine if the MSR is not present or access is not tolerated by hw.
262  *
263  * The following operations can be specified:
264  * MSR_OP_ANDNOT	*res = v = *msr; *msr = v & ~arg1
265  * MSR_OP_OR		*res = v = *msr; *msr = v | arg1
266  * MSR_OP_READ		*res = *msr
267  * MSR_OP_WRITE		*msr = arg1
268  */
269 int
270 x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
271 {
272 	struct thread *td;
273 	struct msr_op_arg a;
274 	cpuset_t set;
275 	register_t flags;
276 	u_int exmode;
277 	int bound_cpu, cpu, i, is_bound;
278 
279 	exmode = op & MSR_OP_EXMODE_MASK;
280 	a.op = op & MSR_OP_OP_MASK;
281 	a.msr = msr;
282 	a.safe = (op & MSR_OP_SAFE) != 0;
283 	a.arg1 = arg1;
284 	a.res = res;
285 	a.error = 0;
286 
287 	switch (exmode) {
288 	case MSR_OP_LOCAL:
289 		flags = intr_disable();
290 		x86_msr_op_one(&a);
291 		intr_restore(flags);
292 		break;
293 	case MSR_OP_SCHED_ALL:
294 		td = curthread;
295 		thread_lock(td);
296 		is_bound = sched_is_bound(td);
297 		bound_cpu = td->td_oncpu;
298 		CPU_FOREACH(i) {
299 			sched_bind(td, i);
300 			x86_msr_op_one(&a);
301 		}
302 		if (is_bound)
303 			sched_bind(td, bound_cpu);
304 		else
305 			sched_unbind(td);
306 		thread_unlock(td);
307 		break;
308 	case MSR_OP_SCHED_ONE:
309 		td = curthread;
310 		cpu = MSR_OP_GET_CPUID(op);
311 		thread_lock(td);
312 		is_bound = sched_is_bound(td);
313 		bound_cpu = td->td_oncpu;
314 		if (!is_bound || bound_cpu != cpu)
315 			sched_bind(td, cpu);
316 		x86_msr_op_one(&a);
317 		if (is_bound) {
318 			if (bound_cpu != cpu)
319 				sched_bind(td, bound_cpu);
320 		} else {
321 			sched_unbind(td);
322 		}
323 		thread_unlock(td);
324 		break;
325 	case MSR_OP_RENDEZVOUS_ALL:
326 		smp_rendezvous(smp_no_rendezvous_barrier,
327 		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
328 		break;
329 	case MSR_OP_RENDEZVOUS_ONE:
330 		cpu = MSR_OP_GET_CPUID(op);
331 		CPU_SETOF(cpu, &set);
332 		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
333 		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
334 		break;
335 	default:
336 		__assert_unreachable();
337 	}
338 	return (a.error);
339 }
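/*
 * Example usage (editorial sketch, not part of the original file; the MSR and
 * bit names are ones this file already uses, and the CPU id is an arbitrary
 * illustration):
 *
 *	uint64_t old;
 *	int error;
 *
 *	// Safely read IA32_SPEC_CTRL on CPU 2, migrating curthread there.
 *	error = x86_msr_op(MSR_IA32_SPEC_CTRL,
 *	    MSR_OP_SCHED_ONE | MSR_OP_SAFE | MSR_OP_READ | MSR_OP_CPUID(2),
 *	    0, &old);
 *
 *	// Set the SSBD bit on every CPU from rendezvous (interrupt) context.
 *	x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_RENDEZVOUS_ALL | MSR_OP_OR,
 *	    IA32_SPEC_CTRL_SSBD, NULL);
 */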
340 
341 /*
342  * Automatically initialized per CPU errata in cpu_idle_tun below.
343  */
344 bool mwait_cpustop_broken = false;
345 SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
346     &mwait_cpustop_broken, 0,
347     "Can not reliably wake MONITOR/MWAIT cpus without interrupts");
348 
349 /*
350  * Flush the D-cache for non-DMA I/O so that the I-cache can
351  * be made coherent later.
352  */
353 void
354 cpu_flush_dcache(void *ptr, size_t len)
355 {
356 	/* Not applicable */
357 }
358 
359 void
360 acpi_cpu_c1(void)
361 {
362 
363 	__asm __volatile("sti; hlt");
364 }
365 
366 /*
367  * Use mwait to pause execution while waiting for an interrupt or
368  * another thread to signal that there is more work.
369  *
370  * NOTE: Interrupts will cause a wakeup; however, this function does
371  * not enable interrupt handling. The caller is responsible to enable
372  * interrupts.
373  */
374 void
375 acpi_cpu_idle_mwait(uint32_t mwait_hint)
376 {
377 	int *state;
378 	uint64_t v;
379 
380 	/*
381 	 * A comment in a Linux patch claims that 'CPUs run faster with
382 	 * speculation protection disabled. All CPU threads in a core
383 	 * must disable speculation protection for it to be
384 	 * disabled. Disable it while we are idle so the other
385 	 * hyperthread can run fast.'
386 	 *
387 	 * XXXKIB.  Software coordination mode should be supported,
388 	 * but all Intel CPUs provide hardware coordination.
389 	 */
390 
391 	state = &PCPU_PTR(monitorbuf)->idle_state;
392 	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
393 	    ("cpu_mwait_cx: wrong monitorbuf state"));
394 	atomic_store_int(state, STATE_MWAIT);
395 	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
396 		v = rdmsr(MSR_IA32_SPEC_CTRL);
397 		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
398 		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
399 	} else {
400 		v = 0;
401 	}
402 	cpu_monitor(state, 0, 0);
403 	if (atomic_load_int(state) == STATE_MWAIT)
404 		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
405 
406 	/*
407 	 * SSB cannot be disabled while we sleep, or rather, if it was
408 	 * disabled, the sysctl thread will bind to our cpu to tweak
409 	 * MSR.
410 	 */
411 	if (v != 0)
412 		wrmsr(MSR_IA32_SPEC_CTRL, v);
413 
414 	/*
415 	 * We should exit on any event that interrupts mwait, because
416 	 * that event might be a wanted interrupt.
417 	 */
418 	atomic_store_int(state, STATE_RUNNING);
419 }
420 
421 /* Get current clock frequency for the given cpu id. */
422 int
423 cpu_est_clockrate(int cpu_id, uint64_t *rate)
424 {
425 	uint64_t tsc1, tsc2;
426 	uint64_t acnt, mcnt, perf;
427 	register_t reg;
428 
429 	if (pcpu_find(cpu_id) == NULL || rate == NULL)
430 		return (EINVAL);
431 #ifdef __i386__
432 	if ((cpu_feature & CPUID_TSC) == 0)
433 		return (EOPNOTSUPP);
434 #endif
435 
436 	/*
437 	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
438 	 * DELAY(9) based logic fails.
439 	 */
440 	if (tsc_is_invariant && !tsc_perf_stat)
441 		return (EOPNOTSUPP);
442 
443 #ifdef SMP
444 	if (smp_cpus > 1) {
445 		/* Schedule ourselves on the indicated cpu. */
446 		thread_lock(curthread);
447 		sched_bind(curthread, cpu_id);
448 		thread_unlock(curthread);
449 	}
450 #endif
451 
452 	/* Calibrate by measuring a short delay. */
453 	reg = intr_disable();
454 	if (tsc_is_invariant) {
455 		wrmsr(MSR_MPERF, 0);
456 		wrmsr(MSR_APERF, 0);
457 		tsc1 = rdtsc();
458 		DELAY(1000);
459 		mcnt = rdmsr(MSR_MPERF);
460 		acnt = rdmsr(MSR_APERF);
461 		tsc2 = rdtsc();
462 		intr_restore(reg);
463 		perf = 1000 * acnt / mcnt;
464 		*rate = (tsc2 - tsc1) * perf;
465 	} else {
466 		tsc1 = rdtsc();
467 		DELAY(1000);
468 		tsc2 = rdtsc();
469 		intr_restore(reg);
470 		*rate = (tsc2 - tsc1) * 1000;
471 	}
472 
473 #ifdef SMP
474 	if (smp_cpus > 1) {
475 		thread_lock(curthread);
476 		sched_unbind(curthread);
477 		thread_unlock(curthread);
478 	}
479 #endif
480 
481 	return (0);
482 }
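/*
 * Worked example (editorial, hypothetical numbers): with DELAY(1000) spanning
 * one millisecond, a TSC delta of 3,000,000 in the non-invariant branch gives
 * *rate = 3,000,000 * 1000 = 3.0 GHz.  In the invariant-TSC branch, if the
 * CPU ran at half of its reference clock during the delay, e.g.
 * acnt = 1,500,000 and mcnt = 3,000,000, then perf = 1000 * acnt / mcnt = 500
 * and *rate = (tsc2 - tsc1) * 500, i.e. the TSC delta is scaled down to the
 * effective (APERF/MPERF) frequency of 1.5 GHz.
 */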
483 
484 /*
485  * Shutdown the CPU as much as possible
486  */
487 void
488 cpu_halt(void)
489 {
490 	for (;;)
491 		halt();
492 }
493 
494 static void
495 cpu_reset_real(void)
496 {
497 	struct region_descriptor null_idt;
498 	int b;
499 
500 	disable_intr();
501 #ifdef CPU_ELAN
502 	if (elan_mmcr != NULL)
503 		elan_mmcr->RESCFG = 1;
504 #endif
505 #ifdef __i386__
506 	if (cpu == CPU_GEODE1100) {
507 		/* Attempt Geode's own reset */
508 		outl(0xcf8, 0x80009044ul);
509 		outl(0xcfc, 0xf);
510 	}
511 #endif
512 #if !defined(BROKEN_KEYBOARD_RESET)
513 	/*
514 	 * Attempt to do a CPU reset via the keyboard controller, but
515 	 * do not turn off GateA20, as any machine that fails
516 	 * to do the reset here would then end up in no man's land.
517 	 */
518 	outb(IO_KBD + 4, 0xFE);
519 	DELAY(500000);	/* wait 0.5 sec to see if that did it */
520 #endif
521 
522 	/*
523 	 * Attempt to force a reset via the Reset Control register at
524 	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
525 	 * transitions from 0 to 1.  Bit 1 selects the type of reset
526 	 * to attempt: 0 selects a "soft" reset, and 1 selects a
527 	 * "hard" reset.  We try a "hard" reset.  The first write sets
528 	 * bit 1 to select a "hard" reset and clears bit 2.  The
529 	 * second write forces a 0 -> 1 transition in bit 2 to trigger
530 	 * a reset.
531 	 */
532 	outb(0xcf9, 0x2);
533 	outb(0xcf9, 0x6);
534 	DELAY(500000);  /* wait 0.5 sec to see if that did it */
535 
536 	/*
537 	 * Attempt to force a reset via the Fast A20 and Init register
538 	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
539 	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
540 	 * preserve bit 1 while setting bit 0.  We also must clear bit
541 	 * 0 before setting it if it isn't already clear.
542 	 */
543 	b = inb(0x92);
544 	if (b != 0xff) {
545 		if ((b & 0x1) != 0)
546 			outb(0x92, b & 0xfe);
547 		outb(0x92, b | 0x1);
548 		DELAY(500000);  /* wait 0.5 sec to see if that did it */
549 	}
550 
551 	printf("No known reset method worked, attempting CPU shutdown\n");
552 	DELAY(1000000); /* wait 1 sec for printf to complete */
553 
554 	/* Wipe the IDT. */
555 	null_idt.rd_limit = 0;
556 	null_idt.rd_base = 0;
557 	lidt(&null_idt);
558 
559 	/* "good night, sweet prince .... <THUNK!>" */
560 	breakpoint();
561 
562 	/* NOTREACHED */
563 	while(1);
564 }
565 
566 #ifdef SMP
567 static void
568 cpu_reset_proxy(void)
569 {
570 
571 	cpu_reset_proxy_active = 1;
572 	while (cpu_reset_proxy_active == 1)
573 		ia32_pause(); /* Wait for other cpu to see that we've started */
574 
575 	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
576 	DELAY(1000000);
577 	cpu_reset_real();
578 }
579 #endif
580 
581 void
582 cpu_reset(void)
583 {
584 #ifdef SMP
585 	struct monitorbuf *mb;
586 	cpuset_t map;
587 	u_int cnt;
588 
589 	if (smp_started) {
590 		map = all_cpus;
591 		CPU_CLR(PCPU_GET(cpuid), &map);
592 		CPU_ANDNOT(&map, &map, &stopped_cpus);
593 		if (!CPU_EMPTY(&map)) {
594 			printf("cpu_reset: Stopping other CPUs\n");
595 			stop_cpus(map);
596 		}
597 
598 		if (PCPU_GET(cpuid) != 0) {
599 			cpu_reset_proxyid = PCPU_GET(cpuid);
600 			cpustop_restartfunc = cpu_reset_proxy;
601 			cpu_reset_proxy_active = 0;
602 			printf("cpu_reset: Restarting BSP\n");
603 
604 			/* Restart CPU #0. */
605 			CPU_SETOF(0, &started_cpus);
606 			mb = &pcpu_find(0)->pc_monitorbuf;
607 			atomic_store_int(&mb->stop_state,
608 			    MONITOR_STOPSTATE_RUNNING);
609 
610 			cnt = 0;
611 			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
612 				ia32_pause();
613 				cnt++;	/* Wait for BSP to announce restart */
614 			}
615 			if (cpu_reset_proxy_active == 0) {
616 				printf("cpu_reset: Failed to restart BSP\n");
617 			} else {
618 				cpu_reset_proxy_active = 2;
619 				while (1)
620 					ia32_pause();
621 				/* NOTREACHED */
622 			}
623 		}
624 	}
625 #endif
626 	cpu_reset_real();
627 	/* NOTREACHED */
628 }
629 
630 bool
631 cpu_mwait_usable(void)
632 {
633 
634 	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
635 	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
636 	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
637 }
638 
639 void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
640 
641 int cpu_amdc1e_bug = 0;			/* AMD C1E APIC workaround required. */
642 
643 static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
644 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
645     0, "Use MONITOR/MWAIT for short idle");
646 
647 static bool
648 cpu_idle_enter(int *statep, int newstate)
649 {
650 	KASSERT(atomic_load_int(statep) == STATE_RUNNING,
651 	    ("%s: state %d", __func__, atomic_load_int(statep)));
652 
653 	/*
654 	 * A fence is needed to prevent reordering of the load in
655 	 * sched_runnable() with this store to the idle state word.  Without it,
656 	 * cpu_idle_wakeup() can observe the state as STATE_RUNNING after having
657 	 * added load to the queue, and elide an IPI.  Then, sched_runnable()
658 	 * can observe tdq_load == 0, so the CPU ends up idling with pending
659 	 * work.  tdq_notify() similarly ensures that a prior update to tdq_load
660 	 * is visible before calling cpu_idle_wakeup().
661 	 */
662 	atomic_store_int(statep, newstate);
663 	atomic_thread_fence_seq_cst();
664 
665 	/*
666 	 * Since we may be in a critical section from cpu_idle(), if
667 	 * an interrupt fires during that critical section we may have
668 	 * a pending preemption.  If the CPU halts, then that thread
669 	 * may not execute until a later interrupt awakens the CPU.
670 	 * To handle this race, check for a runnable thread after
671 	 * disabling interrupts and immediately return if one is
672 	 * found.  Also, we must absolutely guarantee that hlt is
673 	 * the next instruction after sti.  This ensures that any
674 	 * interrupt that fires after the call to disable_intr() will
675 	 * immediately awaken the CPU from hlt.  Finally, please note
676 	 * that on x86 this works fine because interrupts are enabled only
677 	 * after the instruction following sti executes, while IF is set
678 	 * to 1 immediately, allowing the hlt instruction to acknowledge the
679 	 * interrupt.
680 	 */
681 	disable_intr();
682 	if (sched_runnable()) {
683 		enable_intr();
684 		atomic_store_int(statep, STATE_RUNNING);
685 		return (false);
686 	} else {
687 		return (true);
688 	}
689 }
690 
691 static void
692 cpu_idle_exit(int *statep)
693 {
694 	atomic_store_int(statep, STATE_RUNNING);
695 }
696 
697 static void
698 cpu_idle_acpi(sbintime_t sbt)
699 {
700 	int *state;
701 
702 	state = &PCPU_PTR(monitorbuf)->idle_state;
703 	if (cpu_idle_enter(state, STATE_SLEEPING)) {
704 		if (cpu_idle_hook)
705 			cpu_idle_hook(sbt);
706 		else
707 			acpi_cpu_c1();
708 		cpu_idle_exit(state);
709 	}
710 }
711 
712 static void
713 cpu_idle_hlt(sbintime_t sbt)
714 {
715 	int *state;
716 
717 	state = &PCPU_PTR(monitorbuf)->idle_state;
718 	if (cpu_idle_enter(state, STATE_SLEEPING)) {
719 		acpi_cpu_c1();
720 		atomic_store_int(state, STATE_RUNNING);
721 	}
722 }
723 
724 static void
725 cpu_idle_mwait(sbintime_t sbt)
726 {
727 	int *state;
728 
729 	state = &PCPU_PTR(monitorbuf)->idle_state;
730 	if (cpu_idle_enter(state, STATE_MWAIT)) {
731 		cpu_monitor(state, 0, 0);
732 		if (atomic_load_int(state) == STATE_MWAIT)
733 			__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
734 		else
735 			enable_intr();
736 		cpu_idle_exit(state);
737 	}
738 }
739 
740 static void
741 cpu_idle_spin(sbintime_t sbt)
742 {
743 	int *state;
744 	int i;
745 
746 	state = &PCPU_PTR(monitorbuf)->idle_state;
747 	atomic_store_int(state, STATE_RUNNING);
748 
749 	/*
750 	 * The sched_runnable() call is racy, but as long as we are in a
751 	 * loop, missing it once will have little impact, if any (and it
752 	 * is much better than not checking at all).
753 	 */
754 	for (i = 0; i < 1000; i++) {
755 		if (sched_runnable())
756 			return;
757 		cpu_spinwait();
758 	}
759 }
760 
761 void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
762 
763 void
764 cpu_idle(int busy)
765 {
766 	uint64_t msr;
767 	sbintime_t sbt = -1;
768 
769 	CTR1(KTR_SPARE2, "cpu_idle(%d)", busy);
770 
771 	/* If we are busy - try to use fast methods. */
772 	if (busy) {
773 		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
774 			cpu_idle_mwait(busy);
775 			goto out;
776 		}
777 	}
778 
779 	/* If we have time - switch timers into idle mode. */
780 	if (!busy) {
781 		critical_enter();
782 		sbt = cpu_idleclock();
783 	}
784 
785 	/* Apply AMD APIC timer C1E workaround. */
786 	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
787 		msr = rdmsr(MSR_AMDK8_IPM);
788 		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
789 			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
790 			    AMDK8_C1EONCMPHALT));
791 	}
792 
793 	/* Call main idle method. */
794 	cpu_idle_fn(sbt);
795 
796 	/* Switch timers back into active mode. */
797 	if (!busy) {
798 		cpu_activeclock();
799 		critical_exit();
800 	}
801 out:
802 	CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy);
803 }
804 
805 static int cpu_idle_apl31_workaround;
806 SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
807     &cpu_idle_apl31_workaround, 0,
808     "Apollo Lake APL31 MWAIT bug workaround");
809 
810 int
811 cpu_idle_wakeup(int cpu)
812 {
813 	struct monitorbuf *mb;
814 	int *state;
815 
816 	mb = &pcpu_find(cpu)->pc_monitorbuf;
817 	state = &mb->idle_state;
818 	switch (atomic_load_int(state)) {
819 	case STATE_SLEEPING:
820 		return (0);
821 	case STATE_MWAIT:
822 		atomic_store_int(state, STATE_RUNNING);
823 		return (cpu_idle_apl31_workaround ? 0 : 1);
824 	case STATE_RUNNING:
825 		return (1);
826 	default:
827 		panic("bad monitor state");
828 		return (1);
829 	}
830 }
831 
832 /*
833  * Ordered by speed/power consumption.
834  */
835 static const struct {
836 	void	*id_fn;
837 	const char *id_name;
838 	int	id_cpuid2_flag;
839 } idle_tbl[] = {
840 	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
841 	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
842 	    .id_cpuid2_flag = CPUID2_MON },
843 	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
844 	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
845 };
846 
847 static int
848 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
849 {
850 	char *avail, *p;
851 	int error;
852 	int i;
853 
854 	avail = malloc(256, M_TEMP, M_WAITOK);
855 	p = avail;
856 	for (i = 0; i < nitems(idle_tbl); i++) {
857 		if (idle_tbl[i].id_cpuid2_flag != 0 &&
858 		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
859 			continue;
860 		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
861 		    cpu_idle_hook == NULL)
862 			continue;
863 		p += sprintf(p, "%s%s", p != avail ? ", " : "",
864 		    idle_tbl[i].id_name);
865 	}
866 	error = sysctl_handle_string(oidp, avail, 0, req);
867 	free(avail, M_TEMP);
868 	return (error);
869 }
870 
871 SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
872     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
873     0, 0, idle_sysctl_available, "A",
874     "list of available idle functions");
875 
876 static bool
877 cpu_idle_selector(const char *new_idle_name)
878 {
879 	int i;
880 
881 	for (i = 0; i < nitems(idle_tbl); i++) {
882 		if (idle_tbl[i].id_cpuid2_flag != 0 &&
883 		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
884 			continue;
885 		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
886 		    cpu_idle_hook == NULL)
887 			continue;
888 		if (strcmp(idle_tbl[i].id_name, new_idle_name))
889 			continue;
890 		cpu_idle_fn = idle_tbl[i].id_fn;
891 		if (bootverbose)
892 			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
893 		return (true);
894 	}
895 	return (false);
896 }
897 
898 static int
899 cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
900 {
901 	char buf[16];
902 	const char *p;
903 	int error, i;
904 
905 	p = "unknown";
906 	for (i = 0; i < nitems(idle_tbl); i++) {
907 		if (idle_tbl[i].id_fn == cpu_idle_fn) {
908 			p = idle_tbl[i].id_name;
909 			break;
910 		}
911 	}
912 	strncpy(buf, p, sizeof(buf));
913 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
914 	if (error != 0 || req->newptr == NULL)
915 		return (error);
916 	return (cpu_idle_selector(buf) ? 0 : EINVAL);
917 }
918 
919 SYSCTL_PROC(_machdep, OID_AUTO, idle,
920     CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
921     0, 0, cpu_idle_sysctl, "A",
922     "currently selected idle function");
923 
924 static void
925 cpu_idle_tun(void *unused __unused)
926 {
927 	char tunvar[16];
928 
929 	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
930 		cpu_idle_selector(tunvar);
931 	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
932 	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
933 		/* Ryzen errata 1057 and 1109. */
934 		cpu_idle_selector("hlt");
935 		idle_mwait = 0;
936 		mwait_cpustop_broken = true;
937 	}
938 
939 	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
940 	    CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x5c) {
941 		/*
942 		 * Apollo Lake errata APL31 (public errata APL30).
943 		 * Stores to the armed address range may not trigger
944 		 * MWAIT to resume execution.  OS needs to use
945 		 * interrupts to wake processors from MWAIT-induced
946 		 * sleep states.
947 		 */
948 		cpu_idle_apl31_workaround = 1;
949 		mwait_cpustop_broken = true;
950 	}
951 	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
952 }
953 SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
954 
955 static int panic_on_nmi = 0xff;
956 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
957     &panic_on_nmi, 0,
958     "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
959 int nmi_is_broadcast = 1;
960 SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
961     &nmi_is_broadcast, 0,
962     "Chipset NMI is broadcast");
963 int (*apei_nmi)(void);
964 
965 void
966 nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
967 {
968 	bool claimed = false;
969 
970 #ifdef DEV_ISA
971 	/* machine/parity/power fail/"kitchen sink" faults */
972 	if (isa_nmi(frame->tf_err)) {
973 		claimed = true;
974 		if ((panic_on_nmi & 1) != 0)
975 			panic("NMI indicates hardware failure");
976 	}
977 #endif /* DEV_ISA */
978 
979 	/* ACPI Platform Error Interfaces callback. */
980 	if (apei_nmi != NULL && (*apei_nmi)())
981 		claimed = true;
982 
983 	/*
984 	 * NMIs can be useful for debugging.  They can be hooked up to a
985 	 * pushbutton, usually on an ISA, PCI, or PCIe card.  They can also be
986 	 * generated by an IPMI BMC, either manually or in response to a
987 	 * watchdog timeout.  For example, see the "power diag" command in
988 	 * ports/sysutils/ipmitool.  They can also be generated by a
989 	 * hypervisor; see "bhyvectl --inject-nmi".
990 	 */
991 
992 #ifdef KDB
993 	if (!claimed && (panic_on_nmi & 2) != 0) {
994 		if (debugger_on_panic) {
995 			printf("NMI/cpu%d ... going to debugger\n", cpu);
996 			claimed = kdb_trap(type, 0, frame);
997 		}
998 	}
999 #endif /* KDB */
1000 
1001 	if (!claimed && panic_on_nmi != 0)
1002 		panic("NMI");
1003 }
1004 
1005 /*
1006  * Dynamically registered NMI handlers.
1007  */
1008 struct nmi_handler {
1009 	int running;
1010 	int (*func)(struct trapframe *);
1011 	struct nmi_handler *next;
1012 };
1013 static struct nmi_handler *nmi_handlers_head = NULL;
1014 MALLOC_DEFINE(M_NMI, "NMI handlers",
1015     "List entries for dynamically registered NMI handlers");
1016 
1017 void
1018 nmi_register_handler(int (*handler)(struct trapframe *))
1019 {
1020 	struct nmi_handler *hp;
1021 	int (*hpf)(struct trapframe *);
1022 
1023 	hp = (struct nmi_handler *)atomic_load_acq_ptr(
1024 	    (uintptr_t *)&nmi_handlers_head);
1025 	while (hp != NULL) {
1026 		hpf = hp->func;
1027 		MPASS(hpf != handler);
1028 		if (hpf == NULL &&
1029 		    atomic_cmpset_ptr((volatile uintptr_t *)&hp->func,
1030 		    (uintptr_t)NULL, (uintptr_t)handler) != 0) {
1031 			hp->running = 0;
1032 			return;
1033 		}
1034 		hp = (struct nmi_handler *)atomic_load_acq_ptr(
1035 		    (uintptr_t *)&hp->next);
1036 	}
1037 	hp = malloc(sizeof(struct nmi_handler), M_NMI, M_WAITOK | M_ZERO);
1038 	hp->func = handler;
1039 	hp->next = nmi_handlers_head;
1040 	while (atomic_fcmpset_rel_ptr(
1041 	    (volatile uintptr_t *)&nmi_handlers_head,
1042 	    (uintptr_t *)&hp->next, (uintptr_t)hp) == 0)
1043 	        ;
1044 }
1045 
1046 void
1047 nmi_remove_handler(int (*handler)(struct trapframe *))
1048 {
1049 	struct nmi_handler *hp;
1050 
1051 	hp = (struct nmi_handler *)atomic_load_acq_ptr(
1052 	    (uintptr_t *)&nmi_handlers_head);
1053 	while (hp != NULL) {
1054 		if (hp->func == handler) {
1055 			hp->func = NULL;
1056 			/* Wait for the handler to exit before returning. */
1057 			while (atomic_load_int(&hp->running) != 0)
1058 				cpu_spinwait();
1059 			return;
1060 		}
1061 		hp = (struct nmi_handler *)atomic_load_acq_ptr(
1062 		    (uintptr_t *)&hp->next);
1063 	}
1064 
1065 	panic("%s: attempting to remove an unregistered NMI handler %p\n",
1066 	    __func__, handler);
1067 }
1068 
1069 void
1070 nmi_handle_intr(struct trapframe *frame)
1071 {
1072 	int (*func)(struct trapframe *);
1073 	struct nmi_handler *hp;
1074 	int rv;
1075 	bool handled;
1076 
1077 #ifdef SMP
1078 	/* Handler for NMI IPIs used for stopping CPUs. */
1079 	if (ipi_nmi_handler() == 0)
1080 		return;
1081 #endif
1082 	handled = false;
1083 	hp = (struct nmi_handler *)atomic_load_acq_ptr(
1084 	    (uintptr_t *)&nmi_handlers_head);
1085 	while (!handled && hp != NULL) {
1086 		func = hp->func;
1087 		if (func != NULL) {
1088 			atomic_add_int(&hp->running, 1);
1089 			rv = func(frame);
1090 			atomic_subtract_int(&hp->running, 1);
1091 			if (rv != 0) {
1092 				handled = true;
1093 				break;
1094 			}
1095 		}
1096 		hp = (struct nmi_handler *)atomic_load_acq_ptr(
1097 		    (uintptr_t *)&hp->next);
1098 	}
1099 	if (handled)
1100 		return;
1101 #ifdef SMP
1102 	if (nmi_is_broadcast) {
1103 		nmi_call_kdb_smp(T_NMI, frame);
1104 		return;
1105 	}
1106 #endif
1107 	nmi_call_kdb(PCPU_GET(cpuid), T_NMI, frame);
1108 }
1109 
1110 static int hw_ibrs_active;
1111 int hw_ibrs_ibpb_active;
1112 int hw_ibrs_disable = 1;
1113 
1114 SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
1115     "Indirect Branch Restricted Speculation active");
1116 
1117 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
1118     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1119     "Indirect Branch Restricted Speculation active");
1120 
1121 SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
1122     &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
1123 
1124 void
1125 hw_ibrs_recalculate(bool for_all_cpus)
1126 {
1127 	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
1128 		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
1129 		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
1130 		    (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
1131 		    IA32_SPEC_CTRL_IBRS, NULL);
1132 		hw_ibrs_active = hw_ibrs_disable == 0;
1133 		hw_ibrs_ibpb_active = 0;
1134 	} else {
1135 		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
1136 		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
1137 	}
1138 }
1139 
1140 static int
1141 hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
1142 {
1143 	int error, val;
1144 
1145 	val = hw_ibrs_disable;
1146 	error = sysctl_handle_int(oidp, &val, 0, req);
1147 	if (error != 0 || req->newptr == NULL)
1148 		return (error);
1149 	hw_ibrs_disable = val != 0;
1150 	hw_ibrs_recalculate(true);
1151 	return (0);
1152 }
1153 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
1154     CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
1155     "Disable Indirect Branch Restricted Speculation");
1156 
1157 SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
1158     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1159     hw_ibrs_disable_handler, "I",
1160     "Disable Indirect Branch Restricted Speculation");
1161 
1162 int hw_ssb_active;
1163 int hw_ssb_disable;
1164 
1165 SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
1166     &hw_ssb_active, 0,
1167     "Speculative Store Bypass Disable active");
1168 
1169 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
1170     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1171     "Speculative Store Bypass Disable active");
1172 
1173 SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
1174     &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
1175 
1176 static void
1177 hw_ssb_set(bool enable, bool for_all_cpus)
1178 {
1179 
1180 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
1181 		hw_ssb_active = 0;
1182 		return;
1183 	}
1184 	hw_ssb_active = enable;
1185 	x86_msr_op(MSR_IA32_SPEC_CTRL,
1186 	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1187 	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
1188 	    IA32_SPEC_CTRL_SSBD, NULL);
1189 }
1190 
1191 void
1192 hw_ssb_recalculate(bool all_cpus)
1193 {
1194 
1195 	switch (hw_ssb_disable) {
1196 	default:
1197 		hw_ssb_disable = 0;
1198 		/* FALLTHROUGH */
1199 	case 0: /* off */
1200 		hw_ssb_set(false, all_cpus);
1201 		break;
1202 	case 1: /* on */
1203 		hw_ssb_set(true, all_cpus);
1204 		break;
1205 	case 2: /* auto */
1206 		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
1207 		    false : true, all_cpus);
1208 		break;
1209 	}
1210 }
1211 
1212 static int
1213 hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
1214 {
1215 	int error, val;
1216 
1217 	val = hw_ssb_disable;
1218 	error = sysctl_handle_int(oidp, &val, 0, req);
1219 	if (error != 0 || req->newptr == NULL)
1220 		return (error);
1221 	hw_ssb_disable = val;
1222 	hw_ssb_recalculate(true);
1223 	return (0);
1224 }
1225 SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
1226     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1227     hw_ssb_disable_handler, "I",
1228     "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1229 
1230 SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
1231     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1232     hw_ssb_disable_handler, "I",
1233     "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1234 
1235 int hw_mds_disable;
1236 
1237 /*
1238  * Handler for Microarchitectural Data Sampling issues.  Really not a
1239  * pointer to a C function: on amd64 the code must not change any CPU
1240  * architectural state except possibly %rflags. Also, it is always
1241  * called with interrupts disabled.
1242  */
1243 void mds_handler_void(void);
1244 void mds_handler_verw(void);
1245 void mds_handler_ivb(void);
1246 void mds_handler_bdw(void);
1247 void mds_handler_skl_sse(void);
1248 void mds_handler_skl_avx(void);
1249 void mds_handler_skl_avx512(void);
1250 void mds_handler_silvermont(void);
1251 void (*mds_handler)(void) = mds_handler_void;
1252 
1253 static int
1254 sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
1255 {
1256 	const char *state;
1257 
1258 	if (mds_handler == mds_handler_void)
1259 		state = "inactive";
1260 	else if (mds_handler == mds_handler_verw)
1261 		state = "VERW";
1262 	else if (mds_handler == mds_handler_ivb)
1263 		state = "software IvyBridge";
1264 	else if (mds_handler == mds_handler_bdw)
1265 		state = "software Broadwell";
1266 	else if (mds_handler == mds_handler_skl_sse)
1267 		state = "software Skylake SSE";
1268 	else if (mds_handler == mds_handler_skl_avx)
1269 		state = "software Skylake AVX";
1270 	else if (mds_handler == mds_handler_skl_avx512)
1271 		state = "software Skylake AVX512";
1272 	else if (mds_handler == mds_handler_silvermont)
1273 		state = "software Silvermont";
1274 	else
1275 		state = "unknown";
1276 	return (SYSCTL_OUT(req, state, strlen(state)));
1277 }
1278 
1279 SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
1280     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1281     sysctl_hw_mds_disable_state_handler, "A",
1282     "Microarchitectural Data Sampling Mitigation state");
1283 
1284 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
1285     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1286     "Microarchitectural Data Sampling Mitigation state");
1287 
1288 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
1289     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1290     sysctl_hw_mds_disable_state_handler, "A",
1291     "Microarchitectural Data Sampling Mitigation state");
1292 
1293 _Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
1294 
1295 void
1296 hw_mds_recalculate(void)
1297 {
1298 	struct pcpu *pc;
1299 	vm_offset_t b64;
1300 	u_long xcr0;
1301 	int i;
1302 
1303 	/*
1304 	 * Allow the user to force the VERW variant even if MD_CLEAR is
1305 	 * not reported.  For instance, the hypervisor might unknowingly
1306 	 * filter the cap out.
1307 	 * For similar reasons, and for testing, allow enabling the
1308 	 * mitigation even when the MDS_NO cap is set.
1309 	 */
1310 	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
1311 	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
1312 	    hw_mds_disable == 3)) {
1313 		mds_handler = mds_handler_void;
1314 	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
1315 	    hw_mds_disable == 3) || hw_mds_disable == 1) {
1316 		mds_handler = mds_handler_verw;
1317 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1318 	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
1319 	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
1320 	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
1321 	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
1322 	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
1323 	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
1324 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1325 		/*
1326 		 * Nehalem, SandyBridge, IvyBridge
1327 		 */
1328 		CPU_FOREACH(i) {
1329 			pc = pcpu_find(i);
1330 			if (pc->pc_mds_buf == NULL) {
1331 				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
1332 				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1333 				bzero(pc->pc_mds_buf, 16);
1334 			}
1335 		}
1336 		mds_handler = mds_handler_ivb;
1337 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1338 	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
1339 	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
1340 	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
1341 	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
1342 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1343 		/*
1344 		 * Haswell, Broadwell
1345 		 */
1346 		CPU_FOREACH(i) {
1347 			pc = pcpu_find(i);
1348 			if (pc->pc_mds_buf == NULL) {
1349 				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
1350 				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1351 				bzero(pc->pc_mds_buf, 16);
1352 			}
1353 		}
1354 		mds_handler = mds_handler_bdw;
1355 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1356 	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
1357 	    CPUID_STEPPING) <= 5) ||
1358 	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
1359 	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
1360 	    CPUID_STEPPING) <= 0xb) ||
1361 	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
1362 	    CPUID_STEPPING) <= 0xc)) &&
1363 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1364 		/*
1365 		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
1366 		 * CascadeLake
1367 		 */
1368 		CPU_FOREACH(i) {
1369 			pc = pcpu_find(i);
1370 			if (pc->pc_mds_buf == NULL) {
1371 				pc->pc_mds_buf = malloc_domainset(6 * 1024,
1372 				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1373 				    M_WAITOK);
1374 				b64 = (vm_offset_t)malloc_domainset(64 + 63,
1375 				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1376 				    M_WAITOK);
1377 				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
1378 				bzero(pc->pc_mds_buf64, 64);
1379 			}
1380 		}
1381 		xcr0 = rxcr(0);
1382 		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
1383 		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
1384 			mds_handler = mds_handler_skl_avx512;
1385 		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
1386 		    (cpu_feature2 & CPUID2_AVX) != 0)
1387 			mds_handler = mds_handler_skl_avx;
1388 		else
1389 			mds_handler = mds_handler_skl_sse;
1390 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1391 	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
1392 	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
1393 	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
1394 	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
1395 	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
1396 	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
1397 	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
1398 	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
1399 	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
1400 	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
1401 	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
1402 	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
1403 	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
1404 	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
1405 	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
1406 		/* Silvermont, Airmont */
1407 		CPU_FOREACH(i) {
1408 			pc = pcpu_find(i);
1409 			if (pc->pc_mds_buf == NULL)
1410 				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
1411 		}
1412 		mds_handler = mds_handler_silvermont;
1413 	} else {
1414 		hw_mds_disable = 0;
1415 		mds_handler = mds_handler_void;
1416 	}
1417 }
1418 
1419 static void
1420 hw_mds_recalculate_boot(void *arg __unused)
1421 {
1422 
1423 	hw_mds_recalculate();
1424 }
1425 SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
1426 
1427 static int
1428 sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
1429 {
1430 	int error, val;
1431 
1432 	val = hw_mds_disable;
1433 	error = sysctl_handle_int(oidp, &val, 0, req);
1434 	if (error != 0 || req->newptr == NULL)
1435 		return (error);
1436 	if (val < 0 || val > 3)
1437 		return (EINVAL);
1438 	hw_mds_disable = val;
1439 	hw_mds_recalculate();
1440 	return (0);
1441 }
1442 
1443 SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
1444     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1445     sysctl_mds_disable_handler, "I",
1446     "Microarchitectural Data Sampling Mitigation "
1447     "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1448 
1449 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
1450     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1451     sysctl_mds_disable_handler, "I",
1452     "Microarchitectural Data Sampling Mitigation "
1453     "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1454 
1455 /*
1456  * Intel Transactional Memory Asynchronous Abort Mitigation
1457  * CVE-2019-11135
1458  */
1459 int x86_taa_enable;
1460 int x86_taa_state;
1461 enum {
1462 	TAA_NONE	= 0,	/* No mitigation enabled */
1463 	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
1464 	TAA_VERW	= 2,	/* Use VERW mitigation */
1465 	TAA_AUTO	= 3,	/* Automatically select the mitigation */
1466 
1467 	/* The states below are not selectable by the operator */
1468 
1469 	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
1470 	TAA_NOT_PRESENT	= 5	/* TSX is not present */
1471 };
1472 
1473 static void
1474 taa_set(bool enable, bool all)
1475 {
1476 
1477 	x86_msr_op(MSR_IA32_TSX_CTRL,
1478 	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1479 	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1480 	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
1481 	    NULL);
1482 }
1483 
1484 void
1485 x86_taa_recalculate(void)
1486 {
1487 	static int taa_saved_mds_disable = 0;
1488 	int taa_need = 0, taa_state = 0;
1489 	int mds_disable = 0, need_mds_recalc = 0;
1490 
1491 	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
1492 	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
1493 	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
1494 		/* TSX is not present */
1495 		x86_taa_state = TAA_NOT_PRESENT;
1496 		return;
1497 	}
1498 
1499 	/* Check to see what mitigation options the CPU gives us */
1500 	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
1501 		/* CPU is not susceptible to TAA */
1502 		taa_need = TAA_TAA_UC;
1503 	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
1504 		/*
1505 		 * CPU can turn off TSX.  This is the next best option
1506 		 * if TAA_NO hardware mitigation isn't present
1507 		 */
1508 		taa_need = TAA_TSX_DISABLE;
1509 	} else {
1510 		/* No TSX/TAA specific remedies are available. */
1511 		if (x86_taa_enable == TAA_TSX_DISABLE) {
1512 			if (bootverbose)
1513 				printf("TSX control not available\n");
1514 			return;
1515 		} else
1516 			taa_need = TAA_VERW;
1517 	}
1518 
1519 	/* Can we automatically take action, or are we being forced? */
1520 	if (x86_taa_enable == TAA_AUTO)
1521 		taa_state = taa_need;
1522 	else
1523 		taa_state = x86_taa_enable;
1524 
1525 	/* No state change, nothing to do */
1526 	if (taa_state == x86_taa_state) {
1527 		if (bootverbose)
1528 			printf("No TSX change made\n");
1529 		return;
1530 	}
1531 
1532 	/* Does the MSR need to be turned on or off? */
1533 	if (taa_state == TAA_TSX_DISABLE)
1534 		taa_set(true, true);
1535 	else if (x86_taa_state == TAA_TSX_DISABLE)
1536 		taa_set(false, true);
1537 
1538 	/* Does MDS need to be set to turn on VERW? */
1539 	if (taa_state == TAA_VERW) {
1540 		taa_saved_mds_disable = hw_mds_disable;
1541 		mds_disable = hw_mds_disable = 1;
1542 		need_mds_recalc = 1;
1543 	} else if (x86_taa_state == TAA_VERW) {
1544 		mds_disable = hw_mds_disable = taa_saved_mds_disable;
1545 		need_mds_recalc = 1;
1546 	}
1547 	if (need_mds_recalc) {
1548 		hw_mds_recalculate();
1549 		if (mds_disable != hw_mds_disable) {
1550 			if (bootverbose)
1551 				printf("Cannot change MDS state for TAA\n");
1552 			/* Don't update our state */
1553 			return;
1554 		}
1555 	}
1556 
1557 	x86_taa_state = taa_state;
1558 	return;
1559 }
1560 
1561 static void
1562 taa_recalculate_boot(void * arg __unused)
1563 {
1564 
1565 	x86_taa_recalculate();
1566 }
1567 SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);
1568 
1569 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
1570     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1571     "TSX Asynchronous Abort Mitigation");
1572 
1573 static int
1574 sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
1575 {
1576 	int error, val;
1577 
1578 	val = x86_taa_enable;
1579 	error = sysctl_handle_int(oidp, &val, 0, req);
1580 	if (error != 0 || req->newptr == NULL)
1581 		return (error);
1582 	if (val < TAA_NONE || val > TAA_AUTO)
1583 		return (EINVAL);
1584 	x86_taa_enable = val;
1585 	x86_taa_recalculate();
1586 	return (0);
1587 }
1588 
1589 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
1590     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1591     sysctl_taa_handler, "I",
1592     "TAA Mitigation enablement control "
1593     "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
1594 
1595 static int
1596 sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
1597 {
1598 	const char *state;
1599 
1600 	switch (x86_taa_state) {
1601 	case TAA_NONE:
1602 		state = "inactive";
1603 		break;
1604 	case TAA_TSX_DISABLE:
1605 		state = "TSX disabled";
1606 		break;
1607 	case TAA_VERW:
1608 		state = "VERW";
1609 		break;
1610 	case TAA_TAA_UC:
1611 		state = "Mitigated in microcode";
1612 		break;
1613 	case TAA_NOT_PRESENT:
1614 		state = "TSX not present";
1615 		break;
1616 	default:
1617 		state = "unknown";
1618 	}
1619 
1620 	return (SYSCTL_OUT(req, state, strlen(state)));
1621 }
1622 
1623 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
1624     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1625     sysctl_taa_state_handler, "A",
1626     "TAA Mitigation state");
1627 
1628 int __read_frequently cpu_flush_rsb_ctxsw;
1629 SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
1630     CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
1631     "Flush Return Stack Buffer on context switch");
1632 
1633 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
1634     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1635     "MCU Optimization, disable RDSEED mitigation");
1636 
1637 int x86_rngds_mitg_enable = 1;
1638 void
1639 x86_rngds_mitg_recalculate(bool all_cpus)
1640 {
1641 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
1642 		return;
1643 	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
1644 	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1645 	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1646 	    IA32_RNGDS_MITG_DIS, NULL);
1647 }
1648 
1649 static int
1650 sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
1651 {
1652 	int error, val;
1653 
1654 	val = x86_rngds_mitg_enable;
1655 	error = sysctl_handle_int(oidp, &val, 0, req);
1656 	if (error != 0 || req->newptr == NULL)
1657 		return (error);
1658 	x86_rngds_mitg_enable = val;
1659 	x86_rngds_mitg_recalculate(true);
1660 	return (0);
1661 }
1662 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
1663     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1664     sysctl_rngds_mitg_enable_handler, "I",
1665     "MCU Optimization, disabling RDSEED mitigation control "
1666     "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");
1667 
1668 static int
1669 sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
1670 {
1671 	const char *state;
1672 
1673 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
1674 		state = "Not applicable";
1675 	} else if (x86_rngds_mitg_enable == 0) {
1676 		state = "RDSEED not serialized";
1677 	} else {
1678 		state = "Mitigated";
1679 	}
1680 	return (SYSCTL_OUT(req, state, strlen(state)));
1681 }
1682 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
1683     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1684     sysctl_rngds_state_handler, "A",
1685     "MCU Optimization state");
1686 
1687 
1688 /*
1689  * Zenbleed.
1690  *
1691  * No corresponding errata is publicly listed.  AMD has issued a security
1692  * bulletin (AMD-SB-7008), entitled "Cross-Process Information Leak".  This
1693  * document lists (as of August 2023) platform firmware's availability target
1694  * dates, with most being November/December 2023.  It will then be up to
1695  * motherboard manufacturers to produce corresponding BIOS updates, which will
1696  * happen with an inevitable lag.  Additionally, for a variety of reasons,
1697  * operators might not be able to apply them everywhere.  On the side of
1698  * standalone CPU microcodes, no plans for availability have been published so
1699  * far.  However, a developer appearing to be an AMD employee has hardcoded in
1700  * Linux revision numbers of future microcodes that are presumed to fix the
1701  * vulnerability.
1702  *
1703  * Given the stability issues encountered with early microcode releases for Rome
1704  * (the only microcode publicly released so far) and the absence of official
1705  * communication on standalone CPU microcodes, we have opted instead for
1706  * matching by default all AMD Zen2 processors which, according to the
1707  * vulnerability's discoverer, are all affected (see
1708  * https://lock.cmpxchg8b.com/zenbleed.html).  This policy, also adopted by
1709  * OpenBSD, may be overridden using the tunable/sysctl
1710  * 'machdep.mitigations.zenbleed.enable'.  We might revise it later depending on
1711  * official statements, microcode updates' public availability and community
1712  * assessment that they actually fix the vulnerability without any instability
1713  * side effects.
1714  */
1715 
1716 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, zenbleed,
1717     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1718     "Zenbleed OS-triggered prevention (via chicken bit)");
1719 
1720 /* 2 is auto, see below. */
1721 int zenbleed_enable = 2;
1722 
1723 void
1724 zenbleed_sanitize_enable(void)
1725 {
1726 	/* Default to auto (2). */
1727 	if (zenbleed_enable < 0 || zenbleed_enable > 2)
1728 		zenbleed_enable = 2;
1729 }
1730 
1731 static bool
1732 zenbleed_chicken_bit_applicable(void)
1733 {
1734 	/* Concerns only bare-metal AMD Zen2 processors. */
1735 	return (cpu_vendor_id == CPU_VENDOR_AMD &&
1736 	    CPUID_TO_FAMILY(cpu_id) == 0x17 &&
1737 	    CPUID_TO_MODEL(cpu_id) >= 0x30 &&
1738 	    vm_guest == VM_GUEST_NO);
1739 }
1740 
1741 static bool
1742 zenbleed_chicken_bit_should_enable(void)
1743 {
1744 	/*
1745 	 * Obey tunable/sysctl.
1746 	 *
1747 	 * As explained above, currently, the automatic setting (2) and the "on"
1748 	 * one (1) have the same effect.  In the future, we might additionally
1749 	 * check for specific microcode revisions as part of the automatic
1750 	 * determination.
1751 	 */
1752 	return (zenbleed_enable != 0);
1753 }
1754 
1755 void
1756 zenbleed_check_and_apply(bool all_cpus)
1757 {
1758 	bool set;
1759 
1760 	if (!zenbleed_chicken_bit_applicable())
1761 		return;
1762 
1763 	set = zenbleed_chicken_bit_should_enable();
1764 
1765 	x86_msr_op(MSR_DE_CFG,
1766 	    (set ? MSR_OP_OR : MSR_OP_ANDNOT) |
1767 	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1768 	    DE_CFG_ZEN2_FP_BACKUP_FIX_BIT, NULL);
1769 }
1770 
1771 static int
1772 sysctl_zenbleed_enable_handler(SYSCTL_HANDLER_ARGS)
1773 {
1774 	int error, val;
1775 
1776 	val = zenbleed_enable;
1777 	error = sysctl_handle_int(oidp, &val, 0, req);
1778 	if (error != 0 || req->newptr == NULL)
1779 		return (error);
1780 	zenbleed_enable = val;
1781 	zenbleed_sanitize_enable();
1782 	zenbleed_check_and_apply(true);
1783 	return (0);
1784 }
1785 SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, enable, CTLTYPE_INT |
1786     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1787     sysctl_zenbleed_enable_handler, "I",
1788     "Enable Zenbleed OS-triggered mitigation (chicken bit) "
1789     "(0: Force disable, 1: Force enable, 2: Automatic determination)");
1790 
1791 static int
1792 sysctl_zenbleed_state_handler(SYSCTL_HANDLER_ARGS)
1793 {
1794 	const char *state;
1795 
1796 	if (!zenbleed_chicken_bit_applicable())
1797 		state = "Not applicable";
1798 	else if (zenbleed_chicken_bit_should_enable())
1799 		state = "Mitigation enabled";
1800 	else
1801 		state = "Mitigation disabled";
1802 	return (SYSCTL_OUT(req, state, strlen(state)));
1803 }
1804 SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, state,
1805     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1806     sysctl_zenbleed_state_handler, "A",
1807     "Zenbleed OS-triggered mitigation (chicken bit) state");
1808 
1809 
1810 /*
1811  * Enable and restore kernel text write permissions.
1812  * Callers must ensure that disable_wp()/restore_wp() are executed
1813  * without rescheduling on the same core.
1814  */
1815 bool
1816 disable_wp(void)
1817 {
1818 	u_int cr0;
1819 
1820 	cr0 = rcr0();
1821 	if ((cr0 & CR0_WP) == 0)
1822 		return (false);
1823 	load_cr0(cr0 & ~CR0_WP);
1824 	return (true);
1825 }
1826 
1827 void
1828 restore_wp(bool old_wp)
1829 {
1830 
1831 	if (old_wp)
1832 		load_cr0(rcr0() | CR0_WP);
1833 }
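/*
 * Example usage (editorial sketch): temporarily lifting CR0.WP to patch
 * kernel text.  critical_enter() is one way to satisfy the "no rescheduling
 * on the same core" requirement stated above.
 *
 *	bool wp;
 *
 *	critical_enter();
 *	wp = disable_wp();
 *	// ... store into otherwise read-only kernel text ...
 *	restore_wp(wp);
 *	critical_exit();
 */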
1834 
1835 bool
1836 acpi_get_fadt_bootflags(uint16_t *flagsp)
1837 {
1838 #ifdef DEV_ACPI
1839 	ACPI_TABLE_FADT *fadt;
1840 	vm_paddr_t physaddr;
1841 
1842 	physaddr = acpi_find_table(ACPI_SIG_FADT);
1843 	if (physaddr == 0)
1844 		return (false);
1845 	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
1846 	if (fadt == NULL)
1847 		return (false);
1848 	*flagsp = fadt->BootFlags;
1849 	acpi_unmap_table(fadt);
1850 	return (true);
1851 #else
1852 	return (false);
1853 #endif
1854 }
1855 
1856 DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
1857 {
1858 	bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
1859 	    cpu_vendor_id == CPU_VENDOR_HYGON;
1860 
1861 	if ((amd_feature & AMDID_RDTSCP) != 0)
1862 		return (rdtscp);
1863 	else if ((cpu_feature & CPUID_SSE2) != 0)
1864 		return (cpu_is_amd ? rdtsc_ordered_mfence :
1865 		    rdtsc_ordered_lfence);
1866 	else
1867 		return (rdtsc);
1868 }
1869