1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19 #include <linux/kvm_types.h>
20
21 #include <asm/spec-ctrl.h>
22 #include <asm/cmdline.h>
23 #include <asm/bugs.h>
24 #include <asm/processor.h>
25 #include <asm/processor-flags.h>
26 #include <asm/fpu/api.h>
27 #include <asm/msr.h>
28 #include <asm/vmx.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34
35 #include "cpu.h"
36
37 /*
38 * Speculation Vulnerability Handling
39 *
40 * Each vulnerability is handled with the following functions:
41 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
42 * take into account all relevant command line
43 * options.
44 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45 * selected a mitigation, in case the selection
46 * may want to change based on other choices
47 * made. This function is optional.
48 * <vuln>_apply_mitigation() -- Enable the selected mitigation.
49 *
50 * The compile-time mitigation in all cases should be AUTO. An explicit
51 * command-line option can override AUTO. If no such option is
52 * provided, <vuln>_select_mitigation() will override AUTO to the best
53 * mitigation option.
54 */
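
/*
 * Illustrative sketch only -- the "foo" names below are hypothetical and
 * not part of this file -- showing the intended boot-time ordering of the
 * three phases described above:
 *
 *	foo_select_mitigation();	// honor cmdline, otherwise resolve AUTO
 *	foo_update_mitigation();	// optional: revisit after all selections
 *	foo_apply_mitigation();		// enable whatever was chosen
 */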
55
56 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
57 u64 x86_spec_ctrl_base;
58
59 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
60 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
61 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
62
63 /*
64 * Set when the CPU has run a potentially malicious guest. An IBPB will
65 * be needed before running userspace. That IBPB will flush the branch
66 * predictor content.
67 */
68 DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
69 EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
70
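/* Value written to MSR_IA32_PRED_CMD when a branch prediction barrier is issued. */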
71 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
72
73 static u64 __ro_after_init x86_arch_cap_msr;
74
75 static DEFINE_MUTEX(spec_ctrl_mutex);
76
77 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
78
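/* Install @thunk as the active return thunk and log which one is in use. */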
79 static void __init set_return_thunk(void *thunk)
80 {
81 x86_return_thunk = thunk;
82
83 pr_info("active return thunk: %ps\n", thunk);
84 }
85
86 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
87 static void update_spec_ctrl(u64 val)
88 {
89 this_cpu_write(x86_spec_ctrl_current, val);
90 wrmsrq(MSR_IA32_SPEC_CTRL, val);
91 }
92
93 /*
94 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
95 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
96 */
97 void update_spec_ctrl_cond(u64 val)
98 {
99 if (this_cpu_read(x86_spec_ctrl_current) == val)
100 return;
101
102 this_cpu_write(x86_spec_ctrl_current, val);
103
104 /*
105 * When KERNEL_IBRS is in use this MSR is written on return-to-user;
106 * unless forced, the update can be delayed until that time.
107 */
108 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
109 wrmsrq(MSR_IA32_SPEC_CTRL, val);
110 }
111
112 noinstr u64 spec_ctrl_current(void)
113 {
114 return this_cpu_read(x86_spec_ctrl_current);
115 }
116 EXPORT_SYMBOL_GPL(spec_ctrl_current);
117
118 /*
119 * AMD specific MSR info for Speculative Store Bypass control.
120 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
121 */
122 u64 __ro_after_init x86_amd_ls_cfg_base;
123 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
124
125 /* Control conditional STIBP in switch_to() */
126 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
127 /* Control conditional IBPB in switch_mm() */
128 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
129 /* Control unconditional IBPB in switch_mm() */
130 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
131
132 /* Control IBPB on vCPU load */
133 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
134 EXPORT_SYMBOL_FOR_KVM(switch_vcpu_ibpb);
135
136 /* Control CPU buffer clear before idling (halt, mwait) */
137 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
138 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
139
140 /*
141 * Controls whether L1D-flush-based mitigations are enabled,
142 * based on hardware features and the admin setting via the boot
143 * parameter; defaults to false.
144 */
145 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
146
147 #undef pr_fmt
148 #define pr_fmt(fmt) "mitigations: " fmt
149
150 static void __init cpu_print_attack_vectors(void)
151 {
152 pr_info("Enabled attack vectors: ");
153
154 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
155 pr_cont("user_kernel, ");
156
157 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
158 pr_cont("user_user, ");
159
160 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
161 pr_cont("guest_host, ");
162
163 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
164 pr_cont("guest_guest, ");
165
166 pr_cont("SMT mitigations: ");
167
168 switch (smt_mitigations) {
169 case SMT_MITIGATIONS_OFF:
170 pr_cont("off\n");
171 break;
172 case SMT_MITIGATIONS_AUTO:
173 pr_cont("auto\n");
174 break;
175 case SMT_MITIGATIONS_ON:
176 pr_cont("on\n");
177 }
178 }
179
180 /*
181 * NOTE: This function is *only* called for SVM, since Intel uses
182 * MSR_IA32_SPEC_CTRL for SSBD.
183 */
184 void
185 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
186 {
187 u64 guestval, hostval;
188 struct thread_info *ti = current_thread_info();
189
190 /*
191 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
192 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
193 */
194 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
195 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
196 return;
197
198 /*
199 * If the host has SSBD mitigation enabled, force it in the host's
200 * virtual MSR value. If it's not permanently enabled, evaluate
201 * current's TIF_SSBD thread flag.
202 */
203 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
204 hostval = SPEC_CTRL_SSBD;
205 else
206 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
207
208 /* Sanitize the guest value */
209 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
210
211 if (hostval != guestval) {
212 unsigned long tif;
213
214 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
215 ssbd_spec_ctrl_to_tif(hostval);
216
217 speculation_ctrl_update(tif);
218 }
219 }
220 EXPORT_SYMBOL_FOR_KVM(x86_virt_spec_ctrl);
221
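/*
 * Engage Speculative Store Bypass Disable via MSR_AMD64_VIRT_SPEC_CTRL or,
 * failing that, via the SSBD bit in MSR_AMD64_LS_CFG.
 */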
222 static void x86_amd_ssb_disable(void)
223 {
224 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
225
226 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
227 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
228 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
229 wrmsrq(MSR_AMD64_LS_CFG, msrval);
230 }
231
232 #undef pr_fmt
233 #define pr_fmt(fmt) "MDS: " fmt
234
235 /*
236 * Returns true if vulnerability should be mitigated based on the
237 * selected attack vector controls.
238 *
239 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
240 */
241 static bool __init should_mitigate_vuln(unsigned int bug)
242 {
243 switch (bug) {
244 /*
245 * The only runtime-selected spectre_v1 mitigations in the kernel are
246 * related to SWAPGS protection on kernel entry. Therefore, protection
247 * is only required for the user->kernel attack vector.
248 */
249 case X86_BUG_SPECTRE_V1:
250 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
251
252 case X86_BUG_SPECTRE_V2:
253 case X86_BUG_RETBLEED:
254 case X86_BUG_L1TF:
255 case X86_BUG_ITS:
256 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
257 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
258
259 case X86_BUG_SPECTRE_V2_USER:
260 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
261 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
262
263 /*
264 * All the vulnerabilities below allow potentially leaking data
265 * across address spaces. Therefore, mitigation is required for
266 * any of these 4 attack vectors.
267 */
268 case X86_BUG_MDS:
269 case X86_BUG_TAA:
270 case X86_BUG_MMIO_STALE_DATA:
271 case X86_BUG_RFDS:
272 case X86_BUG_SRBDS:
273 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
274 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
275 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
276 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
277
278 case X86_BUG_GDS:
279 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
280 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
281 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
282 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
283 (smt_mitigations != SMT_MITIGATIONS_OFF);
284
285 case X86_BUG_SPEC_STORE_BYPASS:
286 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
287
288 case X86_BUG_VMSCAPE:
289 return cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
290
291 default:
292 WARN(1, "Unknown bug %x\n", bug);
293 return false;
294 }
295 }
296
297 /* Default mitigation for MDS-affected CPUs */
298 static enum mds_mitigations mds_mitigation __ro_after_init =
299 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
300 static bool mds_nosmt __ro_after_init = false;
301
302 static const char * const mds_strings[] = {
303 [MDS_MITIGATION_OFF] = "Vulnerable",
304 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
305 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
306 };
307
308 enum taa_mitigations {
309 TAA_MITIGATION_OFF,
310 TAA_MITIGATION_AUTO,
311 TAA_MITIGATION_UCODE_NEEDED,
312 TAA_MITIGATION_VERW,
313 TAA_MITIGATION_TSX_DISABLED,
314 };
315
316 /* Default mitigation for TAA-affected CPUs */
317 static enum taa_mitigations taa_mitigation __ro_after_init =
318 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
319
320 enum mmio_mitigations {
321 MMIO_MITIGATION_OFF,
322 MMIO_MITIGATION_AUTO,
323 MMIO_MITIGATION_UCODE_NEEDED,
324 MMIO_MITIGATION_VERW,
325 };
326
327 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
328 static enum mmio_mitigations mmio_mitigation __ro_after_init =
329 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
330
331 enum rfds_mitigations {
332 RFDS_MITIGATION_OFF,
333 RFDS_MITIGATION_AUTO,
334 RFDS_MITIGATION_VERW,
335 RFDS_MITIGATION_UCODE_NEEDED,
336 };
337
338 /* Default mitigation for Register File Data Sampling */
339 static enum rfds_mitigations rfds_mitigation __ro_after_init =
340 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
341
342 /*
343 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to
344 * userspace *and* on entry to KVM guests.
345 */
346 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
347
348 static void __init mds_select_mitigation(void)
349 {
350 if (!boot_cpu_has_bug(X86_BUG_MDS)) {
351 mds_mitigation = MDS_MITIGATION_OFF;
352 return;
353 }
354
355 if (mds_mitigation == MDS_MITIGATION_AUTO) {
356 if (should_mitigate_vuln(X86_BUG_MDS))
357 mds_mitigation = MDS_MITIGATION_FULL;
358 else
359 mds_mitigation = MDS_MITIGATION_OFF;
360 }
361
362 if (mds_mitigation == MDS_MITIGATION_OFF)
363 return;
364
365 verw_clear_cpu_buf_mitigation_selected = true;
366 }
367
368 static void __init mds_update_mitigation(void)
369 {
370 if (!boot_cpu_has_bug(X86_BUG_MDS))
371 return;
372
373 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
374 if (verw_clear_cpu_buf_mitigation_selected)
375 mds_mitigation = MDS_MITIGATION_FULL;
376
377 if (mds_mitigation == MDS_MITIGATION_FULL) {
378 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
379 mds_mitigation = MDS_MITIGATION_VMWERV;
380 }
381
382 pr_info("%s\n", mds_strings[mds_mitigation]);
383 }
384
385 static void __init mds_apply_mitigation(void)
386 {
387 if (mds_mitigation == MDS_MITIGATION_FULL ||
388 mds_mitigation == MDS_MITIGATION_VMWERV) {
389 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
390 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
391 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
392 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
393 cpu_smt_disable(false);
394 }
395 }
396
397 static int __init mds_cmdline(char *str)
398 {
399 if (!boot_cpu_has_bug(X86_BUG_MDS))
400 return 0;
401
402 if (!str)
403 return -EINVAL;
404
405 if (!strcmp(str, "off"))
406 mds_mitigation = MDS_MITIGATION_OFF;
407 else if (!strcmp(str, "full"))
408 mds_mitigation = MDS_MITIGATION_FULL;
409 else if (!strcmp(str, "full,nosmt")) {
410 mds_mitigation = MDS_MITIGATION_FULL;
411 mds_nosmt = true;
412 }
413
414 return 0;
415 }
416 early_param("mds", mds_cmdline);
417
418 #undef pr_fmt
419 #define pr_fmt(fmt) "TAA: " fmt
420
421 static bool taa_nosmt __ro_after_init;
422
423 static const char * const taa_strings[] = {
424 [TAA_MITIGATION_OFF] = "Vulnerable",
425 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
426 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
427 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
428 };
429
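/* TAA is only a concern when the CPU is affected and TSX (RTM) is enabled. */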
430 static bool __init taa_vulnerable(void)
431 {
432 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
433 }
434
435 static void __init taa_select_mitigation(void)
436 {
437 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
438 taa_mitigation = TAA_MITIGATION_OFF;
439 return;
440 }
441
442 /* TSX previously disabled by tsx=off */
443 if (!boot_cpu_has(X86_FEATURE_RTM)) {
444 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
445 return;
446 }
447
448 /* Microcode will be checked in taa_update_mitigation(). */
449 if (taa_mitigation == TAA_MITIGATION_AUTO) {
450 if (should_mitigate_vuln(X86_BUG_TAA))
451 taa_mitigation = TAA_MITIGATION_VERW;
452 else
453 taa_mitigation = TAA_MITIGATION_OFF;
454 }
455
456 if (taa_mitigation != TAA_MITIGATION_OFF)
457 verw_clear_cpu_buf_mitigation_selected = true;
458 }
459
460 static void __init taa_update_mitigation(void)
461 {
462 if (!taa_vulnerable())
463 return;
464
465 if (verw_clear_cpu_buf_mitigation_selected)
466 taa_mitigation = TAA_MITIGATION_VERW;
467
468 if (taa_mitigation == TAA_MITIGATION_VERW) {
469 /* Check if the requisite ucode is available. */
470 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
471 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
472
473 /*
474 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
475 * A microcode update fixes this behavior to clear CPU buffers. It also
476 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
477 * ARCH_CAP_TSX_CTRL_MSR bit.
478 *
479 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
480 * update is required.
481 */
482 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
483 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
484 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
485 }
486
487 pr_info("%s\n", taa_strings[taa_mitigation]);
488 }
489
490 static void __init taa_apply_mitigation(void)
491 {
492 if (taa_mitigation == TAA_MITIGATION_VERW ||
493 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
494 /*
495 * TSX is enabled, select alternate mitigation for TAA which is
496 * the same as MDS. Enable MDS static branch to clear CPU buffers.
497 *
498 * For guests that can't determine whether the correct microcode is
499 * present on host, enable the mitigation for UCODE_NEEDED as well.
500 */
501 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
502 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
503
504 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
505 cpu_smt_disable(false);
506 }
507 }
508
509 static int __init tsx_async_abort_parse_cmdline(char *str)
510 {
511 if (!boot_cpu_has_bug(X86_BUG_TAA))
512 return 0;
513
514 if (!str)
515 return -EINVAL;
516
517 if (!strcmp(str, "off")) {
518 taa_mitigation = TAA_MITIGATION_OFF;
519 } else if (!strcmp(str, "full")) {
520 taa_mitigation = TAA_MITIGATION_VERW;
521 } else if (!strcmp(str, "full,nosmt")) {
522 taa_mitigation = TAA_MITIGATION_VERW;
523 taa_nosmt = true;
524 }
525
526 return 0;
527 }
528 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
529
530 #undef pr_fmt
531 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
532
533 static bool mmio_nosmt __ro_after_init = false;
534
535 static const char * const mmio_strings[] = {
536 [MMIO_MITIGATION_OFF] = "Vulnerable",
537 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
538 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
539 };
540
541 static void __init mmio_select_mitigation(void)
542 {
543 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
544 mmio_mitigation = MMIO_MITIGATION_OFF;
545 return;
546 }
547
548 /* Microcode will be checked in mmio_update_mitigation(). */
549 if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
550 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
551 mmio_mitigation = MMIO_MITIGATION_VERW;
552 else
553 mmio_mitigation = MMIO_MITIGATION_OFF;
554 }
555
556 if (mmio_mitigation == MMIO_MITIGATION_OFF)
557 return;
558
559 /*
560 * Enable CPU buffer clear mitigation for host and VMM, if also affected
561 * by MDS or TAA.
562 */
563 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
564 verw_clear_cpu_buf_mitigation_selected = true;
565 }
566
567 static void __init mmio_update_mitigation(void)
568 {
569 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
570 return;
571
572 if (verw_clear_cpu_buf_mitigation_selected)
573 mmio_mitigation = MMIO_MITIGATION_VERW;
574
575 if (mmio_mitigation == MMIO_MITIGATION_VERW) {
576 /*
577 * Check if the system has the right microcode.
578 *
579 * CPU Fill buffer clear mitigation is enumerated by either an explicit
580 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
581 * affected systems.
582 */
583 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
584 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
585 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
586 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
587 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
588 }
589
590 pr_info("%s\n", mmio_strings[mmio_mitigation]);
591 }
592
593 static void __init mmio_apply_mitigation(void)
594 {
595 if (mmio_mitigation == MMIO_MITIGATION_OFF)
596 return;
597
598 /*
599 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
600 * not being used.
601 */
602 if (verw_clear_cpu_buf_mitigation_selected) {
603 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
604 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
605 } else {
606 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO);
607 }
608
609 /*
610 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
611 * be propagated to uncore buffers, clearing the Fill buffers on idle
612 * is required irrespective of SMT state.
613 */
614 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
615 static_branch_enable(&cpu_buf_idle_clear);
616
617 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
618 cpu_smt_disable(false);
619 }
620
621 static int __init mmio_stale_data_parse_cmdline(char *str)
622 {
623 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
624 return 0;
625
626 if (!str)
627 return -EINVAL;
628
629 if (!strcmp(str, "off")) {
630 mmio_mitigation = MMIO_MITIGATION_OFF;
631 } else if (!strcmp(str, "full")) {
632 mmio_mitigation = MMIO_MITIGATION_VERW;
633 } else if (!strcmp(str, "full,nosmt")) {
634 mmio_mitigation = MMIO_MITIGATION_VERW;
635 mmio_nosmt = true;
636 }
637
638 return 0;
639 }
640 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
641
642 #undef pr_fmt
643 #define pr_fmt(fmt) "Register File Data Sampling: " fmt
644
645 static const char * const rfds_strings[] = {
646 [RFDS_MITIGATION_OFF] = "Vulnerable",
647 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
648 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
649 };
650
651 static inline bool __init verw_clears_cpu_reg_file(void)
652 {
653 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
654 }
655
656 static void __init rfds_select_mitigation(void)
657 {
658 if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
659 rfds_mitigation = RFDS_MITIGATION_OFF;
660 return;
661 }
662
663 if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
664 if (should_mitigate_vuln(X86_BUG_RFDS))
665 rfds_mitigation = RFDS_MITIGATION_VERW;
666 else
667 rfds_mitigation = RFDS_MITIGATION_OFF;
668 }
669
670 if (rfds_mitigation == RFDS_MITIGATION_OFF)
671 return;
672
673 if (verw_clears_cpu_reg_file())
674 verw_clear_cpu_buf_mitigation_selected = true;
675 }
676
677 static void __init rfds_update_mitigation(void)
678 {
679 if (!boot_cpu_has_bug(X86_BUG_RFDS))
680 return;
681
682 if (verw_clear_cpu_buf_mitigation_selected)
683 rfds_mitigation = RFDS_MITIGATION_VERW;
684
685 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
686 if (!verw_clears_cpu_reg_file())
687 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
688 }
689
690 pr_info("%s\n", rfds_strings[rfds_mitigation]);
691 }
692
693 static void __init rfds_apply_mitigation(void)
694 {
695 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
696 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
697 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
698 }
699 }
700
701 static __init int rfds_parse_cmdline(char *str)
702 {
703 if (!str)
704 return -EINVAL;
705
706 if (!boot_cpu_has_bug(X86_BUG_RFDS))
707 return 0;
708
709 if (!strcmp(str, "off"))
710 rfds_mitigation = RFDS_MITIGATION_OFF;
711 else if (!strcmp(str, "on"))
712 rfds_mitigation = RFDS_MITIGATION_VERW;
713
714 return 0;
715 }
716 early_param("reg_file_data_sampling", rfds_parse_cmdline);
717
718 #undef pr_fmt
719 #define pr_fmt(fmt) "SRBDS: " fmt
720
721 enum srbds_mitigations {
722 SRBDS_MITIGATION_OFF,
723 SRBDS_MITIGATION_AUTO,
724 SRBDS_MITIGATION_UCODE_NEEDED,
725 SRBDS_MITIGATION_FULL,
726 SRBDS_MITIGATION_TSX_OFF,
727 SRBDS_MITIGATION_HYPERVISOR,
728 };
729
730 static enum srbds_mitigations srbds_mitigation __ro_after_init =
731 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
732
733 static const char * const srbds_strings[] = {
734 [SRBDS_MITIGATION_OFF] = "Vulnerable",
735 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
736 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
737 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
738 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
739 };
740
741 static bool srbds_off;
742
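/*
 * Program the RNGDS_MITG_DIS bit in MSR_IA32_MCU_OPT_CTRL to match the
 * selected SRBDS mitigation.
 */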
743 void update_srbds_msr(void)
744 {
745 u64 mcu_ctrl;
746
747 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
748 return;
749
750 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
751 return;
752
753 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
754 return;
755
756 /*
757 * An MDS_NO CPU, for which the SRBDS mitigation is not needed due to
758 * TSX being disabled, may not have received the SRBDS MSR microcode.
759 */
760 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
761 return;
762
763 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
764
765 switch (srbds_mitigation) {
766 case SRBDS_MITIGATION_OFF:
767 case SRBDS_MITIGATION_TSX_OFF:
768 mcu_ctrl |= RNGDS_MITG_DIS;
769 break;
770 case SRBDS_MITIGATION_FULL:
771 mcu_ctrl &= ~RNGDS_MITG_DIS;
772 break;
773 default:
774 break;
775 }
776
777 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
778 }
779
780 static void __init srbds_select_mitigation(void)
781 {
782 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
783 srbds_mitigation = SRBDS_MITIGATION_OFF;
784 return;
785 }
786
787 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
788 if (should_mitigate_vuln(X86_BUG_SRBDS))
789 srbds_mitigation = SRBDS_MITIGATION_FULL;
790 else {
791 srbds_mitigation = SRBDS_MITIGATION_OFF;
792 return;
793 }
794 }
795
796 /*
797 * Check to see if this is one of the MDS_NO systems supporting TSX that
798 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
799 * by Processor MMIO Stale Data vulnerability.
800 */
801 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
802 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
803 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
804 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
805 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
806 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
807 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
808 else if (srbds_off)
809 srbds_mitigation = SRBDS_MITIGATION_OFF;
810
811 pr_info("%s\n", srbds_strings[srbds_mitigation]);
812 }
813
814 static void __init srbds_apply_mitigation(void)
815 {
816 update_srbds_msr();
817 }
818
819 static int __init srbds_parse_cmdline(char *str)
820 {
821 if (!str)
822 return -EINVAL;
823
824 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
825 return 0;
826
827 srbds_off = !strcmp(str, "off");
828 return 0;
829 }
830 early_param("srbds", srbds_parse_cmdline);
831
832 #undef pr_fmt
833 #define pr_fmt(fmt) "L1D Flush : " fmt
834
835 enum l1d_flush_mitigations {
836 L1D_FLUSH_OFF = 0,
837 L1D_FLUSH_ON,
838 };
839
840 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
841
842 static void __init l1d_flush_select_mitigation(void)
843 {
844 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
845 return;
846
847 static_branch_enable(&switch_mm_cond_l1d_flush);
848 pr_info("Conditional flush on switch_mm() enabled\n");
849 }
850
851 static int __init l1d_flush_parse_cmdline(char *str)
852 {
853 if (!strcmp(str, "on"))
854 l1d_flush_mitigation = L1D_FLUSH_ON;
855
856 return 0;
857 }
858 early_param("l1d_flush", l1d_flush_parse_cmdline);
859
860 #undef pr_fmt
861 #define pr_fmt(fmt) "GDS: " fmt
862
863 enum gds_mitigations {
864 GDS_MITIGATION_OFF,
865 GDS_MITIGATION_AUTO,
866 GDS_MITIGATION_UCODE_NEEDED,
867 GDS_MITIGATION_FORCE,
868 GDS_MITIGATION_FULL,
869 GDS_MITIGATION_FULL_LOCKED,
870 GDS_MITIGATION_HYPERVISOR,
871 };
872
873 static enum gds_mitigations gds_mitigation __ro_after_init =
874 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
875
876 static const char * const gds_strings[] = {
877 [GDS_MITIGATION_OFF] = "Vulnerable",
878 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
879 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
880 [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
881 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
882 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
883 };
884
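/*
 * Report whether the microcode-based GDS mitigation is in effect. Exported
 * for use by KVM.
 */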
885 bool gds_ucode_mitigated(void)
886 {
887 return (gds_mitigation == GDS_MITIGATION_FULL ||
888 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
889 }
890 EXPORT_SYMBOL_FOR_KVM(gds_ucode_mitigated);
891
892 void update_gds_msr(void)
893 {
894 u64 mcu_ctrl_after;
895 u64 mcu_ctrl;
896
897 switch (gds_mitigation) {
898 case GDS_MITIGATION_OFF:
899 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
900 mcu_ctrl |= GDS_MITG_DIS;
901 break;
902 case GDS_MITIGATION_FULL_LOCKED:
903 /*
904 * The LOCKED state comes from the boot CPU. APs might not have
905 * the same state. Make sure the mitigation is enabled on all
906 * CPUs.
907 */
908 case GDS_MITIGATION_FULL:
909 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
910 mcu_ctrl &= ~GDS_MITG_DIS;
911 break;
912 case GDS_MITIGATION_FORCE:
913 case GDS_MITIGATION_UCODE_NEEDED:
914 case GDS_MITIGATION_HYPERVISOR:
915 case GDS_MITIGATION_AUTO:
916 return;
917 }
918
919 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
920
921 /*
922 * Check to make sure that the WRMSR value was not ignored. Writes to
923 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
924 * processor was not.
925 */
926 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
927 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
928 }
929
930 static void __init gds_select_mitigation(void)
931 {
932 u64 mcu_ctrl;
933
934 if (!boot_cpu_has_bug(X86_BUG_GDS))
935 return;
936
937 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
938 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
939 return;
940 }
941
942 /* Will verify below that mitigation _can_ be disabled */
943 if (gds_mitigation == GDS_MITIGATION_AUTO) {
944 if (should_mitigate_vuln(X86_BUG_GDS))
945 gds_mitigation = GDS_MITIGATION_FULL;
946 else
947 gds_mitigation = GDS_MITIGATION_OFF;
948 }
949
950 /* No microcode */
951 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
952 if (gds_mitigation != GDS_MITIGATION_FORCE)
953 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
954 return;
955 }
956
957 /* Microcode has mitigation, use it */
958 if (gds_mitigation == GDS_MITIGATION_FORCE)
959 gds_mitigation = GDS_MITIGATION_FULL;
960
961 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
962 if (mcu_ctrl & GDS_MITG_LOCKED) {
963 if (gds_mitigation == GDS_MITIGATION_OFF)
964 pr_warn("Mitigation locked. Disable failed.\n");
965
966 /*
967 * The mitigation is selected from the boot CPU. All other CPUs
968 * _should_ have the same state. If the boot CPU isn't locked
969 * but others are then update_gds_msr() will WARN() of the state
970 * mismatch. If the boot CPU is locked update_gds_msr() will
971 * ensure the other CPUs have the mitigation enabled.
972 */
973 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
974 }
975 }
976
977 static void __init gds_apply_mitigation(void)
978 {
979 if (!boot_cpu_has_bug(X86_BUG_GDS))
980 return;
981
982 /* Microcode is present */
983 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
984 update_gds_msr();
985 else if (gds_mitigation == GDS_MITIGATION_FORCE) {
986 /*
987 * This only needs to be done on the boot CPU so do it
988 * here rather than in update_gds_msr()
989 */
990 setup_clear_cpu_cap(X86_FEATURE_AVX);
991 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
992 }
993
994 pr_info("%s\n", gds_strings[gds_mitigation]);
995 }
996
997 static int __init gds_parse_cmdline(char *str)
998 {
999 if (!str)
1000 return -EINVAL;
1001
1002 if (!boot_cpu_has_bug(X86_BUG_GDS))
1003 return 0;
1004
1005 if (!strcmp(str, "off"))
1006 gds_mitigation = GDS_MITIGATION_OFF;
1007 else if (!strcmp(str, "force"))
1008 gds_mitigation = GDS_MITIGATION_FORCE;
1009
1010 return 0;
1011 }
1012 early_param("gather_data_sampling", gds_parse_cmdline);
1013
1014 #undef pr_fmt
1015 #define pr_fmt(fmt) "Spectre V1 : " fmt
1016
1017 enum spectre_v1_mitigation {
1018 SPECTRE_V1_MITIGATION_NONE,
1019 SPECTRE_V1_MITIGATION_AUTO,
1020 };
1021
1022 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1023 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1024 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1025
1026 static const char * const spectre_v1_strings[] = {
1027 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1028 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1029 };
1030
1031 /*
1032 * Does SMAP provide full mitigation against speculative kernel access to
1033 * userspace?
1034 */
1035 static bool smap_works_speculatively(void)
1036 {
1037 if (!boot_cpu_has(X86_FEATURE_SMAP))
1038 return false;
1039
1040 /*
1041 * On CPUs which are vulnerable to Meltdown, SMAP does not
1042 * prevent speculative access to user data in the L1 cache.
1043 * Consider SMAP to be non-functional as a mitigation on these
1044 * CPUs.
1045 */
1046 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1047 return false;
1048
1049 return true;
1050 }
1051
1052 static void __init spectre_v1_select_mitigation(void)
1053 {
1054 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1055 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1056
1057 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
1058 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1059 }
1060
1061 static void __init spectre_v1_apply_mitigation(void)
1062 {
1063 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1064 return;
1065
1066 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1067 /*
1068 * With Spectre v1, a user can speculatively control either
1069 * path of a conditional swapgs with a user-controlled GS
1070 * value. The mitigation is to add lfences to both code paths.
1071 *
1072 * If FSGSBASE is enabled, the user can put a kernel address in
1073 * GS, in which case SMAP provides no protection.
1074 *
1075 * If FSGSBASE is disabled, the user can only put a user space
1076 * address in GS. That makes an attack harder, but still
1077 * possible if there's no SMAP protection.
1078 */
1079 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1080 !smap_works_speculatively()) {
1081 /*
1082 * Mitigation can be provided from SWAPGS itself or
1083 * PTI as the CR3 write in the Meltdown mitigation
1084 * is serializing.
1085 *
1086 * If neither is there, mitigate with an LFENCE to
1087 * stop speculation through swapgs.
1088 */
1089 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1090 !boot_cpu_has(X86_FEATURE_PTI))
1091 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1092
1093 /*
1094 * Enable lfences in the kernel entry (non-swapgs)
1095 * paths, to prevent user entry from speculatively
1096 * skipping swapgs.
1097 */
1098 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1099 }
1100 }
1101
1102 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1103 }
1104
1105 static int __init nospectre_v1_cmdline(char *str)
1106 {
1107 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1108 return 0;
1109 }
1110 early_param("nospectre_v1", nospectre_v1_cmdline);
1111
1112 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1113
1114 /* Depends on spectre_v2 mitigation selected already */
1115 static inline bool cdt_possible(enum spectre_v2_mitigation mode)
1116 {
1117 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
1118 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
1119 return false;
1120
1121 if (mode == SPECTRE_V2_RETPOLINE ||
1122 mode == SPECTRE_V2_EIBRS_RETPOLINE)
1123 return true;
1124
1125 return false;
1126 }
1127
1128 #undef pr_fmt
1129 #define pr_fmt(fmt) "RETBleed: " fmt
1130
1131 enum its_mitigation {
1132 ITS_MITIGATION_OFF,
1133 ITS_MITIGATION_AUTO,
1134 ITS_MITIGATION_VMEXIT_ONLY,
1135 ITS_MITIGATION_ALIGNED_THUNKS,
1136 ITS_MITIGATION_RETPOLINE_STUFF,
1137 };
1138
1139 static enum its_mitigation its_mitigation __ro_after_init =
1140 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1141
1142 enum retbleed_mitigation {
1143 RETBLEED_MITIGATION_NONE,
1144 RETBLEED_MITIGATION_AUTO,
1145 RETBLEED_MITIGATION_UNRET,
1146 RETBLEED_MITIGATION_IBPB,
1147 RETBLEED_MITIGATION_IBRS,
1148 RETBLEED_MITIGATION_EIBRS,
1149 RETBLEED_MITIGATION_STUFF,
1150 };
1151
1152 static const char * const retbleed_strings[] = {
1153 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
1154 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1155 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1156 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1157 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1158 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1159 };
1160
1161 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1162 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1163
1164 static int __ro_after_init retbleed_nosmt = false;
1165
1166 enum srso_mitigation {
1167 SRSO_MITIGATION_NONE,
1168 SRSO_MITIGATION_AUTO,
1169 SRSO_MITIGATION_UCODE_NEEDED,
1170 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
1171 SRSO_MITIGATION_MICROCODE,
1172 SRSO_MITIGATION_NOSMT,
1173 SRSO_MITIGATION_SAFE_RET,
1174 SRSO_MITIGATION_IBPB,
1175 SRSO_MITIGATION_IBPB_ON_VMEXIT,
1176 SRSO_MITIGATION_BP_SPEC_REDUCE,
1177 };
1178
1179 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
1180
1181 static int __init retbleed_parse_cmdline(char *str)
1182 {
1183 if (!str)
1184 return -EINVAL;
1185
1186 while (str) {
1187 char *next = strchr(str, ',');
1188 if (next) {
1189 *next = 0;
1190 next++;
1191 }
1192
1193 if (!strcmp(str, "off")) {
1194 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1195 } else if (!strcmp(str, "auto")) {
1196 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1197 } else if (!strcmp(str, "unret")) {
1198 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1199 } else if (!strcmp(str, "ibpb")) {
1200 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1201 } else if (!strcmp(str, "stuff")) {
1202 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1203 } else if (!strcmp(str, "nosmt")) {
1204 retbleed_nosmt = true;
1205 } else if (!strcmp(str, "force")) {
1206 setup_force_cpu_bug(X86_BUG_RETBLEED);
1207 } else {
1208 pr_err("Ignoring unknown retbleed option (%s).", str);
1209 }
1210
1211 str = next;
1212 }
1213
1214 return 0;
1215 }
1216 early_param("retbleed", retbleed_parse_cmdline);
1217
1218 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1219 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1220
1221 static void __init retbleed_select_mitigation(void)
1222 {
1223 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
1224 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1225 return;
1226 }
1227
1228 switch (retbleed_mitigation) {
1229 case RETBLEED_MITIGATION_UNRET:
1230 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1231 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1232 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1233 }
1234 break;
1235 case RETBLEED_MITIGATION_IBPB:
1236 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1237 pr_err("WARNING: CPU does not support IBPB.\n");
1238 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1239 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1240 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1241 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1242 }
1243 break;
1244 case RETBLEED_MITIGATION_STUFF:
1245 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1246 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1247 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1248 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1249 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1250 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1251 }
1252 break;
1253 default:
1254 break;
1255 }
1256
1257 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1258 return;
1259
1260 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
1261 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1262 return;
1263 }
1264
1265 /* Intel mitigation selected in retbleed_update_mitigation() */
1266 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1267 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1268 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1269 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1270 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1271 boot_cpu_has(X86_FEATURE_IBPB))
1272 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1273 else
1274 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1275 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1276 /* Final mitigation depends on spectre-v2 selection */
1277 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1278 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1279 else if (boot_cpu_has(X86_FEATURE_IBRS))
1280 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1281 else
1282 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1283 }
1284 }
1285
1286 static void __init retbleed_update_mitigation(void)
1287 {
1288 if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
1289 return;
1290
1291 /* ITS can also enable stuffing */
1292 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
1293 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1294
1295 /* If SRSO is using IBPB, that works for retbleed too */
1296 if (srso_mitigation == SRSO_MITIGATION_IBPB)
1297 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1298
1299 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
1300 !cdt_possible(spectre_v2_enabled)) {
1301 pr_err("WARNING: retbleed=stuff depends on retpoline\n");
1302 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1303 }
1304
1305 /*
1306 * Let IBRS trump all on Intel without overriding the effect of the
1307 * retbleed= cmdline option, except for call-depth-based stuffing.
1308 */
1309 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1310 switch (spectre_v2_enabled) {
1311 case SPECTRE_V2_IBRS:
1312 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1313 break;
1314 case SPECTRE_V2_EIBRS:
1315 case SPECTRE_V2_EIBRS_RETPOLINE:
1316 case SPECTRE_V2_EIBRS_LFENCE:
1317 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1318 break;
1319 default:
1320 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
1321 if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
1322 pr_err(RETBLEED_INTEL_MSG);
1323
1324 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1325 }
1326 }
1327 }
1328
1329 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1330 }
1331
1332 static void __init retbleed_apply_mitigation(void)
1333 {
1334 bool mitigate_smt = false;
1335
1336 switch (retbleed_mitigation) {
1337 case RETBLEED_MITIGATION_NONE:
1338 return;
1339
1340 case RETBLEED_MITIGATION_UNRET:
1341 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1342 setup_force_cpu_cap(X86_FEATURE_UNRET);
1343
1344 set_return_thunk(retbleed_return_thunk);
1345
1346 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1347 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1348 pr_err(RETBLEED_UNTRAIN_MSG);
1349
1350 mitigate_smt = true;
1351 break;
1352
1353 case RETBLEED_MITIGATION_IBPB:
1354 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1355 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1356 mitigate_smt = true;
1357
1358 /*
1359 * IBPB on entry already obviates the need for
1360 * software-based untraining so clear those in case some
1361 * other mitigation like SRSO has selected them.
1362 */
1363 setup_clear_cpu_cap(X86_FEATURE_UNRET);
1364 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1365
1366 /*
1367 * There is no need for RSB filling: write_ibpb() ensures
1368 * all predictions, including the RSB, are invalidated,
1369 * regardless of IBPB implementation.
1370 */
1371 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1372
1373 break;
1374
1375 case RETBLEED_MITIGATION_STUFF:
1376 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1377 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1378
1379 set_return_thunk(call_depth_return_thunk);
1380 break;
1381
1382 default:
1383 break;
1384 }
1385
1386 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1387 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
1388 cpu_smt_disable(false);
1389 }
1390
1391 #undef pr_fmt
1392 #define pr_fmt(fmt) "ITS: " fmt
1393
1394 static const char * const its_strings[] = {
1395 [ITS_MITIGATION_OFF] = "Vulnerable",
1396 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1397 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1398 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1399 };
1400
1401 static int __init its_parse_cmdline(char *str)
1402 {
1403 if (!str)
1404 return -EINVAL;
1405
1406 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1407 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1408 return 0;
1409 }
1410
1411 if (!strcmp(str, "off")) {
1412 its_mitigation = ITS_MITIGATION_OFF;
1413 } else if (!strcmp(str, "on")) {
1414 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1415 } else if (!strcmp(str, "force")) {
1416 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1417 setup_force_cpu_bug(X86_BUG_ITS);
1418 } else if (!strcmp(str, "vmexit")) {
1419 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1420 } else if (!strcmp(str, "stuff")) {
1421 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1422 } else {
1423 pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1424 }
1425
1426 return 0;
1427 }
1428 early_param("indirect_target_selection", its_parse_cmdline);
1429
1430 static void __init its_select_mitigation(void)
1431 {
1432 if (!boot_cpu_has_bug(X86_BUG_ITS)) {
1433 its_mitigation = ITS_MITIGATION_OFF;
1434 return;
1435 }
1436
1437 if (its_mitigation == ITS_MITIGATION_AUTO) {
1438 if (should_mitigate_vuln(X86_BUG_ITS))
1439 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1440 else
1441 its_mitigation = ITS_MITIGATION_OFF;
1442 }
1443
1444 if (its_mitigation == ITS_MITIGATION_OFF)
1445 return;
1446
1447 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1448 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1449 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1450 its_mitigation = ITS_MITIGATION_OFF;
1451 return;
1452 }
1453
1454 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1455 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1456 its_mitigation = ITS_MITIGATION_OFF;
1457 return;
1458 }
1459
1460 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1461 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1462 pr_err("RSB stuff mitigation not supported, using default\n");
1463 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1464 }
1465
1466 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1467 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1468 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1469 }
1470
1471 static void __init its_update_mitigation(void)
1472 {
1473 if (!boot_cpu_has_bug(X86_BUG_ITS))
1474 return;
1475
1476 switch (spectre_v2_enabled) {
1477 case SPECTRE_V2_NONE:
1478 if (its_mitigation != ITS_MITIGATION_OFF)
1479 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1480 its_mitigation = ITS_MITIGATION_OFF;
1481 break;
1482 case SPECTRE_V2_RETPOLINE:
1483 case SPECTRE_V2_EIBRS_RETPOLINE:
1484 /* Retpoline+CDT mitigates ITS */
1485 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1486 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1487 break;
1488 case SPECTRE_V2_LFENCE:
1489 case SPECTRE_V2_EIBRS_LFENCE:
1490 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1491 its_mitigation = ITS_MITIGATION_OFF;
1492 break;
1493 default:
1494 break;
1495 }
1496
1497 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1498 !cdt_possible(spectre_v2_enabled))
1499 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1500
1501 pr_info("%s\n", its_strings[its_mitigation]);
1502 }
1503
1504 static void __init its_apply_mitigation(void)
1505 {
1506 switch (its_mitigation) {
1507 case ITS_MITIGATION_OFF:
1508 case ITS_MITIGATION_AUTO:
1509 case ITS_MITIGATION_VMEXIT_ONLY:
1510 break;
1511 case ITS_MITIGATION_ALIGNED_THUNKS:
1512 if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1513 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1514
1515 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1516 set_return_thunk(its_return_thunk);
1517 break;
1518 case ITS_MITIGATION_RETPOLINE_STUFF:
1519 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1520 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1521 set_return_thunk(call_depth_return_thunk);
1522 break;
1523 }
1524 }
1525
1526 #undef pr_fmt
1527 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1528
1529 enum tsa_mitigations {
1530 TSA_MITIGATION_NONE,
1531 TSA_MITIGATION_AUTO,
1532 TSA_MITIGATION_UCODE_NEEDED,
1533 TSA_MITIGATION_USER_KERNEL,
1534 TSA_MITIGATION_VM,
1535 TSA_MITIGATION_FULL,
1536 };
1537
1538 static const char * const tsa_strings[] = {
1539 [TSA_MITIGATION_NONE] = "Vulnerable",
1540 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1541 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1542 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1543 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1544 };
1545
1546 static enum tsa_mitigations tsa_mitigation __ro_after_init =
1547 IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1548
1549 static int __init tsa_parse_cmdline(char *str)
1550 {
1551 if (!str)
1552 return -EINVAL;
1553
1554 if (!strcmp(str, "off"))
1555 tsa_mitigation = TSA_MITIGATION_NONE;
1556 else if (!strcmp(str, "on"))
1557 tsa_mitigation = TSA_MITIGATION_FULL;
1558 else if (!strcmp(str, "user"))
1559 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1560 else if (!strcmp(str, "vm"))
1561 tsa_mitigation = TSA_MITIGATION_VM;
1562 else
1563 pr_err("Ignoring unknown tsa=%s option.\n", str);
1564
1565 return 0;
1566 }
1567 early_param("tsa", tsa_parse_cmdline);
1568
1569 static void __init tsa_select_mitigation(void)
1570 {
1571 if (!boot_cpu_has_bug(X86_BUG_TSA)) {
1572 tsa_mitigation = TSA_MITIGATION_NONE;
1573 return;
1574 }
1575
1576 if (tsa_mitigation == TSA_MITIGATION_AUTO) {
1577 bool vm = false, uk = false;
1578
1579 tsa_mitigation = TSA_MITIGATION_NONE;
1580
1581 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
1582 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
1583 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1584 uk = true;
1585 }
1586
1587 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
1588 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
1589 tsa_mitigation = TSA_MITIGATION_VM;
1590 vm = true;
1591 }
1592
1593 if (uk && vm)
1594 tsa_mitigation = TSA_MITIGATION_FULL;
1595 }
1596
1597 if (tsa_mitigation == TSA_MITIGATION_NONE)
1598 return;
1599
1600 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
1601 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1602
1603 /*
1604 * No need to set verw_clear_cpu_buf_mitigation_selected - it
1605 * doesn't fit all cases here and it is not needed because this
1606 * is the only VERW-based mitigation on AMD.
1607 */
1608 pr_info("%s\n", tsa_strings[tsa_mitigation]);
1609 }
1610
1611 static void __init tsa_apply_mitigation(void)
1612 {
1613 switch (tsa_mitigation) {
1614 case TSA_MITIGATION_USER_KERNEL:
1615 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1616 break;
1617 case TSA_MITIGATION_VM:
1618 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1619 break;
1620 case TSA_MITIGATION_FULL:
1621 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1622 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1623 break;
1624 default:
1625 break;
1626 }
1627 }
1628
1629 #undef pr_fmt
1630 #define pr_fmt(fmt) "Spectre V2 : " fmt
1631
1632 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1633 SPECTRE_V2_USER_NONE;
1634 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1635 SPECTRE_V2_USER_NONE;
1636
1637 #ifdef CONFIG_MITIGATION_RETPOLINE
1638 static bool spectre_v2_bad_module;
1639
1640 bool retpoline_module_ok(bool has_retpoline)
1641 {
1642 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1643 return true;
1644
1645 pr_err("System may be vulnerable to spectre v2\n");
1646 spectre_v2_bad_module = true;
1647 return false;
1648 }
1649
1650 static inline const char *spectre_v2_module_string(void)
1651 {
1652 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1653 }
1654 #else
1655 static inline const char *spectre_v2_module_string(void) { return ""; }
1656 #endif
1657
1658 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1659 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1660 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1661 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1662
1663 #ifdef CONFIG_BPF_SYSCALL
1664 void unpriv_ebpf_notify(int new_state)
1665 {
1666 if (new_state)
1667 return;
1668
1669 /* Unprivileged eBPF is enabled */
1670
1671 switch (spectre_v2_enabled) {
1672 case SPECTRE_V2_EIBRS:
1673 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1674 break;
1675 case SPECTRE_V2_EIBRS_LFENCE:
1676 if (sched_smt_active())
1677 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1678 break;
1679 default:
1680 break;
1681 }
1682 }
1683 #endif
1684
1685 /* The kernel command line selection for spectre v2 */
1686 enum spectre_v2_mitigation_cmd {
1687 SPECTRE_V2_CMD_NONE,
1688 SPECTRE_V2_CMD_AUTO,
1689 SPECTRE_V2_CMD_FORCE,
1690 SPECTRE_V2_CMD_RETPOLINE,
1691 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1692 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1693 SPECTRE_V2_CMD_EIBRS,
1694 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1695 SPECTRE_V2_CMD_EIBRS_LFENCE,
1696 SPECTRE_V2_CMD_IBRS,
1697 };
1698
1699 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init =
1700 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
1701
1702 enum spectre_v2_user_mitigation_cmd {
1703 SPECTRE_V2_USER_CMD_NONE,
1704 SPECTRE_V2_USER_CMD_AUTO,
1705 SPECTRE_V2_USER_CMD_FORCE,
1706 SPECTRE_V2_USER_CMD_PRCTL,
1707 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1708 SPECTRE_V2_USER_CMD_SECCOMP,
1709 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1710 };
1711
1712 static enum spectre_v2_user_mitigation_cmd spectre_v2_user_cmd __ro_after_init =
1713 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
1714
1715 static const char * const spectre_v2_user_strings[] = {
1716 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1717 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1718 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1719 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1720 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1721 };
1722
1723 static int __init spectre_v2_user_parse_cmdline(char *str)
1724 {
1725 if (!str)
1726 return -EINVAL;
1727
1728 if (!strcmp(str, "auto"))
1729 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_AUTO;
1730 else if (!strcmp(str, "off"))
1731 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_NONE;
1732 else if (!strcmp(str, "on"))
1733 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_FORCE;
1734 else if (!strcmp(str, "prctl"))
1735 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL;
1736 else if (!strcmp(str, "prctl,ibpb"))
1737 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL_IBPB;
1738 else if (!strcmp(str, "seccomp"))
1739 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP;
1740 else if (!strcmp(str, "seccomp,ibpb"))
1741 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP_IBPB;
1742 else
1743 pr_err("Ignoring unknown spectre_v2_user option (%s).", str);
1744
1745 return 0;
1746 }
1747 early_param("spectre_v2_user", spectre_v2_user_parse_cmdline);
1748
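/* True for legacy IBRS as well as any of the enhanced IBRS modes. */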
1749 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1750 {
1751 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1752 }
1753
1754 static void __init spectre_v2_user_select_mitigation(void)
1755 {
1756 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1757 return;
1758
1759 switch (spectre_v2_user_cmd) {
1760 case SPECTRE_V2_USER_CMD_NONE:
1761 return;
1762 case SPECTRE_V2_USER_CMD_FORCE:
1763 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1764 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1765 break;
1766 case SPECTRE_V2_USER_CMD_AUTO:
1767 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1768 break;
1769 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1770 if (smt_mitigations == SMT_MITIGATIONS_OFF)
1771 break;
1772 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1773 break;
1774 case SPECTRE_V2_USER_CMD_PRCTL:
1775 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1776 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1777 break;
1778 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1779 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1780 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1781 break;
1782 case SPECTRE_V2_USER_CMD_SECCOMP:
1783 if (IS_ENABLED(CONFIG_SECCOMP))
1784 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1785 else
1786 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1787 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1788 break;
1789 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1790 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1791 if (IS_ENABLED(CONFIG_SECCOMP))
1792 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1793 else
1794 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1795 break;
1796 }
1797
1798 /*
1799 * At this point, an STIBP mode other than "off" has been set.
1800 * If STIBP support is not being forced, check if STIBP always-on
1801 * is preferred.
1802 */
1803 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1804 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1805 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1806 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1807
1808 if (!boot_cpu_has(X86_FEATURE_IBPB))
1809 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1810
1811 if (!boot_cpu_has(X86_FEATURE_STIBP))
1812 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1813 }
1814
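/*
 * Re-evaluate the user-space IBPB/STIBP choice once the spectre_v2 and
 * retbleed selections are known; those can force STIBP on or make it
 * unnecessary.
 */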
1815 static void __init spectre_v2_user_update_mitigation(void)
1816 {
1817 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1818 return;
1819
1820 /* The spectre_v2 cmd line can override spectre_v2_user options */
1821 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1822 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1823 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1824 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1825 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1826 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1827 }
1828
1829 /*
1830 	 * If STIBP is not supported, Intel enhanced IBRS is enabled, or SMT is
1831 	 * impossible, STIBP is not required.
1832 *
1833 * Intel's Enhanced IBRS also protects against cross-thread branch target
1834 * injection in user-mode as the IBRS bit remains always set which
1835 * implicitly enables cross-thread protections. However, in legacy IBRS
1836 * mode, the IBRS bit is set only on kernel entry and cleared on return
1837 * to userspace. AMD Automatic IBRS also does not protect userspace.
1838 * These modes therefore disable the implicit cross-thread protection,
1839 * so allow for STIBP to be selected in those cases.
1840 */
1841 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1842 !cpu_smt_possible() ||
1843 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1844 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1845 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1846 return;
1847 }
1848
1849 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1850 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1851 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1852 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
1853 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
1854 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1855 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1856 }
1857 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
1858 }
1859
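/*
 * Enable the static keys that issue an IBPB on mm switch and on vCPU switch
 * for the selected user-space IBPB mode.
 */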
1860 static void __init spectre_v2_user_apply_mitigation(void)
1861 {
1862 /* Initialize Indirect Branch Prediction Barrier */
1863 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
1864 static_branch_enable(&switch_vcpu_ibpb);
1865
1866 switch (spectre_v2_user_ibpb) {
1867 case SPECTRE_V2_USER_STRICT:
1868 static_branch_enable(&switch_mm_always_ibpb);
1869 break;
1870 case SPECTRE_V2_USER_PRCTL:
1871 case SPECTRE_V2_USER_SECCOMP:
1872 static_branch_enable(&switch_mm_cond_ibpb);
1873 break;
1874 default:
1875 break;
1876 }
1877
1878 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1879 static_key_enabled(&switch_mm_always_ibpb) ?
1880 "always-on" : "conditional");
1881 }
1882 }
1883
1884 static const char * const spectre_v2_strings[] = {
1885 [SPECTRE_V2_NONE] = "Vulnerable",
1886 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
1887 [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE",
1888 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
1889 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1890 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1891 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
1892 };
1893
1894 static bool nospectre_v2 __ro_after_init;
1895
1896 static int __init nospectre_v2_parse_cmdline(char *str)
1897 {
1898 nospectre_v2 = true;
1899 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
1900 return 0;
1901 }
1902 early_param("nospectre_v2", nospectre_v2_parse_cmdline);
1903
1904 static int __init spectre_v2_parse_cmdline(char *str)
1905 {
1906 if (!str)
1907 return -EINVAL;
1908
1909 if (nospectre_v2)
1910 return 0;
1911
1912 if (!strcmp(str, "off")) {
1913 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
1914 } else if (!strcmp(str, "on")) {
1915 spectre_v2_cmd = SPECTRE_V2_CMD_FORCE;
1916 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1917 setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
1918 } else if (!strcmp(str, "retpoline")) {
1919 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE;
1920 } else if (!strcmp(str, "retpoline,amd") ||
1921 !strcmp(str, "retpoline,lfence")) {
1922 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_LFENCE;
1923 } else if (!strcmp(str, "retpoline,generic")) {
1924 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_GENERIC;
1925 } else if (!strcmp(str, "eibrs")) {
1926 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS;
1927 } else if (!strcmp(str, "eibrs,lfence")) {
1928 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_LFENCE;
1929 } else if (!strcmp(str, "eibrs,retpoline")) {
1930 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_RETPOLINE;
1931 } else if (!strcmp(str, "auto")) {
1932 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
1933 } else if (!strcmp(str, "ibrs")) {
1934 spectre_v2_cmd = SPECTRE_V2_CMD_IBRS;
1935 } else {
1936 pr_err("Ignoring unknown spectre_v2 option (%s).", str);
1937 }
1938
1939 return 0;
1940 }
1941 early_param("spectre_v2", spectre_v2_parse_cmdline);
1942
1943 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1944 {
1945 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1946 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1947 return SPECTRE_V2_NONE;
1948 }
1949
1950 return SPECTRE_V2_RETPOLINE;
1951 }
1952
1953 static bool __ro_after_init rrsba_disabled;
1954
1955 /* Disable in-kernel use of non-RSB RET predictors */
1956 static void __init spec_ctrl_disable_kernel_rrsba(void)
1957 {
1958 if (rrsba_disabled)
1959 return;
1960
1961 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
1962 rrsba_disabled = true;
1963 return;
1964 }
1965
1966 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1967 return;
1968
1969 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1970 update_spec_ctrl(x86_spec_ctrl_base);
1971 rrsba_disabled = true;
1972 }
1973
1974 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
1975 {
1976 /*
1977 * WARNING! There are many subtleties to consider when changing *any*
1978 * code related to RSB-related mitigations. Before doing so, carefully
1979 * read the following document, and update if necessary:
1980 *
1981 * Documentation/admin-guide/hw-vuln/rsb.rst
1982 *
1983 * In an overly simplified nutshell:
1984 *
1985 * - User->user RSB attacks are conditionally mitigated during
1986 * context switches by cond_mitigation -> write_ibpb().
1987 *
1988 * - User->kernel and guest->host attacks are mitigated by eIBRS or
1989 * RSB filling.
1990 *
1991 * Though, depending on config, note that other alternative
1992 * mitigations may end up getting used instead, e.g., IBPB on
1993 * entry/vmexit, call depth tracking, or return thunks.
1994 */
1995
1996 switch (mode) {
1997 case SPECTRE_V2_NONE:
1998 break;
1999
2000 case SPECTRE_V2_EIBRS:
2001 case SPECTRE_V2_EIBRS_LFENCE:
2002 case SPECTRE_V2_EIBRS_RETPOLINE:
2003 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2004 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2005 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2006 }
2007 break;
2008
2009 case SPECTRE_V2_RETPOLINE:
2010 case SPECTRE_V2_LFENCE:
2011 case SPECTRE_V2_IBRS:
2012 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2013 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2014 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2015 break;
2016
2017 default:
2018 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2019 dump_stack();
2020 break;
2021 }
2022 }
2023
2024 /*
2025 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by
2026 * branch history in userspace. Not needed if BHI_NO is set.
2027 */
2028 static bool __init spec_ctrl_bhi_dis(void)
2029 {
2030 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2031 return false;
2032
2033 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2034 update_spec_ctrl(x86_spec_ctrl_base);
2035 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2036
2037 return true;
2038 }
2039
2040 enum bhi_mitigations {
2041 BHI_MITIGATION_OFF,
2042 BHI_MITIGATION_AUTO,
2043 BHI_MITIGATION_ON,
2044 BHI_MITIGATION_VMEXIT_ONLY,
2045 };
2046
2047 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2048 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2049
2050 static int __init spectre_bhi_parse_cmdline(char *str)
2051 {
2052 if (!str)
2053 return -EINVAL;
2054
2055 if (!strcmp(str, "off"))
2056 bhi_mitigation = BHI_MITIGATION_OFF;
2057 else if (!strcmp(str, "on"))
2058 bhi_mitigation = BHI_MITIGATION_ON;
2059 else if (!strcmp(str, "vmexit"))
2060 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2061 else
2062 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2063
2064 return 0;
2065 }
2066 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
2067
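/*
 * Resolve the BHI_MITIGATION_AUTO case from the configured attack vectors:
 * full mitigation, VM-exit-only clearing, or off.
 */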
2068 static void __init bhi_select_mitigation(void)
2069 {
2070 if (!boot_cpu_has(X86_BUG_BHI))
2071 bhi_mitigation = BHI_MITIGATION_OFF;
2072
2073 if (bhi_mitigation != BHI_MITIGATION_AUTO)
2074 return;
2075
2076 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2077 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2078 bhi_mitigation = BHI_MITIGATION_ON;
2079 else
2080 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2081 } else {
2082 bhi_mitigation = BHI_MITIGATION_OFF;
2083 }
2084 }
2085
2086 static void __init bhi_update_mitigation(void)
2087 {
2088 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2089 bhi_mitigation = BHI_MITIGATION_OFF;
2090 }
2091
2092 static void __init bhi_apply_mitigation(void)
2093 {
2094 if (bhi_mitigation == BHI_MITIGATION_OFF)
2095 return;
2096
2097 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2098 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2099 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2100 spec_ctrl_disable_kernel_rrsba();
2101 if (rrsba_disabled)
2102 return;
2103 }
2104
2105 if (!IS_ENABLED(CONFIG_X86_64))
2106 return;
2107
2108 /* Mitigate in hardware if supported */
2109 if (spec_ctrl_bhi_dis())
2110 return;
2111
2112 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2113 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2114 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2115 return;
2116 }
2117
2118 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2119 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2120 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2121 }
2122
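/*
 * Sanity-check the spectre_v2= selection against the compiled-in options and
 * CPU features, then pick the base Spectre v2 mitigation mode.
 */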
2123 static void __init spectre_v2_select_mitigation(void)
2124 {
2125 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE ||
2126 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2127 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2128 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2129 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2130 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2131 pr_err("RETPOLINE selected but not compiled in. Switching to AUTO select\n");
2132 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2133 }
2134
2135 if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS ||
2136 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2137 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2138 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2139 pr_err("EIBRS selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n");
2140 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2141 }
2142
2143 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2144 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2145 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2146 pr_err("LFENCE selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n");
2147 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2148 }
2149
2150 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2151 pr_err("IBRS selected but not compiled in. Switching to AUTO select\n");
2152 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2153 }
2154
2155 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2156 pr_err("IBRS selected but not Intel CPU. Switching to AUTO select\n");
2157 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2158 }
2159
2160 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2161 pr_err("IBRS selected but CPU doesn't have IBRS. Switching to AUTO select\n");
2162 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2163 }
2164
2165 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2166 pr_err("IBRS selected but running as XenPV guest. Switching to AUTO select\n");
2167 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2168 }
2169
2170 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) {
2171 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2172 return;
2173 }
2174
2175 switch (spectre_v2_cmd) {
2176 case SPECTRE_V2_CMD_NONE:
2177 return;
2178
2179 case SPECTRE_V2_CMD_AUTO:
2180 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2181 break;
2182 fallthrough;
2183 case SPECTRE_V2_CMD_FORCE:
2184 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2185 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2186 break;
2187 }
2188
2189 spectre_v2_enabled = spectre_v2_select_retpoline();
2190 break;
2191
2192 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2193 pr_err(SPECTRE_V2_LFENCE_MSG);
2194 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2195 break;
2196
2197 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2198 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2199 break;
2200
2201 case SPECTRE_V2_CMD_RETPOLINE:
2202 spectre_v2_enabled = spectre_v2_select_retpoline();
2203 break;
2204
2205 case SPECTRE_V2_CMD_IBRS:
2206 spectre_v2_enabled = SPECTRE_V2_IBRS;
2207 break;
2208
2209 case SPECTRE_V2_CMD_EIBRS:
2210 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2211 break;
2212
2213 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2214 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2215 break;
2216
2217 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2218 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2219 break;
2220 }
2221 }
2222
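/*
 * In AUTO mode, upgrade to IBRS when the retbleed mitigation benefits from it
 * on affected Intel parts that lack enhanced IBRS.
 */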
2223 static void __init spectre_v2_update_mitigation(void)
2224 {
2225 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2226 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2227 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2228 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2229 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2230 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2231 boot_cpu_has(X86_FEATURE_IBRS) &&
2232 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2233 spectre_v2_enabled = SPECTRE_V2_IBRS;
2234 }
2235 }
2236
2237 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2238 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2239 }
2240
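/*
 * Program SPEC_CTRL/EFER and set the feature bits (retpoline, kernel IBRS,
 * RSB filling, firmware-call IBRS/IBPB) for the selected Spectre v2 mode.
 */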
2241 static void __init spectre_v2_apply_mitigation(void)
2242 {
2243 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2244 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2245
2246 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2247 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2248 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2249 } else {
2250 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2251 update_spec_ctrl(x86_spec_ctrl_base);
2252 }
2253 }
2254
2255 switch (spectre_v2_enabled) {
2256 case SPECTRE_V2_NONE:
2257 return;
2258
2259 case SPECTRE_V2_EIBRS:
2260 break;
2261
2262 case SPECTRE_V2_IBRS:
2263 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2264 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2265 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2266 break;
2267
2268 case SPECTRE_V2_LFENCE:
2269 case SPECTRE_V2_EIBRS_LFENCE:
2270 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2271 fallthrough;
2272
2273 case SPECTRE_V2_RETPOLINE:
2274 case SPECTRE_V2_EIBRS_RETPOLINE:
2275 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2276 break;
2277 }
2278
2279 /*
2280 * Disable alternate RSB predictions in kernel when indirect CALLs and
2281 	 * JMPs get protection against BHI and Intramode-BTI, but RET
2282 * prediction from a non-RSB predictor is still a risk.
2283 */
2284 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2285 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2286 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2287 spec_ctrl_disable_kernel_rrsba();
2288
2289 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2290
2291 /*
2292 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2293 * and Enhanced IBRS protect firmware too, so enable IBRS around
2294 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2295 * otherwise enabled.
2296 *
2297 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2298 * boot_cpu_has(), because the user might select retpoline on the kernel
2299 	 * command line and, if the CPU supports Enhanced IBRS, the kernel might
2300 	 * unintentionally not enable IBRS around firmware calls.
2301 */
2302 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2303 boot_cpu_has(X86_FEATURE_IBPB) &&
2304 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2305 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2306
2307 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2308 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2309 pr_info("Enabling Speculation Barrier for firmware calls\n");
2310 }
2311
2312 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2313 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2314 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2315 pr_info("Enabling Restricted Speculation for firmware calls\n");
2316 }
2317 }
2318
2319 static void update_stibp_msr(void * __unused)
2320 {
2321 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2322 update_spec_ctrl(val);
2323 }
2324
2325 /* Update x86_spec_ctrl_base in case SMT state changed. */
2326 static void update_stibp_strict(void)
2327 {
2328 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2329
2330 if (sched_smt_active())
2331 mask |= SPEC_CTRL_STIBP;
2332
2333 if (mask == x86_spec_ctrl_base)
2334 return;
2335
2336 pr_info("Update user space SMT mitigation: STIBP %s\n",
2337 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2338 x86_spec_ctrl_base = mask;
2339 on_each_cpu(update_stibp_msr, NULL, 1);
2340 }
2341
2342 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2343 static void update_indir_branch_cond(void)
2344 {
2345 if (sched_smt_active())
2346 static_branch_enable(&switch_to_cond_stibp);
2347 else
2348 static_branch_disable(&switch_to_cond_stibp);
2349 }
2350
2351 #undef pr_fmt
2352 #define pr_fmt(fmt) fmt
2353
2354 /* Update the static key controlling the MDS CPU buffer clear in idle */
2355 static void update_mds_branch_idle(void)
2356 {
2357 /*
2358 * Enable the idle clearing if SMT is active on CPUs which are
2359 * affected only by MSBDS and not any other MDS variant.
2360 *
2361 * The other variants cannot be mitigated when SMT is enabled, so
2362 * clearing the buffers on idle just to prevent the Store Buffer
2363 * repartitioning leak would be a window dressing exercise.
2364 */
2365 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2366 return;
2367
2368 if (sched_smt_active()) {
2369 static_branch_enable(&cpu_buf_idle_clear);
2370 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2371 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2372 static_branch_disable(&cpu_buf_idle_clear);
2373 }
2374 }
2375
2376 #undef pr_fmt
2377 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2378
2379 static enum ssb_mitigation ssb_mode __ro_after_init =
2380 IS_ENABLED(CONFIG_MITIGATION_SSB) ? SPEC_STORE_BYPASS_AUTO : SPEC_STORE_BYPASS_NONE;
2381
2382 static const char * const ssb_strings[] = {
2383 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2384 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2385 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2386 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2387 };
2388
2389 static bool nossb __ro_after_init;
2390
2391 static int __init nossb_parse_cmdline(char *str)
2392 {
2393 nossb = true;
2394 ssb_mode = SPEC_STORE_BYPASS_NONE;
2395 return 0;
2396 }
2397 early_param("nospec_store_bypass_disable", nossb_parse_cmdline);
2398
2399 static int __init ssb_parse_cmdline(char *str)
2400 {
2401 if (!str)
2402 return -EINVAL;
2403
2404 if (nossb)
2405 return 0;
2406
2407 if (!strcmp(str, "auto"))
2408 ssb_mode = SPEC_STORE_BYPASS_AUTO;
2409 else if (!strcmp(str, "on"))
2410 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2411 else if (!strcmp(str, "off"))
2412 ssb_mode = SPEC_STORE_BYPASS_NONE;
2413 else if (!strcmp(str, "prctl"))
2414 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2415 else if (!strcmp(str, "seccomp"))
2416 ssb_mode = IS_ENABLED(CONFIG_SECCOMP) ?
2417 SPEC_STORE_BYPASS_SECCOMP : SPEC_STORE_BYPASS_PRCTL;
2418 else
2419 pr_err("Ignoring unknown spec_store_bypass_disable option (%s).\n",
2420 str);
2421
2422 return 0;
2423 }
2424 early_param("spec_store_bypass_disable", ssb_parse_cmdline);
2425
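/*
 * Resolve SPEC_STORE_BYPASS_AUTO: default to the prctl interface when
 * mitigation is requested, and drop to none when the CPU lacks SSBD.
 */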
2426 static void __init ssb_select_mitigation(void)
2427 {
2428 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) {
2429 ssb_mode = SPEC_STORE_BYPASS_NONE;
2430 return;
2431 }
2432
2433 if (ssb_mode == SPEC_STORE_BYPASS_AUTO) {
2434 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
2435 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2436 else
2437 ssb_mode = SPEC_STORE_BYPASS_NONE;
2438 }
2439
2440 if (!boot_cpu_has(X86_FEATURE_SSBD))
2441 ssb_mode = SPEC_STORE_BYPASS_NONE;
2442
2443 pr_info("%s\n", ssb_strings[ssb_mode]);
2444 }
2445
2446 static void __init ssb_apply_mitigation(void)
2447 {
2448 /*
2449 * We have three CPU feature flags that are in play here:
2450 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2451 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2452 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2453 */
2454 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2455 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2456 /*
2457 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2458 * use a completely different MSR and bit dependent on family.
2459 */
2460 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2461 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2462 x86_amd_ssb_disable();
2463 } else {
2464 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2465 update_spec_ctrl(x86_spec_ctrl_base);
2466 }
2467 }
2468 }
2469
2470 #undef pr_fmt
2471 #define pr_fmt(fmt) "Speculation prctl: " fmt
2472
2473 static void task_update_spec_tif(struct task_struct *tsk)
2474 {
2475 /* Force the update of the real TIF bits */
2476 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2477
2478 /*
2479 * Immediately update the speculation control MSRs for the current
2480 * task, but for a non-current task delay setting the CPU
2481 * mitigation until it is scheduled next.
2482 *
2483 * This can only happen for SECCOMP mitigation. For PRCTL it's
2484 * always the current task.
2485 */
2486 if (tsk == current)
2487 speculation_ctrl_update_current();
2488 }
2489
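/*
 * Handle the PR_SPEC_L1D_FLUSH prctl(); only permitted when conditional L1D
 * flushing on mm switch is enabled.
 */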
2490 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2491 {
2492
2493 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2494 return -EPERM;
2495
2496 switch (ctrl) {
2497 case PR_SPEC_ENABLE:
2498 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2499 return 0;
2500 case PR_SPEC_DISABLE:
2501 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2502 return 0;
2503 default:
2504 return -ERANGE;
2505 }
2506 }
2507
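/*
 * Handle the PR_SPEC_STORE_BYPASS prctl() requests; only valid when the SSB
 * mitigation is in prctl or seccomp mode.
 */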
2508 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2509 {
2510 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2511 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2512 return -ENXIO;
2513
2514 switch (ctrl) {
2515 case PR_SPEC_ENABLE:
2516 /* If speculation is force disabled, enable is not allowed */
2517 if (task_spec_ssb_force_disable(task))
2518 return -EPERM;
2519 task_clear_spec_ssb_disable(task);
2520 task_clear_spec_ssb_noexec(task);
2521 task_update_spec_tif(task);
2522 break;
2523 case PR_SPEC_DISABLE:
2524 task_set_spec_ssb_disable(task);
2525 task_clear_spec_ssb_noexec(task);
2526 task_update_spec_tif(task);
2527 break;
2528 case PR_SPEC_FORCE_DISABLE:
2529 task_set_spec_ssb_disable(task);
2530 task_set_spec_ssb_force_disable(task);
2531 task_clear_spec_ssb_noexec(task);
2532 task_update_spec_tif(task);
2533 break;
2534 case PR_SPEC_DISABLE_NOEXEC:
2535 if (task_spec_ssb_force_disable(task))
2536 return -EPERM;
2537 task_set_spec_ssb_disable(task);
2538 task_set_spec_ssb_noexec(task);
2539 task_update_spec_tif(task);
2540 break;
2541 default:
2542 return -ERANGE;
2543 }
2544 return 0;
2545 }
2546
2547 static bool is_spec_ib_user_controlled(void)
2548 {
2549 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2550 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2551 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2552 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2553 }
2554
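/*
 * Handle the PR_SPEC_INDIRECT_BRANCH prctl() requests that enable or disable
 * indirect branch speculation (STIBP/IBPB) for a task.
 */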
2555 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2556 {
2557 switch (ctrl) {
2558 case PR_SPEC_ENABLE:
2559 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2560 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2561 return 0;
2562
2563 /*
2564 * With strict mode for both IBPB and STIBP, the instruction
2565 * code paths avoid checking this task flag and instead,
2566 * unconditionally run the instruction. However, STIBP and IBPB
2567 * are independent and either can be set to conditionally
2568 * enabled regardless of the mode of the other.
2569 *
2570 * If either is set to conditional, allow the task flag to be
2571 * updated, unless it was force-disabled by a previous prctl
2572 * call. Currently, this is possible on an AMD CPU which has the
2573 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2574 * kernel is booted with 'spectre_v2_user=seccomp', then
2575 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2576 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2577 */
2578 if (!is_spec_ib_user_controlled() ||
2579 task_spec_ib_force_disable(task))
2580 return -EPERM;
2581
2582 task_clear_spec_ib_disable(task);
2583 task_update_spec_tif(task);
2584 break;
2585 case PR_SPEC_DISABLE:
2586 case PR_SPEC_FORCE_DISABLE:
2587 /*
2588 * Indirect branch speculation is always allowed when
2589 * mitigation is force disabled.
2590 */
2591 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2592 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2593 return -EPERM;
2594
2595 if (!is_spec_ib_user_controlled())
2596 return 0;
2597
2598 task_set_spec_ib_disable(task);
2599 if (ctrl == PR_SPEC_FORCE_DISABLE)
2600 task_set_spec_ib_force_disable(task);
2601 task_update_spec_tif(task);
2602 if (task == current)
2603 indirect_branch_prediction_barrier();
2604 break;
2605 default:
2606 return -ERANGE;
2607 }
2608 return 0;
2609 }
2610
2611 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2612 unsigned long ctrl)
2613 {
2614 switch (which) {
2615 case PR_SPEC_STORE_BYPASS:
2616 return ssb_prctl_set(task, ctrl);
2617 case PR_SPEC_INDIRECT_BRANCH:
2618 return ib_prctl_set(task, ctrl);
2619 case PR_SPEC_L1D_FLUSH:
2620 return l1d_flush_prctl_set(task, ctrl);
2621 default:
2622 return -ENODEV;
2623 }
2624 }
2625
2626 #ifdef CONFIG_SECCOMP
2627 void arch_seccomp_spec_mitigate(struct task_struct *task)
2628 {
2629 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2630 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2631 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2632 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2633 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2634 }
2635 #endif
2636
2637 static int l1d_flush_prctl_get(struct task_struct *task)
2638 {
2639 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2640 return PR_SPEC_FORCE_DISABLE;
2641
2642 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2643 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2644 else
2645 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2646 }
2647
2648 static int ssb_prctl_get(struct task_struct *task)
2649 {
2650 switch (ssb_mode) {
2651 case SPEC_STORE_BYPASS_NONE:
2652 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2653 return PR_SPEC_ENABLE;
2654 return PR_SPEC_NOT_AFFECTED;
2655 case SPEC_STORE_BYPASS_DISABLE:
2656 return PR_SPEC_DISABLE;
2657 case SPEC_STORE_BYPASS_SECCOMP:
2658 case SPEC_STORE_BYPASS_PRCTL:
2659 case SPEC_STORE_BYPASS_AUTO:
2660 if (task_spec_ssb_force_disable(task))
2661 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2662 if (task_spec_ssb_noexec(task))
2663 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2664 if (task_spec_ssb_disable(task))
2665 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2666 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2667 }
2668 BUG();
2669 }
2670
2671 static int ib_prctl_get(struct task_struct *task)
2672 {
2673 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2674 return PR_SPEC_NOT_AFFECTED;
2675
2676 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2677 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2678 return PR_SPEC_ENABLE;
2679 else if (is_spec_ib_user_controlled()) {
2680 if (task_spec_ib_force_disable(task))
2681 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2682 if (task_spec_ib_disable(task))
2683 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2684 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2685 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2686 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2687 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2688 return PR_SPEC_DISABLE;
2689 else
2690 return PR_SPEC_NOT_AFFECTED;
2691 }
2692
2693 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2694 {
2695 switch (which) {
2696 case PR_SPEC_STORE_BYPASS:
2697 return ssb_prctl_get(task);
2698 case PR_SPEC_INDIRECT_BRANCH:
2699 return ib_prctl_get(task);
2700 case PR_SPEC_L1D_FLUSH:
2701 return l1d_flush_prctl_get(task);
2702 default:
2703 return -ENODEV;
2704 }
2705 }
2706
2707 void x86_spec_ctrl_setup_ap(void)
2708 {
2709 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2710 update_spec_ctrl(x86_spec_ctrl_base);
2711
2712 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2713 x86_amd_ssb_disable();
2714 }
2715
2716 bool itlb_multihit_kvm_mitigation;
2717 EXPORT_SYMBOL_FOR_KVM(itlb_multihit_kvm_mitigation);
2718
2719 #undef pr_fmt
2720 #define pr_fmt(fmt) "L1TF: " fmt
2721
2722 /* Default mitigation for L1TF-affected CPUs */
2723 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2724 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2725 EXPORT_SYMBOL_FOR_KVM(l1tf_mitigation);
2726 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2727 EXPORT_SYMBOL_FOR_KVM(l1tf_vmx_mitigation);
2728
2729 /*
2730 	 * These CPUs all support a 44-bit physical address space internally in
2731 	 * the cache, but CPUID can report a smaller number of physical address bits.
2732 	 *
2733 	 * The L1TF mitigation uses the topmost address bit for the inversion of
2734 	 * non-present PTEs. When the installed memory reaches into the topmost
2735 	 * address bit due to memory holes, which has been observed on machines
2736 	 * that report 36 physical address bits and have 32G RAM installed,
2737 	 * then the mitigation range check in l1tf_select_mitigation() triggers.
2738 	 * This is a false positive because the mitigation is still possible due to
2739 	 * the fact that the cache uses 44 bits internally. Use the cache bits
2740 	 * instead of the reported physical bits and adjust them on the affected
2741 	 * machines to 44 bits if the reported bits are less than 44.
2742 */
2743 static void override_cache_bits(struct cpuinfo_x86 *c)
2744 {
2745 if (c->x86 != 6)
2746 return;
2747
2748 switch (c->x86_vfm) {
2749 case INTEL_NEHALEM:
2750 case INTEL_WESTMERE:
2751 case INTEL_SANDYBRIDGE:
2752 case INTEL_IVYBRIDGE:
2753 case INTEL_HASWELL:
2754 case INTEL_HASWELL_L:
2755 case INTEL_HASWELL_G:
2756 case INTEL_BROADWELL:
2757 case INTEL_BROADWELL_G:
2758 case INTEL_SKYLAKE_L:
2759 case INTEL_SKYLAKE:
2760 case INTEL_KABYLAKE_L:
2761 case INTEL_KABYLAKE:
2762 if (c->x86_cache_bits < 44)
2763 c->x86_cache_bits = 44;
2764 break;
2765 }
2766 }
2767
2768 static void __init l1tf_select_mitigation(void)
2769 {
2770 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
2771 l1tf_mitigation = L1TF_MITIGATION_OFF;
2772 return;
2773 }
2774
2775 if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
2776 return;
2777
2778 if (!should_mitigate_vuln(X86_BUG_L1TF)) {
2779 l1tf_mitigation = L1TF_MITIGATION_OFF;
2780 return;
2781 }
2782
2783 if (smt_mitigations == SMT_MITIGATIONS_ON)
2784 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2785 else
2786 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2787 }
2788
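/*
 * Apply the chosen L1TF mode: disable SMT where required and enable PTE
 * inversion unless the physical memory layout makes it ineffective.
 */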
2789 static void __init l1tf_apply_mitigation(void)
2790 {
2791 u64 half_pa;
2792
2793 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2794 return;
2795
2796 override_cache_bits(&boot_cpu_data);
2797
2798 switch (l1tf_mitigation) {
2799 case L1TF_MITIGATION_OFF:
2800 case L1TF_MITIGATION_FLUSH_NOWARN:
2801 case L1TF_MITIGATION_FLUSH:
2802 case L1TF_MITIGATION_AUTO:
2803 break;
2804 case L1TF_MITIGATION_FLUSH_NOSMT:
2805 case L1TF_MITIGATION_FULL:
2806 cpu_smt_disable(false);
2807 break;
2808 case L1TF_MITIGATION_FULL_FORCE:
2809 cpu_smt_disable(true);
2810 break;
2811 }
2812
2813 #if CONFIG_PGTABLE_LEVELS == 2
2814 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2815 return;
2816 #endif
2817
2818 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2819 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2820 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2821 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2822 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2823 half_pa);
2824 pr_info("However, doing so will make a part of your RAM unusable.\n");
2825 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2826 return;
2827 }
2828
2829 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2830 }
2831
2832 static int __init l1tf_cmdline(char *str)
2833 {
2834 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2835 return 0;
2836
2837 if (!str)
2838 return -EINVAL;
2839
2840 if (!strcmp(str, "off"))
2841 l1tf_mitigation = L1TF_MITIGATION_OFF;
2842 else if (!strcmp(str, "flush,nowarn"))
2843 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2844 else if (!strcmp(str, "flush"))
2845 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2846 else if (!strcmp(str, "flush,nosmt"))
2847 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2848 else if (!strcmp(str, "full"))
2849 l1tf_mitigation = L1TF_MITIGATION_FULL;
2850 else if (!strcmp(str, "full,force"))
2851 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2852
2853 return 0;
2854 }
2855 early_param("l1tf", l1tf_cmdline);
2856
2857 #undef pr_fmt
2858 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
2859
2860 static const char * const srso_strings[] = {
2861 [SRSO_MITIGATION_NONE] = "Vulnerable",
2862 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
2863 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
2864 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
2865 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
2866 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
2867 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
2868 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
2869 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
2870 };
2871
2872 static int __init srso_parse_cmdline(char *str)
2873 {
2874 if (!str)
2875 return -EINVAL;
2876
2877 if (!strcmp(str, "off"))
2878 srso_mitigation = SRSO_MITIGATION_NONE;
2879 else if (!strcmp(str, "microcode"))
2880 srso_mitigation = SRSO_MITIGATION_MICROCODE;
2881 else if (!strcmp(str, "safe-ret"))
2882 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2883 else if (!strcmp(str, "ibpb"))
2884 srso_mitigation = SRSO_MITIGATION_IBPB;
2885 else if (!strcmp(str, "ibpb-vmexit"))
2886 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2887 else
2888 pr_err("Ignoring unknown SRSO option (%s).", str);
2889
2890 return 0;
2891 }
2892 early_param("spec_rstack_overflow", srso_parse_cmdline);
2893
2894 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2895
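/*
 * Select an SRSO mitigation from the command line choice or, in AUTO mode,
 * from the configured attack vectors and available microcode support.
 */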
2896 static void __init srso_select_mitigation(void)
2897 {
2898 if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
2899 srso_mitigation = SRSO_MITIGATION_NONE;
2900 return;
2901 }
2902
2903 if (srso_mitigation == SRSO_MITIGATION_AUTO) {
2904 /*
2905 * Use safe-RET if user->kernel or guest->host protection is
2906 * required. Otherwise the 'microcode' mitigation is sufficient
2907 * to protect the user->user and guest->guest vectors.
2908 */
2909 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
2910 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
2911 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
2912 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2913 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
2914 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
2915 srso_mitigation = SRSO_MITIGATION_MICROCODE;
2916 } else {
2917 srso_mitigation = SRSO_MITIGATION_NONE;
2918 return;
2919 }
2920 }
2921
2922 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */
2923 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
2924 srso_mitigation = SRSO_MITIGATION_NOSMT;
2925 return;
2926 }
2927
2928 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
2929 pr_warn("IBPB-extending microcode not applied!\n");
2930 pr_warn(SRSO_NOTICE);
2931
2932 /*
2933 		 * Safe-RET provides partial mitigation even without the
2934 		 * microcode, but the other options require the microcode to
2935 		 * provide any mitigation at all.
2936 */
2937 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
2938 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
2939 else
2940 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
2941 }
2942
2943 switch (srso_mitigation) {
2944 case SRSO_MITIGATION_SAFE_RET:
2945 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
2946 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
2947 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2948 goto ibpb_on_vmexit;
2949 }
2950
2951 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2952 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2953 srso_mitigation = SRSO_MITIGATION_NONE;
2954 }
2955 break;
2956 ibpb_on_vmexit:
2957 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
2958 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
2959 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
2960 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
2961 break;
2962 }
2963 fallthrough;
2964 case SRSO_MITIGATION_IBPB:
2965 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
2966 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
2967 srso_mitigation = SRSO_MITIGATION_NONE;
2968 }
2969 break;
2970 default:
2971 break;
2972 }
2973 }
2974
2975 static void __init srso_update_mitigation(void)
2976 {
2977 if (!boot_cpu_has_bug(X86_BUG_SRSO))
2978 return;
2979
2980 /* If retbleed is using IBPB, that works for SRSO as well */
2981 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
2982 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
2983 srso_mitigation = SRSO_MITIGATION_IBPB;
2984
2985 pr_info("%s\n", srso_strings[srso_mitigation]);
2986 }
2987
2988 static void __init srso_apply_mitigation(void)
2989 {
2990 /*
2991 * Clear the feature flag if this mitigation is not selected as that
2992 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
2993 */
2994 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
2995 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
2996
2997 if (srso_mitigation == SRSO_MITIGATION_NONE) {
2998 if (boot_cpu_has(X86_FEATURE_SBPB))
2999 x86_pred_cmd = PRED_CMD_SBPB;
3000 return;
3001 }
3002
3003 switch (srso_mitigation) {
3004 case SRSO_MITIGATION_SAFE_RET:
3005 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3006 /*
3007 * Enable the return thunk for generated code
3008 * like ftrace, static_call, etc.
3009 */
3010 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3011 setup_force_cpu_cap(X86_FEATURE_UNRET);
3012
3013 if (boot_cpu_data.x86 == 0x19) {
3014 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3015 set_return_thunk(srso_alias_return_thunk);
3016 } else {
3017 setup_force_cpu_cap(X86_FEATURE_SRSO);
3018 set_return_thunk(srso_return_thunk);
3019 }
3020 break;
3021 case SRSO_MITIGATION_IBPB:
3022 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3023 /*
3024 * IBPB on entry already obviates the need for
3025 * software-based untraining so clear those in case some
3026 * other mitigation like Retbleed has selected them.
3027 */
3028 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3029 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3030 fallthrough;
3031 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3032 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3033 /*
3034 * There is no need for RSB filling: entry_ibpb() ensures
3035 * all predictions, including the RSB, are invalidated,
3036 * regardless of IBPB implementation.
3037 */
3038 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3039 break;
3040 default:
3041 break;
3042 }
3043 }
3044
3045 #undef pr_fmt
3046 #define pr_fmt(fmt) "VMSCAPE: " fmt
3047
3048 enum vmscape_mitigations {
3049 VMSCAPE_MITIGATION_NONE,
3050 VMSCAPE_MITIGATION_AUTO,
3051 VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
3052 VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
3053 };
3054
3055 static const char * const vmscape_strings[] = {
3056 [VMSCAPE_MITIGATION_NONE] = "Vulnerable",
3057 /* [VMSCAPE_MITIGATION_AUTO] */
3058 [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
3059 [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
3060 };
3061
3062 static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
3063 IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
3064
3065 static int __init vmscape_parse_cmdline(char *str)
3066 {
3067 if (!str)
3068 return -EINVAL;
3069
3070 if (!strcmp(str, "off")) {
3071 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3072 } else if (!strcmp(str, "ibpb")) {
3073 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3074 } else if (!strcmp(str, "force")) {
3075 setup_force_cpu_bug(X86_BUG_VMSCAPE);
3076 vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
3077 } else {
3078 pr_err("Ignoring unknown vmscape=%s option.\n", str);
3079 }
3080
3081 return 0;
3082 }
3083 early_param("vmscape", vmscape_parse_cmdline);
3084
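/*
 * VMSCAPE mitigation needs IBPB; resolve the AUTO choice from the
 * attack-vector policy.
 */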
3085 static void __init vmscape_select_mitigation(void)
3086 {
3087 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
3088 !boot_cpu_has(X86_FEATURE_IBPB)) {
3089 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3090 return;
3091 }
3092
3093 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) {
3094 if (should_mitigate_vuln(X86_BUG_VMSCAPE))
3095 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3096 else
3097 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3098 }
3099 }
3100
3101 static void __init vmscape_update_mitigation(void)
3102 {
3103 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
3104 return;
3105
3106 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
3107 srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
3108 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
3109
3110 pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
3111 }
3112
3113 static void __init vmscape_apply_mitigation(void)
3114 {
3115 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
3116 setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
3117 }
3118
3119 #undef pr_fmt
3120 #define pr_fmt(fmt) fmt
3121
3122 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
3123 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
3124 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
3125 #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
3126
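/*
 * Re-evaluate SMT-dependent mitigation state (STIBP, MDS/TAA/MMIO/TSA buffer
 * clearing, VMSCAPE warnings) when the SMT state changes at runtime.
 */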
3127 void cpu_bugs_smt_update(void)
3128 {
3129 mutex_lock(&spec_ctrl_mutex);
3130
3131 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3132 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3133 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
3134
3135 switch (spectre_v2_user_stibp) {
3136 case SPECTRE_V2_USER_NONE:
3137 break;
3138 case SPECTRE_V2_USER_STRICT:
3139 case SPECTRE_V2_USER_STRICT_PREFERRED:
3140 update_stibp_strict();
3141 break;
3142 case SPECTRE_V2_USER_PRCTL:
3143 case SPECTRE_V2_USER_SECCOMP:
3144 update_indir_branch_cond();
3145 break;
3146 }
3147
3148 switch (mds_mitigation) {
3149 case MDS_MITIGATION_FULL:
3150 case MDS_MITIGATION_AUTO:
3151 case MDS_MITIGATION_VMWERV:
3152 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
3153 pr_warn_once(MDS_MSG_SMT);
3154 update_mds_branch_idle();
3155 break;
3156 case MDS_MITIGATION_OFF:
3157 break;
3158 }
3159
3160 switch (taa_mitigation) {
3161 case TAA_MITIGATION_VERW:
3162 case TAA_MITIGATION_AUTO:
3163 case TAA_MITIGATION_UCODE_NEEDED:
3164 if (sched_smt_active())
3165 pr_warn_once(TAA_MSG_SMT);
3166 break;
3167 case TAA_MITIGATION_TSX_DISABLED:
3168 case TAA_MITIGATION_OFF:
3169 break;
3170 }
3171
3172 switch (mmio_mitigation) {
3173 case MMIO_MITIGATION_VERW:
3174 case MMIO_MITIGATION_AUTO:
3175 case MMIO_MITIGATION_UCODE_NEEDED:
3176 if (sched_smt_active())
3177 pr_warn_once(MMIO_MSG_SMT);
3178 break;
3179 case MMIO_MITIGATION_OFF:
3180 break;
3181 }
3182
3183 switch (tsa_mitigation) {
3184 case TSA_MITIGATION_USER_KERNEL:
3185 case TSA_MITIGATION_VM:
3186 case TSA_MITIGATION_AUTO:
3187 case TSA_MITIGATION_FULL:
3188 /*
3189 * TSA-SQ can potentially lead to info leakage between
3190 * SMT threads.
3191 */
3192 if (sched_smt_active())
3193 static_branch_enable(&cpu_buf_idle_clear);
3194 else
3195 static_branch_disable(&cpu_buf_idle_clear);
3196 break;
3197 case TSA_MITIGATION_NONE:
3198 case TSA_MITIGATION_UCODE_NEEDED:
3199 break;
3200 }
3201
3202 switch (vmscape_mitigation) {
3203 case VMSCAPE_MITIGATION_NONE:
3204 case VMSCAPE_MITIGATION_AUTO:
3205 break;
3206 case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
3207 case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
3208 /*
3209 		 * Hypervisors can be attacked across SMT threads; warn for SMT when
3210 		 * STIBP is not already enabled system-wide.
3211 *
3212 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
3213 */
3214 if (!sched_smt_active() ||
3215 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3216 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
3217 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3218 !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
3219 break;
3220 pr_warn_once(VMSCAPE_MSG_SMT);
3221 break;
3222 }
3223
3224 mutex_unlock(&spec_ctrl_mutex);
3225 }
3226
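/*
 * Select, cross-update and apply all speculation mitigations at boot, before
 * alternatives are patched.
 */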
3227 void __init cpu_select_mitigations(void)
3228 {
3229 /*
3230 * Read the SPEC_CTRL MSR to account for reserved bits which may
3231 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
3232 * init code as it is not enumerated and depends on the family.
3233 */
3234 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
3235 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
3236
3237 /*
3238 		 * A previously running kernel (kexec) may have some controls
3239 * turned ON. Clear them and let the mitigations setup below
3240 * rediscover them based on configuration.
3241 */
3242 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
3243 }
3244
3245 x86_arch_cap_msr = x86_read_arch_cap_msr();
3246
3247 cpu_print_attack_vectors();
3248
3249 /* Select the proper CPU mitigations before patching alternatives: */
3250 spectre_v1_select_mitigation();
3251 spectre_v2_select_mitigation();
3252 retbleed_select_mitigation();
3253 spectre_v2_user_select_mitigation();
3254 ssb_select_mitigation();
3255 l1tf_select_mitigation();
3256 mds_select_mitigation();
3257 taa_select_mitigation();
3258 mmio_select_mitigation();
3259 rfds_select_mitigation();
3260 srbds_select_mitigation();
3261 l1d_flush_select_mitigation();
3262 srso_select_mitigation();
3263 gds_select_mitigation();
3264 its_select_mitigation();
3265 bhi_select_mitigation();
3266 tsa_select_mitigation();
3267 vmscape_select_mitigation();
3268
3269 /*
3270 * After mitigations are selected, some may need to update their
3271 * choices.
3272 */
3273 spectre_v2_update_mitigation();
3274 /*
3275 * retbleed_update_mitigation() relies on the state set by
3276 * spectre_v2_update_mitigation(); specifically it wants to know about
3277 * spectre_v2=ibrs.
3278 */
3279 retbleed_update_mitigation();
3280 /*
3281 * its_update_mitigation() depends on spectre_v2_update_mitigation()
3282 * and retbleed_update_mitigation().
3283 */
3284 its_update_mitigation();
3285
3286 /*
3287 * spectre_v2_user_update_mitigation() depends on
3288 * retbleed_update_mitigation(), specifically the STIBP
3289 * selection is forced for UNRET or IBPB.
3290 */
3291 spectre_v2_user_update_mitigation();
3292 mds_update_mitigation();
3293 taa_update_mitigation();
3294 mmio_update_mitigation();
3295 rfds_update_mitigation();
3296 bhi_update_mitigation();
3297 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
3298 srso_update_mitigation();
3299 vmscape_update_mitigation();
3300
3301 spectre_v1_apply_mitigation();
3302 spectre_v2_apply_mitigation();
3303 retbleed_apply_mitigation();
3304 spectre_v2_user_apply_mitigation();
3305 ssb_apply_mitigation();
3306 l1tf_apply_mitigation();
3307 mds_apply_mitigation();
3308 taa_apply_mitigation();
3309 mmio_apply_mitigation();
3310 rfds_apply_mitigation();
3311 srbds_apply_mitigation();
3312 srso_apply_mitigation();
3313 gds_apply_mitigation();
3314 its_apply_mitigation();
3315 bhi_apply_mitigation();
3316 tsa_apply_mitigation();
3317 vmscape_apply_mitigation();
3318 }
3319
3320 #ifdef CONFIG_SYSFS
3321
3322 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3323
3324 #if IS_ENABLED(CONFIG_KVM_INTEL)
3325 static const char * const l1tf_vmx_states[] = {
3326 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3327 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3328 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3329 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3330 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3331 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3332 };
3333
3334 static ssize_t l1tf_show_state(char *buf)
3335 {
3336 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3337 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3338
3339 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3340 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3341 sched_smt_active())) {
3342 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3343 l1tf_vmx_states[l1tf_vmx_mitigation]);
3344 }
3345
3346 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3347 l1tf_vmx_states[l1tf_vmx_mitigation],
3348 sched_smt_active() ? "vulnerable" : "disabled");
3349 }
3350
3351 static ssize_t itlb_multihit_show_state(char *buf)
3352 {
3353 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3354 !boot_cpu_has(X86_FEATURE_VMX))
3355 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3356 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3357 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3358 else if (itlb_multihit_kvm_mitigation)
3359 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3360 else
3361 return sysfs_emit(buf, "KVM: Vulnerable\n");
3362 }
3363 #else
3364 static ssize_t l1tf_show_state(char *buf)
3365 {
3366 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3367 }
3368
3369 static ssize_t itlb_multihit_show_state(char *buf)
3370 {
3371 return sysfs_emit(buf, "Processor vulnerable\n");
3372 }
3373 #endif
3374
3375 static ssize_t mds_show_state(char *buf)
3376 {
3377 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3378 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3379 mds_strings[mds_mitigation]);
3380 }
3381
3382 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3383 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3384 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3385 sched_smt_active() ? "mitigated" : "disabled"));
3386 }
3387
3388 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3389 sched_smt_active() ? "vulnerable" : "disabled");
3390 }
3391
3392 static ssize_t tsx_async_abort_show_state(char *buf)
3393 {
3394 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3395 (taa_mitigation == TAA_MITIGATION_OFF))
3396 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3397
3398 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3399 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3400 taa_strings[taa_mitigation]);
3401 }
3402
3403 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3404 sched_smt_active() ? "vulnerable" : "disabled");
3405 }
3406
3407 static ssize_t mmio_stale_data_show_state(char *buf)
3408 {
3409 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3410 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3411
3412 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3413 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3414 mmio_strings[mmio_mitigation]);
3415 }
3416
3417 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3418 sched_smt_active() ? "vulnerable" : "disabled");
3419 }
3420
rfds_show_state(char * buf)3421 static ssize_t rfds_show_state(char *buf)
3422 {
3423 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3424 }
3425
static ssize_t old_microcode_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return sysfs_emit(buf, "Unknown: running under hypervisor\n");

	return sysfs_emit(buf, "Vulnerable\n");
}

static ssize_t its_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
}

static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return "; STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return "; STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return "; STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return "; STIBP: conditional";
		break;
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return "; IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return "; IBPB: conditional";
		return "; IBPB: disabled";
	}
	return "";
}

static char *pbrsb_eibrs_state(void)
{
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return "; PBRSB-eIBRS: SW sequence";
		else
			return "; PBRSB-eIBRS: Vulnerable";
	} else {
		return "; PBRSB-eIBRS: Not affected";
	}
}

static const char *spectre_bhi_state(void)
{
	if (!boot_cpu_has_bug(X86_BUG_BHI))
		return "; BHI: Not affected";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
		return "; BHI: BHI_DIS_S";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
		return "; BHI: SW loop, KVM: SW loop";
	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
		 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
		 rrsba_disabled)
		return "; BHI: Retpoline";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
		return "; BHI: Vulnerable, KVM: SW loop";

	return "; BHI: Vulnerable";
}

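/*
 * eIBRS on its own is considered insufficient when unprivileged eBPF is
 * enabled, since eBPF gives an attacker in-kernel gadgets to target; those
 * combinations are reported as vulnerable outright.
 */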
static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_bhi_state(),
			  /* this should always be at the end */
			  spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t srso_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}

static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t tsa_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}

static ssize_t vmscape_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
}

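/*
 * Common entry point for the sysfs vulnerability files: return "Not affected"
 * when the bug bit is not set, dispatch to the per-vulnerability helper above
 * otherwise, and fall back to plain "Vulnerable" for anything unhandled.
 */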
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	case X86_BUG_OLD_MICROCODE:
		return old_microcode_show_state(buf);

	case X86_BUG_ITS:
		return its_show_state(buf);

	case X86_BUG_TSA:
		return tsa_show_state(buf);

	case X86_BUG_VMSCAPE:
		return vmscape_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}

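/*
 * The functions below override the __weak "Not affected" stubs in
 * drivers/base/cpu.c and back the per-vulnerability files under
 * /sys/devices/system/cpu/vulnerabilities/.
 */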
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}

ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
}

ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}

ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}

ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
}
#endif

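/*
 * Reached only if the unpatched default return thunk is still in use at
 * runtime, i.e. the selected return thunk was never patched in.
 */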
void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}
