xref: /linux/arch/x86/kernel/cet.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: GPL-2.0

#include <linux/ptrace.h>
#include <asm/bugs.h>
#include <asm/msr.h>
#include <asm/traps.h>

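/*
 * #CP error code layout: the low 15 bits (CP_EC) encode the cause of the
 * fault (near RET, far RET/IRET, missing ENDBRANCH, RSTORSSP or SETSSBSY),
 * and bit 15 (CP_ENCL) is set when the fault originated inside an enclave.
 */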
enum cp_error_code {
	CP_EC        = (1 << 15) - 1,

	CP_RET       = 1,
	CP_IRET      = 2,
	CP_ENDBR     = 3,
	CP_RSTRORSSP = 4,
	CP_SETSSBSY  = 5,

	CP_ENCL      = 1 << 15,
};

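/* Human-readable #CP cause strings, indexed by the CP_EC part of the error code. */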
static const char cp_err[][10] = {
	[0] = "unknown",
	[1] = "near ret",
	[2] = "far/iret",
	[3] = "endbranch",
	[4] = "rstorssp",
	[5] = "setssbsy",
};

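/* Map an error code to a cause string; out-of-range values become "unknown". */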
static const char *cp_err_string(unsigned long error_code)
{
	unsigned int cpec = error_code & CP_EC;

	if (cpec >= ARRAY_SIZE(cp_err))
		cpec = 0;
	return cp_err[cpec];
}

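/* Warn (once) about a #CP taken in a mode where none is expected. */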
static void do_unexpected_cp(struct pt_regs *regs, unsigned long error_code)
{
	WARN_ONCE(1, "Unexpected %s #CP, error_code: %s\n",
		  user_mode(regs) ? "user mode" : "kernel mode",
		  cp_err_string(error_code));
}

static DEFINE_RATELIMIT_STATE(cpf_rate, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	unsigned long ssp;

	/*
	 * An exception was just taken from userspace. Since interrupts are disabled
	 * here, no scheduling should have messed with the registers yet and they
	 * will be whatever is live in userspace. So read the SSP before enabling
	 * interrupts so locking the fpregs to do it later is not required.
	 */
	rdmsrq(MSR_IA32_PL3_SSP, ssp);

	cond_local_irq_enable(regs);

	tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_CP;

	/* Ratelimit to prevent log spamming. */
	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    __ratelimit(&cpf_rate)) {
		pr_emerg("%s[%d] control protection ip:%lx sp:%lx ssp:%lx error:%lx(%s)%s",
			 tsk->comm, task_pid_nr(tsk),
			 regs->ip, regs->sp, ssp, error_code,
			 cp_err_string(error_code),
			 error_code & CP_ENCL ? " in enclave" : "");
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_fault(SIGSEGV, SEGV_CPERR, (void __user *)0);
	cond_local_irq_disable(regs);
}

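/* Whether a kernel missing-ENDBR #CP is fatal; "ibt=warn" downgrades it to a warning. */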
static __ro_after_init bool ibt_fatal = true;

/*
 * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
 *
 * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
 * the WFE state of the interrupted context needs to be cleared to let execution
 * continue.  Otherwise when the CPU resumes from the instruction that just
 * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
 * enters a dead loop.
 *
 * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
 * set WFE.  But FRED provides space on the entry stack (in an expanded CS area)
 * to save and restore the WFE state, thus the WFE state is no longer clobbered,
 * so software must clear it.
 */
static void ibt_clear_fred_wfe(struct pt_regs *regs)
{
	/*
	 * No need to do any FRED checks.
	 *
	 * For IDT event delivery, the high-order 48 bits of CS are pushed
	 * as 0s into the stack, and later IRET ignores these bits.
	 *
	 * For FRED, a test to check if fred_cs.wfe is set would be dropped
	 * by compilers.
	 */
	regs->fred_cs.wfe = 0;
}

static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	if ((error_code & CP_EC) != CP_ENDBR) {
		do_unexpected_cp(regs, error_code);
		return;
	}

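	/*
	 * The kernel IBT selftest deliberately provokes a missing-ENDBR #CP
	 * at ibt_selftest_noendbr: fix up the selftest result in regs->ax,
	 * clear the FRED WFE state and let execution continue.
	 */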
	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
		regs->ax = 0;
		ibt_clear_fred_wfe(regs);
		return;
	}

	pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
	if (!ibt_fatal) {
		printk(KERN_DEFAULT CUT_HERE);
		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
		ibt_clear_fred_wfe(regs);
		return;
	}
	BUG();
}

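/*
 * Handle the "ibt=" kernel command line parameter: "ibt=off" disables
 * kernel IBT altogether, "ibt=warn" makes a missing ENDBR warn instead
 * of BUG().
 */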
static int __init ibt_setup(char *str)
{
	if (!strcmp(str, "off"))
		setup_clear_cpu_cap(X86_FEATURE_IBT);

	if (!strcmp(str, "warn"))
		ibt_fatal = false;

	return 1;
}

__setup("ibt=", ibt_setup);

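/*
 * Control Protection exception (#CP) handler: user-mode faults are delivered
 * to the task as SIGSEGV when user shadow stack is enabled, kernel-mode faults
 * are treated as IBT violations when kernel IBT is enabled, and anything else
 * is unexpected.
 */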
DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
{
	if (user_mode(regs)) {
		if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
			do_user_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	} else {
		if (cpu_feature_enabled(X86_FEATURE_IBT))
			do_kernel_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	}
}