// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <linux/execmem.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>
#include <asm/cfi.h>
#include <asm/ibt.h>
#include <asm/set_memory.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

#define DA_ALL		(~0)
#define DA_ALT		0x01
#define DA_RET		0x02
#define DA_RETPOLINE	0x04
#define DA_ENDBR	0x08
#define DA_SMP		0x10

static unsigned int debug_alternative;

static int __init debug_alt(char *str)
{
	if (str && *str == '=')
		str++;

	if (!str || kstrtouint(str, 0, &debug_alternative))
		debug_alternative = DA_ALL;

	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(type, fmt, args...)					\
do {									\
	if (debug_alternative & DA_##type)				\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(type, buf, len, fmt, args...)			\
do {									\
	if (unlikely(debug_alternative & DA_##type)) {			\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
#ifdef CONFIG_64BIT
	BYTES_NOP9,
	BYTES_NOP10,
	BYTES_NOP11,
#endif
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
#ifdef CONFIG_64BIT
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
#endif
};

#ifdef CONFIG_FINEIBT
static bool cfi_paranoid __ro_after_init;
#endif

#ifdef CONFIG_MITIGATION_ITS

#ifdef CONFIG_MODULES
static struct module *its_mod;
#endif
static void *its_page;
static unsigned int its_offset;

/* Initialize a thunk with the "jmp *reg; int3" instructions. */
static void *its_init_thunk(void *thunk, int reg)
{
	u8 *bytes = thunk;
	int offset = 0;
	int i = 0;

#ifdef CONFIG_FINEIBT
	if (cfi_paranoid) {
		/*
		 * When ITS uses indirect branch thunk the fineibt_paranoid
		 * caller sequence doesn't fit in the caller site. So put the
		 * remaining part of the sequence (<ea> + JNE) into the ITS
		 * thunk.
		 */
		bytes[i++] = 0xea; /* invalid instruction */
		bytes[i++] = 0x75; /* JNE */
		bytes[i++] = 0xfd;

		offset = 1;
	}
#endif

	if (reg >= 8) {
		bytes[i++] = 0x41; /* REX.B prefix */
		reg -= 8;
	}
	bytes[i++] = 0xff;
	bytes[i++] = 0xe0 + reg; /* jmp *reg */
	bytes[i++] = 0xcc;

	return thunk + offset;
}

#ifdef CONFIG_MODULES
void its_init_mod(struct module *mod)
{
	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
		return;

	mutex_lock(&text_mutex);
	its_mod = mod;
	its_page = NULL;
}

void its_fini_mod(struct module *mod)
{
	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
		return;

	WARN_ON_ONCE(its_mod != mod);

	its_mod = NULL;
	its_page = NULL;
	mutex_unlock(&text_mutex);

	for (int i = 0; i < mod->its_num_pages; i++) {
		void *page = mod->its_page_array[i];
		execmem_restore_rox(page, PAGE_SIZE);
	}
}

void its_free_mod(struct module *mod)
{
	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
		return;

	for (int i = 0; i < mod->its_num_pages; i++) {
		void *page = mod->its_page_array[i];
		execmem_free(page);
	}
	kfree(mod->its_page_array);
}
#endif /* CONFIG_MODULES */

static void *its_alloc(void)
{
	void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);

	if (!page)
		return NULL;

#ifdef CONFIG_MODULES
	if (its_mod) {
		void *tmp = krealloc(its_mod->its_page_array,
				     (its_mod->its_num_pages+1) * sizeof(void *),
				     GFP_KERNEL);
		if (!tmp)
			return NULL;

		its_mod->its_page_array = tmp;
		its_mod->its_page_array[its_mod->its_num_pages++] = page;

		execmem_make_temp_rw(page, PAGE_SIZE);
	}
#endif /* CONFIG_MODULES */

	return no_free_ptr(page);
}

static void *its_allocate_thunk(int reg)
{
	int size = 3 + (reg / 8);
	void *thunk;

#ifdef CONFIG_FINEIBT
	/*
	 * The ITS thunk contains an indirect jump and an int3 instruction so
	 * its size is 3 or 4 bytes depending on the register used. If CFI
	 * paranoid is used then 3 extra bytes are added in the ITS thunk to
	 * complete the fineibt_paranoid caller sequence.
	 */
	if (cfi_paranoid)
		size += 3;
#endif

	if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
		its_page = its_alloc();
		if (!its_page) {
			pr_err("ITS page allocation failed\n");
			return NULL;
		}
		memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
		its_offset = 32;
	}

	/*
	 * If the indirect branch instruction will be in the lower half
	 * of a cacheline, then update the offset to reach the upper half.
	 */
	if ((its_offset + size - 1) % 64 < 32)
		its_offset = ((its_offset - 1) | 0x3F) + 33;
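
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with its_offset == 10 and a 4-byte thunk, the last byte
	 * would land at offset 13, i.e. the lower half of the cacheline.
	 * The adjustment ((10 - 1) | 0x3F) + 33 == 96 moves the thunk to
	 * byte 32 of the next cacheline, so it now ends at offset 35,
	 * safely in the upper half.
	 */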

	thunk = its_page + its_offset;
	its_offset += size;

	return its_init_thunk(thunk, reg);
}

u8 *its_static_thunk(int reg)
{
	u8 *thunk = __x86_indirect_its_thunk_array[reg];

#ifdef CONFIG_FINEIBT
	/* Paranoid thunk starts 2 bytes before */
	if (cfi_paranoid)
		return thunk - 2;
#endif
	return thunk;
}

#endif

/*
 * Nomenclature for variable names to simplify and clarify this code and ease
 * any potential staring at it:
 *
 * @instr: source address of the original instructions in the kernel text as
 * generated by the compiler.
 *
 * @buf: temporary buffer on which the patching operates. This buffer is
 * eventually text-poked into the kernel image.
 *
 * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
 * in the .altinstr_replacement section.
 */

/*
 * Fill the buffer with a single effective instruction of size @len.
 *
 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
 * for every single-byte NOP, try to generate the maximally available NOP of
 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for
 * each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
 * *jump* over instead of executing long and daft NOPs.
 */
static void add_nop(u8 *buf, unsigned int len)
{
	u8 *target = buf + len;

	if (!len)
		return;

	if (len <= ASM_NOP_MAX) {
		memcpy(buf, x86_nops[len], len);
		return;
	}

	if (len < 128) {
		__text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
		buf += JMP8_INSN_SIZE;
	} else {
		__text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
		buf += JMP32_INSN_SIZE;
	}

	for (; buf < target; buf++)
		*buf = INT3_INSN_OPCODE;
}
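
/*
 * Illustrative example (not part of the original source): add_nop(buf, 20)
 * on a 64-bit kernel exceeds ASM_NOP_MAX (11), so instead of chaining NOPs
 * the buffer becomes:
 *
 *	eb 12			jmp	+18
 *	cc cc .. cc		18 x int3
 *
 * The JMP transfers control straight past the padding, and the INT3 bytes
 * trap should anything ever branch into the middle of it.
 */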

extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __cfi_sites[], __cfi_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Matches NOP and NOPL, not any of the other possible NOPs.
 */
static bool insn_is_nop(struct insn *insn)
{
	/* Anything NOP, but no REP NOP */
	if (insn->opcode.bytes[0] == 0x90 &&
	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
		return true;

	/* NOPL */
	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
		return true;

	/* TODO: more nops */

	return false;
}

/*
 * Find the offset of the first non-NOP instruction starting at @offset
 * but no further than @len.
 */
static int skip_nops(u8 *buf, int offset, int len)
{
	struct insn insn;

	for (; offset < len; offset += insn.length) {
		if (insn_decode_kernel(&insn, &buf[offset]))
			break;

		if (!insn_is_nop(&insn))
			break;
	}

	return offset;
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
	for (int next, i = 0; i < len; i = next) {
		struct insn insn;

		if (insn_decode_kernel(&insn, &buf[i]))
			return;

		next = i + insn.length;

		if (insn_is_nop(&insn)) {
			int nop = i;

			/* Has the NOP already been optimized? */
			if (i + insn.length == len)
				return;

			next = skip_nops(buf, next, len);

			add_nop(buf + nop, next - nop);
			DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
		}
	}
}

/*
 * In this context, "source" is where the instructions are placed in the
 * section .altinstr_replacement, for example during kernel build by the
 * toolchain.
 * "Destination" is where the instructions are being patched in by this
 * machinery.
 *
 * The source offset is:
 *
 *   src_imm = target - src_next_ip			(1)
 *
 * and the target offset is:
 *
 *   dst_imm = target - dst_next_ip			(2)
 *
 * so rework (1) as an expression for target like:
 *
 *   target = src_imm + src_next_ip			(1a)
 *
 * and substitute in (2) to get:
 *
 *   dst_imm = (src_imm + src_next_ip) - dst_next_ip	(3)
 *
 * Now, since the instruction stream is 'identical' at src and dst (it
 * is being copied after all) it can be stated that:
 *
 *   src_next_ip = src + ip_offset
 *   dst_next_ip = dst + ip_offset			(4)
 *
 * Substitute (4) in (3) and observe ip_offset being cancelled out to
 * obtain:
 *
 *   dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
 *           = src_imm + src - dst + ip_offset - ip_offset
 *           = src_imm + src - dst			(5)
 *
 * IOW, only the relative displacement of the code block matters.
 */
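
/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose a "call f" with src_imm = 0x100 sits in .altinstr_replacement
 * at src = 0x2000 and is copied to dst = 0x1000. Per (5):
 *
 *   dst_imm = 0x100 + 0x2000 - 0x1000 = 0x1100
 *
 * which indeed re-targets f: the call previously reached
 * src_next_ip + 0x100 and now reaches dst_next_ip + 0x1100, the same
 * absolute address.
 */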

#define apply_reloc_n(n_, p_, d_)			\
	do {						\
		s32 v = *(s##n_ *)(p_);			\
		v += (d_);				\
		BUG_ON((v >> 31) != (v >> (n_-1)));	\
		*(s##n_ *)(p_) = (s##n_)v;		\
	} while (0)


static __always_inline
void apply_reloc(int n, void *ptr, uintptr_t diff)
{
	switch (n) {
	case 1: apply_reloc_n(8, ptr, diff); break;
	case 2: apply_reloc_n(16, ptr, diff); break;
	case 4: apply_reloc_n(32, ptr, diff); break;
	default: BUG();
	}
}

static __always_inline
bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
{
	u8 *target = src + offset;
	/*
	 * If the target is inside the patched block, it's relative to the
	 * block itself and does not need relocation.
	 */
	return (target < src || target > src + src_len);
}

static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
	for (int next, i = 0; i < instrlen; i = next) {
		struct insn insn;

		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
			return;

		next = i + insn.length;

		switch (insn.opcode.bytes[0]) {
		case 0x0f:
			if (insn.opcode.bytes[1] < 0x80 ||
			    insn.opcode.bytes[1] > 0x8f)
				break;

			fallthrough;	/* Jcc.d32 */
		case 0x70 ... 0x7f:	/* Jcc.d8 */
		case JMP8_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
		case CALL_INSN_OPCODE:
			if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
				apply_reloc(insn.immediate.nbytes,
					    buf + i + insn_offset_immediate(&insn),
					    repl - instr);
			}

			/*
			 * Where possible, convert JMP.d32 into JMP.d8.
			 */
			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
				s32 imm = insn.immediate.value;
				imm += repl - instr;
				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
				if ((imm >> 31) == (imm >> 7)) {
					buf[i+0] = JMP8_INSN_OPCODE;
					buf[i+1] = (s8)imm;

					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
				}
			}
			break;
		}

		if (insn_rip_relative(&insn)) {
			if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
				apply_reloc(insn.displacement.nbytes,
					    buf + i + insn_offset_displacement(&insn),
					    repl - instr);
			}
		}
	}
}

void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
{
	__apply_relocation(buf, instr, instrlen, repl, repl_len);
	optimize_nops(instr, buf, instrlen);
}

/* Low-level backend functions usable from alternative code replacements. */
DEFINE_ASM_FUNC(nop_func, "", .entry.text);
EXPORT_SYMBOL_GPL(nop_func);

noinstr void BUG_func(void)
{
	BUG();
}
EXPORT_SYMBOL(BUG_func);

#define CALL_RIP_REL_OPCODE	0xff
#define CALL_RIP_REL_MODRM	0x15

/*
 * Rewrite the "call BUG_func" replacement to point to the target of the
 * indirect pv_ops call "call *disp(%ip)".
 */
static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
{
	void *target, *bug = &BUG_func;
	s32 disp;

	if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) {
		pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n");
		BUG();
	}

	if (a->instrlen != 6 ||
	    instr[0] != CALL_RIP_REL_OPCODE ||
	    instr[1] != CALL_RIP_REL_MODRM) {
		pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
		BUG();
	}

	/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
	disp = *(s32 *)(instr + 2);
#ifdef CONFIG_X86_64
	/* ff 15 00 00 00 00		call *0x0(%rip) */
	/* target address is stored at "next instruction + disp". */
	target = *(void **)(instr + a->instrlen + disp);
#else
	/* ff 15 00 00 00 00		call *0x0 */
	/* target address is stored at disp. */
	target = *(void **)disp;
#endif
	if (!target)
		target = bug;

	/* (BUG_func - .) + (target - BUG_func) := target - . */
	*(s32 *)(insn_buff + 1) += target - bug;

	if (target == &nop_func)
		return 0;

	return 5;
}

static inline u8 *instr_va(struct alt_instr *i)
{
	return (u8 *)&i->instr_offset + i->instr_offset;
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *instr, *replacement;
	struct alt_instr *a, *b;

	DPRINTK(ALT, "alt table %px, -> %px", start, end);

	/*
	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
	 * During the process, KASAN becomes confused seeing partial LA57
	 * conversion and triggers a false-positive out-of-bound report.
	 *
	 * Disable KASAN until the patching is complete.
	 */
	kasan_disable_current();

	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;

		/*
		 * In case of nested ALTERNATIVE()s the outer alternative might
		 * add more padding. To ensure consistent patching find the max
		 * padding for all alt_instr entries for this site (nested
		 * alternatives result in consecutive entries).
		 */
		for (b = a+1; b < end && instr_va(b) == instr_va(a); b++) {
			u8 len = max(a->instrlen, b->instrlen);
			a->instrlen = b->instrlen = len;
		}

		instr = instr_va(a);
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALT_FLAG_NOT is set to mean,
		 *   patch if feature is *NOT* present.
		 */
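		/*
		 * Spelled out (illustrative note, not from the original
		 * source): with ALT_FLAG_NOT clear, !boot_cpu_has() == !0
		 * is true exactly when the feature is absent, so the branch
		 * below keeps the original code. With ALT_FLAG_NOT set the
		 * test inverts and the branch keeps the original when the
		 * feature IS present.
		 */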
		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
			memcpy(insn_buff, instr, a->instrlen);
			optimize_nops(instr, insn_buff, a->instrlen);
			text_poke_early(instr, insn_buff, a->instrlen);
			continue;
		}

		DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->flags);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		if (a->flags & ALT_FLAG_DIRECT_CALL) {
			insn_buff_sz = alt_replace_call(instr, insn_buff, a);
			if (insn_buff_sz < 0)
				continue;
		}

		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);

		DUMP_BYTES(ALT, instr, a->instrlen, "%px:   old_insn: ", instr);
		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}

	kasan_enable_current();
}

static inline bool is_jcc32(struct insn *insn)
{
	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}

#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL)

/*
 * CALL/JMP *%\reg
 */
static int emit_indirect(int op, int reg, u8 *bytes)
{
	int i = 0;
	u8 modrm;

	switch (op) {
	case CALL_INSN_OPCODE:
		modrm = 0x10; /* Reg = 2; CALL r/m */
		break;

	case JMP32_INSN_OPCODE:
		modrm = 0x20; /* Reg = 4; JMP r/m */
		break;

	default:
		WARN_ON_ONCE(1);
		return -1;
	}

	if (reg >= 8) {
		bytes[i++] = 0x41; /* REX.B prefix */
		reg -= 8;
	}

	modrm |= 0xc0; /* Mod = 3 */
	modrm += reg;

	bytes[i++] = 0xff; /* opcode */
	bytes[i++] = modrm;

	return i;
}
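
/*
 * Illustrative encoding (not part of the original source): for
 * emit_indirect(CALL_INSN_OPCODE, 11, bytes), reg 11 (%r11) needs the
 * REX.B prefix, leaving reg = 3, so modrm = (0x10 | 0xc0) + 3 = 0xd3
 * and the emitted bytes are:
 *
 *	41 ff d3		call *%r11
 */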

static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
			     void *call_dest, void *jmp_dest)
{
	u8 op = insn->opcode.bytes[0];
	int i = 0;

	/*
	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
	 * tail-calls. Deal with them.
	 */
	if (is_jcc32(insn)) {
		bytes[i++] = op;
		op = insn->opcode.bytes[1];
		goto clang_jcc;
	}

	if (insn->length == 6)
		bytes[i++] = 0x2e; /* CS-prefix */

	switch (op) {
	case CALL_INSN_OPCODE:
		__text_gen_insn(bytes+i, op, addr+i,
				call_dest,
				CALL_INSN_SIZE);
		i += CALL_INSN_SIZE;
		break;

	case JMP32_INSN_OPCODE:
clang_jcc:
		__text_gen_insn(bytes+i, op, addr+i,
				jmp_dest,
				JMP32_INSN_SIZE);
		i += JMP32_INSN_SIZE;
		break;

	default:
		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
		return -1;
	}

	WARN_ON_ONCE(i != insn->length);

	return i;
}

static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
	return __emit_trampoline(addr, insn, bytes,
				 __x86_indirect_call_thunk_array[reg],
				 __x86_indirect_jump_thunk_array[reg]);
}

#ifdef CONFIG_MITIGATION_ITS
static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
	u8 *thunk = __x86_indirect_its_thunk_array[reg];
	u8 *tmp = its_allocate_thunk(reg);

	if (tmp)
		thunk = tmp;

	return __emit_trampoline(addr, insn, bytes, thunk, thunk);
}

/* Check if an indirect branch is at ITS-unsafe address */
static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
{
	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
		return false;

	/* Indirect branch opcode is 2 or 3 bytes depending on reg */
	addr += 1 + reg / 8;

	/* Lower-half of the cacheline? */
	return !(addr & 0x20);
}
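
/*
 * Illustrative check (not part of the original source): for reg == 11,
 * "call *%r11" encodes as 41 ff d3, so the last byte sits at addr + 2.
 * A branch starting at an address ending in ...0x10 has its last byte
 * at ...0x12, where bit 5 is clear (lower cacheline half): ITS-unsafe,
 * so it is routed through a thunk. Starting at ...0x30 the last byte
 * lands at ...0x32, bit 5 set (upper half): safe, patched inline.
 */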
#else /* CONFIG_MITIGATION_ITS */

#ifdef CONFIG_FINEIBT
static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
{
	return false;
}
#endif

#endif /* CONFIG_MITIGATION_ITS */

/*
 * Rewrite the compiler generated retpoline thunk calls.
 *
 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
 * indirect instructions, avoiding the extra indirection.
 *
 * For example, convert:
 *
 *   CALL __x86_indirect_thunk_\reg
 *
 * into:
 *
 *   CALL *%\reg
 *
 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 */
static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
{
	retpoline_thunk_t *target;
	int reg, ret, i = 0;
	u8 op, cc;

	target = addr + insn->length + insn->immediate.value;
	reg = target - __x86_indirect_thunk_array;

	if (WARN_ON_ONCE(reg & ~0xf))
		return -1;

	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
	BUG_ON(reg == 4);

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			return emit_call_track_retpoline(addr, insn, reg, bytes);

		return -1;
	}

	op = insn->opcode.bytes[0];

	/*
	 * Convert:
	 *
	 *   Jcc.d32 __x86_indirect_thunk_\reg
	 *
	 * into:
	 *
	 *   Jncc.d8 1f
	 *   [ LFENCE ]
	 *   JMP *%\reg
	 *   [ NOP ]
	 * 1:
	 */
	if (is_jcc32(insn)) {
		cc = insn->opcode.bytes[1] & 0xf;
		cc ^= 1; /* invert condition */

		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */

		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
		op = JMP32_INSN_OPCODE;
	}

	/*
	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
	 */
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		bytes[i++] = 0x0f;
		bytes[i++] = 0xae;
		bytes[i++] = 0xe8; /* LFENCE */
	}

#ifdef CONFIG_MITIGATION_ITS
	/*
	 * Check if the address of last byte of emitted-indirect is in
	 * lower-half of the cacheline. Such branches need ITS mitigation.
	 */
	if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
		return emit_its_trampoline(addr, insn, reg, bytes);
#endif

	ret = emit_indirect(op, reg, bytes + i);
	if (ret < 0)
		return ret;
	i += ret;

	/*
	 * The compiler is supposed to EMIT an INT3 after every unconditional
	 * JMP instruction due to AMD BTC. However, if the compiler is too old
	 * or MITIGATION_SLS isn't enabled, we still need an INT3 after
	 * indirect JMPs even on Intel.
	 */
	if (op == JMP32_INSN_OPCODE && i < insn->length)
		bytes[i++] = INT3_INSN_OPCODE;

	for (; i < insn->length;)
		bytes[i++] = BYTES_NOP1;

	return i;
}
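
/*
 * Illustrative conversion (not from the original source), for
 * spectre_v2=off: the 6-byte "jne __x86_indirect_thunk_r11"
 * (0f 85 <rel32>) becomes
 *
 *	74 04			je  1f
 *	41 ff e3		jmp *%r11
 *	cc			int3
 * 1:
 *
 * i.e. the condition is inverted into a short branch over the now
 * inline indirect JMP, and the leftover byte becomes an INT3 courtesy
 * of the AMD BTC rule above.
 */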

/*
 * Generated by 'objtool --retpoline'.
 */
void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op1, op2;
		u8 *dest;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op1 = insn.opcode.bytes[0];
		op2 = insn.opcode.bytes[1];

		switch (op1) {
		case 0x70 ... 0x7f:	/* Jcc.d8 */
			/* See cfi_paranoid. */
			WARN_ON_ONCE(cfi_mode != CFI_FINEIBT);
			continue;

		case CALL_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
			/* Check for cfi_paranoid + ITS */
			dest = addr + insn.length + insn.immediate.value;
			if (dest[-1] == 0xea && (dest[0] & 0xf0) == 0x70) {
				WARN_ON_ONCE(cfi_mode != CFI_FINEIBT);
				continue;
			}
			break;

		case 0x0f: /* escape */
			if (op2 >= 0x80 && op2 <= 0x8f)
				break;
			fallthrough;
		default:
			WARN_ON_ONCE(1);
			continue;
		}

		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_retpoline(addr, &insn, bytes);
		if (len == insn.length) {
			optimize_nops(addr, bytes, len);
			DUMP_BYTES(RETPOLINE, ((u8*)addr),  len, "%px: orig: ", addr);
			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}

#ifdef CONFIG_MITIGATION_RETHUNK

bool cpu_wants_rethunk(void)
{
	return cpu_feature_enabled(X86_FEATURE_RETHUNK);
}

bool cpu_wants_rethunk_at(void *addr)
{
	if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
		return false;
	if (x86_return_thunk != its_return_thunk)
		return true;

	return !((unsigned long)addr & 0x20);
}

/*
 * Rewrite the compiler generated return thunk tail-calls.
 *
 * For example, convert:
 *
 *   JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
	int i = 0;

	/* Patch the custom return thunks... */
	if (cpu_wants_rethunk_at(addr)) {
		i = JMP32_INSN_SIZE;
		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
	} else {
		/* ... or patch them out if not needed. */
		bytes[i++] = RET_INSN_OPCODE;
	}

	for (; i < insn->length;)
		bytes[i++] = INT3_INSN_OPCODE;
	return i;
}

void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
	s32 *s;

	if (cpu_wants_rethunk())
		static_call_force_reinit();

	for (s = start; s < end; s++) {
		void *dest = NULL, *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op = insn.opcode.bytes[0];
		if (op == JMP32_INSN_OPCODE)
			dest = addr + insn.length + insn.immediate.value;

		if (__static_call_fixup(addr, op, dest) ||
		    WARN_ONCE(dest != &__x86_return_thunk,
			      "missing return thunk: %pS-%pS: %*ph",
			      addr, dest, 5, addr))
			continue;

		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_return(addr, &insn, bytes);
		if (len == insn.length) {
			DUMP_BYTES(RET, ((u8*)addr),  len, "%px: orig: ", addr);
			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}
#else /* !CONFIG_MITIGATION_RETHUNK: */
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
#endif /* !CONFIG_MITIGATION_RETHUNK */

#else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */

void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }

#endif /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */

#ifdef CONFIG_X86_KERNEL_IBT

__noendbr bool is_endbr(u32 *val)
{
	u32 endbr;

	__get_kernel_nofault(&endbr, val, u32, Efault);
	return __is_endbr(endbr);

Efault:
	return false;
}

#ifdef CONFIG_FINEIBT

static __noendbr bool exact_endbr(u32 *val)
{
	u32 endbr;

	__get_kernel_nofault(&endbr, val, u32, Efault);
	return endbr == gen_endbr();

Efault:
	return false;
}

#endif

static void poison_cfi(void *addr);

static void __init_or_module poison_endbr(void *addr)
{
	u32 poison = gen_endbr_poison();

	if (WARN_ON_ONCE(!is_endbr(addr)))
		return;

	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);

	/*
	 * When we have IBT, the lack of ENDBR will trigger #CP
	 */
	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
	text_poke_early(addr, &poison, 4);
}

/*
 * Generated by: objtool --ibt
 *
 * Seal the functions for indirect calls by clobbering the ENDBR instructions
 * and the kCFI hash value.
 */
void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;

		poison_endbr(addr);
		if (IS_ENABLED(CONFIG_FINEIBT))
			poison_cfi(addr - 16);
	}
}

#else /* !CONFIG_X86_KERNEL_IBT: */

void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }

#endif /* !CONFIG_X86_KERNEL_IBT */

#ifdef CONFIG_CFI_AUTO_DEFAULT
# define __CFI_DEFAULT CFI_AUTO
#elif defined(CONFIG_CFI_CLANG)
# define __CFI_DEFAULT CFI_KCFI
#else
# define __CFI_DEFAULT CFI_OFF
#endif

enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;

#ifdef CONFIG_FINEIBT_BHI
bool cfi_bhi __ro_after_init = false;
#endif

#ifdef CONFIG_CFI_CLANG
struct bpf_insn;

/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
extern unsigned int __bpf_prog_runX(const void *ctx,
				    const struct bpf_insn *insn);

KCFI_REFERENCE(__bpf_prog_runX);

/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
asm (
"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
"	.type	cfi_bpf_hash,@object				\n"
"	.globl	cfi_bpf_hash					\n"
"	.p2align	2, 0x0					\n"
"cfi_bpf_hash:							\n"
"	.long	__kcfi_typeid___bpf_prog_runX			\n"
"	.size	cfi_bpf_hash, 4					\n"
"	.popsection						\n"
);

/* Must match bpf_callback_t */
extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);

KCFI_REFERENCE(__bpf_callback_fn);

/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
asm (
"	.pushsection	.data..ro_after_init,\"aw\",@progbits	\n"
"	.type	cfi_bpf_subprog_hash,@object			\n"
"	.globl	cfi_bpf_subprog_hash				\n"
"	.p2align	2, 0x0					\n"
"cfi_bpf_subprog_hash:						\n"
"	.long	__kcfi_typeid___bpf_callback_fn			\n"
"	.size	cfi_bpf_subprog_hash, 4				\n"
"	.popsection						\n"
);

u32 cfi_get_func_hash(void *func)
{
	u32 hash;

	func -= cfi_get_offset();
	switch (cfi_mode) {
	case CFI_FINEIBT:
		func += 7;
		break;
	case CFI_KCFI:
		func += 1;
		break;
	default:
		return 0;
	}

	if (get_kernel_nofault(hash, func))
		return 0;

	return hash;
}

int cfi_get_func_arity(void *func)
{
	bhi_thunk *target;
	s32 disp;

	if (cfi_mode != CFI_FINEIBT && !cfi_bhi)
		return 0;

	if (get_kernel_nofault(disp, func - 4))
		return 0;

	target = func + disp;
	return target - __bhi_args;
}
#endif

#ifdef CONFIG_FINEIBT

static bool cfi_rand __ro_after_init = true;
static u32  cfi_seed __ro_after_init;

/*
 * Re-hash the CFI hash with a boot-time seed while making sure the result is
 * not a valid ENDBR instruction.
 */
static u32 cfi_rehash(u32 hash)
{
	hash ^= cfi_seed;
	while (unlikely(__is_endbr(hash) || __is_endbr(-hash))) {
		bool lsb = hash & 1;
		hash >>= 1;
		if (lsb)
			hash ^= 0x80200003;
	}
	return hash;
}
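
/*
 * Illustrative note (not from the original source): the loop body is one
 * step of a Galois LFSR with polynomial 0x80200003. It only runs in the
 * unlikely case that the seeded hash (or its negation, which is what kCFI
 * callers actually carry) collides with the ENDBR encoding (f3 0f 1e fa,
 * i.e. 0xfa1e0ff3 as a little-endian u32), and keeps stepping until the
 * collision is gone.
 */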

static __init int cfi_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "auto")) {
			cfi_mode = CFI_AUTO;
		} else if (!strcmp(str, "off")) {
			cfi_mode = CFI_OFF;
			cfi_rand = false;
		} else if (!strcmp(str, "kcfi")) {
			cfi_mode = CFI_KCFI;
		} else if (!strcmp(str, "fineibt")) {
			cfi_mode = CFI_FINEIBT;
		} else if (!strcmp(str, "norand")) {
			cfi_rand = false;
		} else if (!strcmp(str, "warn")) {
			pr_alert("CFI mismatch non-fatal!\n");
			cfi_warn = true;
		} else if (!strcmp(str, "paranoid")) {
			if (cfi_mode == CFI_FINEIBT) {
				cfi_paranoid = true;
			} else {
				pr_err("Ignoring paranoid; depends on fineibt.\n");
			}
		} else if (!strcmp(str, "bhi")) {
#ifdef CONFIG_FINEIBT_BHI
			if (cfi_mode == CFI_FINEIBT) {
				cfi_bhi = true;
			} else {
				pr_err("Ignoring bhi; depends on fineibt.\n");
			}
#else
			pr_err("Ignoring bhi; depends on FINEIBT_BHI=y.\n");
#endif
		} else {
			pr_err("Ignoring unknown cfi option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("cfi", cfi_parse_cmdline);

/*
 * kCFI					FineIBT
 *
 * __cfi_\func:				__cfi_\func:
 *	movl   $0x12345678,%eax	// 5	endbr64			// 4
 *	nop				subl   $0x12345678,%r10d // 7
 *	nop				jne    __cfi_\func+6	// 2
 *	nop				nop3			// 3
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *
 *
 * caller:				caller:
 *	movl	$(-0x12345678),%r10d	 // 6	movl   $0x12345678,%r10d // 6
 *	addl	$-15(%r11),%r10d	 // 4	lea    -0x10(%r11),%r11	 // 4
 *	je	1f			 // 2	nop4			 // 4
 *	ud2				 // 2
 * 1:	cs call __x86_indirect_thunk_r11 // 6	call   *%r11; nop3;	 // 6
 *
 */

/*
 * <fineibt_preamble_start>:
 *  0: f3 0f 1e fa			endbr64
 *  4: 41 81 <ea> 78 56 34 12		sub    $0x12345678, %r10d
 *  b: 75 f9				jne    6 <fineibt_preamble_start+0x6>
 *  d: 0f 1f 00				nopl   (%rax)
 *
 * Note that the JNE target is the 0xEA byte inside the SUB, this decodes as
 * (bad) on x86_64 and raises #UD.
 */
asm(	".pushsection .rodata				\n"
	"fineibt_preamble_start:			\n"
	"	endbr64					\n"
	"	subl	$0x12345678, %r10d		\n"
	"fineibt_preamble_bhi:				\n"
	"	jne	fineibt_preamble_start+6	\n"
	ASM_NOP3
	"fineibt_preamble_end:				\n"
	".popsection\n"
);

extern u8 fineibt_preamble_start[];
extern u8 fineibt_preamble_bhi[];
extern u8 fineibt_preamble_end[];

#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
#define fineibt_preamble_bhi  (fineibt_preamble_bhi - fineibt_preamble_start)
#define fineibt_preamble_ud   6
#define fineibt_preamble_hash 7

/*
 * <fineibt_caller_start>:
 *  0: 41 ba 78 56 34 12		mov    $0x12345678, %r10d
 *  6: 4d 8d 5b f0			lea    -0x10(%r11), %r11
 *  a: 0f 1f 40 00			nopl   0x0(%rax)
 */
asm(	".pushsection .rodata			\n"
	"fineibt_caller_start:			\n"
	"	movl	$0x12345678, %r10d	\n"
	"	lea	-0x10(%r11), %r11	\n"
	ASM_NOP4
	"fineibt_caller_end:			\n"
	".popsection				\n"
);

extern u8 fineibt_caller_start[];
extern u8 fineibt_caller_end[];

#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
#define fineibt_caller_hash 2

#define fineibt_caller_jmp (fineibt_caller_size - 2)

/*
 * Since FineIBT does hash validation on the callee side it is prone to
 * circumvention attacks where a 'naked' ENDBR instruction exists that
 * is not part of the fineibt_preamble sequence.
 *
 * Notably the x86 entry points must be ENDBR and equally cannot be
 * fineibt_preamble.
 *
 * The fineibt_paranoid caller sequence adds additional caller side
 * hash validation. This stops such circumvention attacks dead, but at the cost
 * of adding a load.
 *
 * <fineibt_paranoid_start>:
 *  0: 41 ba 78 56 34 12		mov    $0x12345678, %r10d
 *  6: 45 3b 53 f7			cmp    -0x9(%r11), %r10d
 *  a: 4d 8d 5b <f0>			lea    -0x10(%r11), %r11
 *  e: 75 fd				jne    d <fineibt_paranoid_start+0xd>
 * 10: 41 ff d3				call   *%r11
 * 13: 90				nop
 *
 * Notably LEA does not modify flags and can be reordered with the CMP,
 * avoiding a dependency. Again, using a non-taken (backwards) branch
 * for the failure case, abusing LEA's 0xf0 displacement byte as a LOCK prefix for the
 * Jcc.d8, causing #UD.
 */
asm(	".pushsection .rodata				\n"
	"fineibt_paranoid_start:			\n"
	"	movl	$0x12345678, %r10d		\n"
	"	cmpl	-9(%r11), %r10d			\n"
	"	lea	-0x10(%r11), %r11		\n"
	"	jne	fineibt_paranoid_start+0xd	\n"
	"fineibt_paranoid_ind:				\n"
	"	call	*%r11				\n"
	"	nop					\n"
	"fineibt_paranoid_end:				\n"
	".popsection					\n"
);

extern u8 fineibt_paranoid_start[];
extern u8 fineibt_paranoid_ind[];
extern u8 fineibt_paranoid_end[];

#define fineibt_paranoid_size (fineibt_paranoid_end - fineibt_paranoid_start)
#define fineibt_paranoid_ind  (fineibt_paranoid_ind - fineibt_paranoid_start)
#define fineibt_paranoid_ud   0xd

static u32 decode_preamble_hash(void *addr, int *reg)
{
	u8 *p = addr;

	/* b8+reg 78 56 34 12		movl	$0x12345678, \reg */
	if (p[0] >= 0xb8 && p[0] < 0xc0) {
		if (reg)
			*reg = p[0] - 0xb8;
		return *(u32 *)(addr + 1);
	}

	return 0; /* invalid hash value */
}
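
/*
 * Illustrative decode (not from the original source): a kCFI preamble
 * compiled as "movl $0x12345678, %eax" dumps as b8 78 56 34 12; p[0] ==
 * 0xb8 selects register 0 (%eax) and the hash 0x12345678 is read from
 * the four bytes after the opcode. An opcode outside 0xb8..0xbf means
 * the site is not a kCFI preamble and 0 is returned.
 */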

static u32 decode_caller_hash(void *addr)
{
	u8 *p = addr;

	/* 41 ba 88 a9 cb ed		mov    $(-0x12345678), %r10d */
	if (p[0] == 0x41 && p[1] == 0xba)
		return -*(u32 *)(addr + 2);

	/* eb 0c 88 a9 cb ed		jmp.d8 +12 */
	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
		return -*(u32 *)(addr + 2);

	return 0; /* invalid hash value */
}

/* .retpoline_sites */
static int cfi_disable_callers(s32 *start, s32 *end)
{
	/*
	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
	 * intact for later use. Also see decode_caller_hash() and
	 * cfi_rewrite_callers().
	 */
	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(addr, jmp, 2);
	}

	return 0;
}

static int cfi_enable_callers(s32 *start, s32 *end)
{
	/*
	 * Re-enable kCFI, undo what cfi_disable_callers() did.
	 */
	const u8 mov[] = { 0x41, 0xba };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(addr, mov, 2);
	}

	return 0;
}

/* .cfi_sites */
static int cfi_rand_preamble(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		hash = decode_preamble_hash(addr, NULL);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		hash = cfi_rehash(hash);
		text_poke_early(addr + 1, &hash, 4);
	}

	return 0;
}

static void cfi_fineibt_bhi_preamble(void *addr, int arity)
{
	if (!arity)
		return;

	if (!cfi_warn && arity == 1) {
		/*
		 * Crazy scheme to allow arity-1 inline:
		 *
		 * __cfi_foo:
		 *    0: f3 0f 1e fa			endbr64
		 *    4: 41 81 <ea> 78 56 34 12		sub    0x12345678, %r10d
		 *    b: 49 0f 45 fa			cmovne %r10, %rdi
		 *    f: 75 f5				jne    __cfi_foo+6
		 *   11: 0f 1f 00			nopl   (%rax)
		 * Code that direct-calls foo()+0 decodes the tail end as:
		 *
		 * foo:
		 *    0: f5				cmc
		 *    1: 0f 1f 00			nopl   (%rax)
		 *
		 * which clobbers CF, but does not affect anything ABI
		 * wise.
		 *
		 * Notably, this scheme is incompatible with permissive CFI
		 * because the CMOVcc is unconditional and RDI will have been
		 * clobbered.
		 */
		const u8 magic[9] = {
			0x49, 0x0f, 0x45, 0xfa,
			0x75, 0xf5,
			BYTES_NOP3,
		};

		text_poke_early(addr + fineibt_preamble_bhi, magic, 9);

		return;
	}

	text_poke_early(addr + fineibt_preamble_bhi,
			text_gen_insn(CALL_INSN_OPCODE,
				      addr + fineibt_preamble_bhi,
				      __bhi_args[arity]),
			CALL_INSN_SIZE);
}

static int cfi_rewrite_preamble(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		int arity;
		u32 hash;

		/*
		 * When the function doesn't start with ENDBR the compiler will
		 * have determined there are no indirect calls to it and we
		 * don't need no CFI either.
		 */
		if (!is_endbr(addr + 16))
			continue;

		hash = decode_preamble_hash(addr, &arity);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);

		WARN_ONCE(!IS_ENABLED(CONFIG_FINEIBT_BHI) && arity,
			  "kCFI preamble has wrong register at: %pS %*ph\n",
			  addr, 5, addr);

		if (cfi_bhi)
			cfi_fineibt_bhi_preamble(addr, arity);
	}

	return 0;
}

static void cfi_rewrite_endbr(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;

		if (!exact_endbr(addr + 16))
			continue;

		poison_endbr(addr + 16);
	}
}

/* .retpoline_sites */
static int cfi_rand_callers(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (hash) {
			hash = -cfi_rehash(hash);
			text_poke_early(addr + 2, &hash, 4);
		}
	}

	return 0;
}

static int emit_paranoid_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
	u8 *thunk = (void *)__x86_indirect_its_thunk_array[reg] - 2;

#ifdef CONFIG_MITIGATION_ITS
	u8 *tmp = its_allocate_thunk(reg);
	if (tmp)
		thunk = tmp;
#endif

	return __emit_trampoline(addr, insn, bytes, thunk, thunk);
}

static int cfi_rewrite_callers(s32 *start, s32 *end)
{
	s32 *s;

	BUG_ON(fineibt_paranoid_size != 20);

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		struct insn insn;
		u8 bytes[20];
		u32 hash;
		int ret;
		u8 op;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash)
			continue;

		if (!cfi_paranoid) {
			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
			/* rely on apply_retpolines() */
			continue;
		}

		/* cfi_paranoid */
		ret = insn_decode_kernel(&insn, addr + fineibt_caller_size);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op = insn.opcode.bytes[0];
		if (op != CALL_INSN_OPCODE && op != JMP32_INSN_OPCODE) {
			WARN_ON_ONCE(1);
			continue;
		}

		memcpy(bytes, fineibt_paranoid_start, fineibt_paranoid_size);
		memcpy(bytes + fineibt_caller_hash, &hash, 4);

		if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + fineibt_paranoid_ind, 11)) {
			emit_paranoid_trampoline(addr + fineibt_caller_size,
						 &insn, 11, bytes + fineibt_caller_size);
		} else {
			ret = emit_indirect(op, 11, bytes + fineibt_paranoid_ind);
			if (WARN_ON_ONCE(ret != 3))
				continue;
		}

		text_poke_early(addr, bytes, fineibt_paranoid_size);
	}

	return 0;
}

static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
			    s32 *start_cfi, s32 *end_cfi, bool builtin)
{
	int ret;

	if (WARN_ONCE(fineibt_preamble_size != 16,
		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
		return;

	if (cfi_mode == CFI_AUTO) {
		cfi_mode = CFI_KCFI;
		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT)) {
			/*
			 * FRED has much saner context on exception entry and
			 * is less easy to take advantage of.
			 */
			if (!cpu_feature_enabled(X86_FEATURE_FRED))
				cfi_paranoid = true;
			cfi_mode = CFI_FINEIBT;
		}
	}

	/*
	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
	 * rewrite them. This disables all CFI. If this succeeds but any of the
	 * later stages fails, we're without CFI.
	 */
	ret = cfi_disable_callers(start_retpoline, end_retpoline);
	if (ret)
		goto err;

	if (cfi_rand) {
		if (builtin) {
			cfi_seed = get_random_u32();
			cfi_bpf_hash = cfi_rehash(cfi_bpf_hash);
			cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
		}

		ret = cfi_rand_preamble(start_cfi, end_cfi);
		if (ret)
			goto err;

		ret = cfi_rand_callers(start_retpoline, end_retpoline);
		if (ret)
			goto err;
	}

	switch (cfi_mode) {
	case CFI_OFF:
		if (builtin)
			pr_info("Disabling CFI\n");
		return;

	case CFI_KCFI:
		ret = cfi_enable_callers(start_retpoline, end_retpoline);
		if (ret)
			goto err;

		if (builtin)
			pr_info("Using kCFI\n");
		return;

	case CFI_FINEIBT:
		/* place the FineIBT preamble at func()-16 */
		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
		if (ret)
			goto err;

		/* rewrite the callers to target func()-16 */
		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
		if (ret)
			goto err;

		/* now that nobody targets func()+0, remove ENDBR there */
		cfi_rewrite_endbr(start_cfi, end_cfi);

		if (builtin) {
			pr_info("Using %sFineIBT%s CFI\n",
				cfi_paranoid ? "paranoid " : "",
				cfi_bhi ? "+BHI" : "");
		}
		return;

	default:
		break;
	}

err:
	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
}

static inline void poison_hash(void *addr)
{
	*(u32 *)addr = 0;
}

static void poison_cfi(void *addr)
{
	/*
	 * Compilers manage to be inconsistent with ENDBR vs __cfi prefixes,
	 * some (static) functions for which they can determine the address
	 * is never taken do not get a __cfi prefix, but *DO* get an ENDBR.
	 *
	 * As such, these functions will get sealed, but we need to be careful
	 * to not unconditionally scribble the previous function.
	 */
	switch (cfi_mode) {
	case CFI_FINEIBT:
		/*
		 * FineIBT prefix should start with an ENDBR.
		 */
		if (!is_endbr(addr))
			break;

		/*
		 * __cfi_\func:
		 *	osp nopl (%rax)
		 *	subl	$0, %r10d
		 *	jz	1f
		 *	ud2
		 * 1:	nop
		 */
		poison_endbr(addr);
		poison_hash(addr + fineibt_preamble_hash);
		break;

	case CFI_KCFI:
		/*
		 * kCFI prefix should start with a valid hash.
		 */
		if (!decode_preamble_hash(addr, NULL))
			break;

		/*
		 * __cfi_\func:
		 *	movl	$0, %eax
		 *	.skip	11, 0x90
		 */
		poison_hash(addr + 1);
		break;

	default:
		break;
	}
}

/*
 * When regs->ip points to a 0xEA byte in the FineIBT preamble,
 * return true and fill out target and type.
 *
 * We check the preamble by checking for the ENDBR instruction relative to the
 * 0xEA instruction.
 */
static bool decode_fineibt_preamble(struct pt_regs *regs, unsigned long *target, u32 *type)
{
	unsigned long addr = regs->ip - fineibt_preamble_ud;
	u32 hash;

	if (!exact_endbr((void *)addr))
		return false;

	*target = addr + fineibt_preamble_size;

	__get_kernel_nofault(&hash, addr + fineibt_preamble_hash, u32, Efault);
	*type = (u32)regs->r10 + hash;

	/*
	 * Since regs->ip points to the middle of an instruction; it cannot
	 * continue with the normal fixup.
	 */
	regs->ip = *target;

	return true;

Efault:
	return false;
}

/*
 * regs->ip points to one of the UD2 in __bhi_args[].
 */
static bool decode_fineibt_bhi(struct pt_regs *regs, unsigned long *target, u32 *type)
{
	unsigned long addr;
	u32 hash;

	if (!cfi_bhi)
		return false;

	if (regs->ip < (unsigned long)__bhi_args ||
	    regs->ip >= (unsigned long)__bhi_args_end)
		return false;

	/*
	 * Fetch the return address from the stack, this points to the
	 * FineIBT preamble. Since the CALL instruction is in the 5 last
	 * bytes of the preamble, the return address is in fact the target
	 * address.
	 */
	__get_kernel_nofault(&addr, regs->sp, unsigned long, Efault);
	*target = addr;

	addr -= fineibt_preamble_size;
	if (!exact_endbr((void *)addr))
		return false;

	__get_kernel_nofault(&hash, addr + fineibt_preamble_hash, u32, Efault);
	*type = (u32)regs->r10 + hash;

	/*
	 * The UD2 sites are constructed with a RET immediately following,
	 * as such the non-fatal case can use the regular fixup.
	 */
	return true;

Efault:
	return false;
}

static bool is_paranoid_thunk(unsigned long addr)
{
	u32 thunk;

	__get_kernel_nofault(&thunk, (u32 *)addr, u32, Efault);
	return (thunk & 0x00FFFFFF) == 0xfd75ea;

Efault:
	return false;
}

/*
 * regs->ip points to a LOCK Jcc.d8 instruction from the fineibt_paranoid_start[]
 * sequence, or to an invalid instruction (0xea) + Jcc.d8 for cfi_paranoid + ITS
 * thunk.
 */
static bool decode_fineibt_paranoid(struct pt_regs *regs, unsigned long *target, u32 *type)
{
	unsigned long addr = regs->ip - fineibt_paranoid_ud;

	if (!cfi_paranoid)
		return false;

	if (is_cfi_trap(addr + fineibt_caller_size - LEN_UD2)) {
		*target = regs->r11 + fineibt_preamble_size;
		*type = regs->r10;

		/*
		 * Since the trapping instruction is the exact, but LOCK prefixed,
		 * Jcc.d8 that got us here, the normal fixup will work.
		 */
		return true;
	}

	/*
	 * The cfi_paranoid + ITS thunk combination results in:
	 *
	 *  0: 41 ba 78 56 34 12		mov    $0x12345678, %r10d
	 *  6: 45 3b 53 f7			cmp    -0x9(%r11), %r10d
	 *  a: 4d 8d 5b f0			lea    -0x10(%r11), %r11
	 *  e: 2e e8 XX XX XX XX		cs call __x86_indirect_paranoid_thunk_r11
	 *
	 * Where the paranoid_thunk looks like:
	 *
	 * 1d: <ea>				(bad)
	 * __x86_indirect_paranoid_thunk_r11:
	 * 1e: 75 fd				jne 1d
	 * __x86_indirect_its_thunk_r11:
	 * 20: 41 ff e3				jmp    *%r11
2016 * 23: cc int3
2017 *
2018 */
2019 if (is_paranoid_thunk(regs->ip)) {
2020 *target = regs->r11 + fineibt_preamble_size;
2021 *type = regs->r10;
2022
2023 regs->ip = *target;
2024 return true;
2025 }
2026
2027 return false;
2028 }
2029
decode_fineibt_insn(struct pt_regs * regs,unsigned long * target,u32 * type)2030 bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type)
2031 {
2032 if (decode_fineibt_paranoid(regs, target, type))
2033 return true;
2034
2035 if (decode_fineibt_bhi(regs, target, type))
2036 return true;
2037
2038 return decode_fineibt_preamble(regs, target, type);
2039 }
2040
2041 #else /* !CONFIG_FINEIBT: */
2042
__apply_fineibt(s32 * start_retpoline,s32 * end_retpoline,s32 * start_cfi,s32 * end_cfi,bool builtin)2043 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
2044 s32 *start_cfi, s32 *end_cfi, bool builtin)
2045 {
2046 }
2047
2048 #ifdef CONFIG_X86_KERNEL_IBT
2049	static void poison_cfi(void *addr) { }
2050 #endif
2051
2052 #endif /* !CONFIG_FINEIBT */
2053
2054	void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
2055 s32 *start_cfi, s32 *end_cfi)
2056 {
2057 return __apply_fineibt(start_retpoline, end_retpoline,
2058 start_cfi, end_cfi,
2059 /* .builtin = */ false);
2060 }
2061
2062 #ifdef CONFIG_SMP
2063	static void alternatives_smp_lock(const s32 *start, const s32 *end,
2064 u8 *text, u8 *text_end)
2065 {
2066 const s32 *poff;
2067
2068 for (poff = start; poff < end; poff++) {
2069 u8 *ptr = (u8 *)poff + *poff;
2070
2071 if (!*poff || ptr < text || ptr >= text_end)
2072 continue;
2073 /* turn DS segment override prefix into lock prefix */
2074 if (*ptr == 0x3e)
2075 text_poke(ptr, ((unsigned char []){0xf0}), 1);
2076 }
2077 }
2078
2079	static void alternatives_smp_unlock(const s32 *start, const s32 *end,
2080 u8 *text, u8 *text_end)
2081 {
2082 const s32 *poff;
2083
2084 for (poff = start; poff < end; poff++) {
2085 u8 *ptr = (u8 *)poff + *poff;
2086
2087 if (!*poff || ptr < text || ptr >= text_end)
2088 continue;
2089 /* turn lock prefix into DS segment override prefix */
2090 if (*ptr == 0xf0)
2091 text_poke(ptr, ((unsigned char []){0x3E}), 1);
2092 }
2093 }
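/*
 * For reference, a sketch of how the .smp_locks entries walked above are
 * emitted at each locked-instruction site (simplified from LOCK_PREFIX in
 * <asm/alternative.h>; the exact macro may differ):
 *
 *	.pushsection .smp_locks, "a"
 *	.balign 4
 *	.long 671f - .			# s32 offset to the prefix byte
 *	.popsection
 * 671:	lock; addl $1, (%rdi)
 *
 * Each s32 in [start, end) thus locates one LOCK (or, once unlocked, DS)
 * prefix byte.
 */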
2094
2095 struct smp_alt_module {
2096	/* module that owns these lock prefixes, NULL for core kernel */
2097 struct module *mod;
2098 char *name;
2099
2100 /* ptrs to lock prefixes */
2101 const s32 *locks;
2102 const s32 *locks_end;
2103
2104 /* .text segment, needed to avoid patching init code ;) */
2105 u8 *text;
2106 u8 *text_end;
2107
2108 struct list_head next;
2109 };
2110 static LIST_HEAD(smp_alt_modules);
2111 static bool uniproc_patched = false; /* protected by text_mutex */
2112
2113	void __init_or_module alternatives_smp_module_add(struct module *mod,
2114 char *name,
2115 void *locks, void *locks_end,
2116 void *text, void *text_end)
2117 {
2118 struct smp_alt_module *smp;
2119
2120 mutex_lock(&text_mutex);
2121 if (!uniproc_patched)
2122 goto unlock;
2123
2124 if (num_possible_cpus() == 1)
2125 /* Don't bother remembering, we'll never have to undo it. */
2126 goto smp_unlock;
2127
2128 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
2129	if (!smp)
2130 /* we'll run the (safe but slow) SMP code then ... */
2131 goto unlock;
2132
2133 smp->mod = mod;
2134 smp->name = name;
2135 smp->locks = locks;
2136 smp->locks_end = locks_end;
2137 smp->text = text;
2138 smp->text_end = text_end;
2139	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s",
2140 smp->locks, smp->locks_end,
2141 smp->text, smp->text_end, smp->name);
2142
2143 list_add_tail(&smp->next, &smp_alt_modules);
2144 smp_unlock:
2145 alternatives_smp_unlock(locks, locks_end, text, text_end);
2146 unlock:
2147 mutex_unlock(&text_mutex);
2148 }
2149
2150	void __init_or_module alternatives_smp_module_del(struct module *mod)
2151 {
2152 struct smp_alt_module *item;
2153
2154 mutex_lock(&text_mutex);
2155 list_for_each_entry(item, &smp_alt_modules, next) {
2156 if (mod != item->mod)
2157 continue;
2158 list_del(&item->next);
2159 kfree(item);
2160 break;
2161 }
2162 mutex_unlock(&text_mutex);
2163 }
2164
2165	void alternatives_enable_smp(void)
2166 {
2167 struct smp_alt_module *mod;
2168
2169 /* Why bother if there are no other CPUs? */
2170 BUG_ON(num_possible_cpus() == 1);
2171
2172 mutex_lock(&text_mutex);
2173
2174 if (uniproc_patched) {
2175 pr_info("switching to SMP code\n");
2176 BUG_ON(num_online_cpus() != 1);
2177 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
2178 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
2179 list_for_each_entry(mod, &smp_alt_modules, next)
2180 alternatives_smp_lock(mod->locks, mod->locks_end,
2181 mod->text, mod->text_end);
2182 uniproc_patched = false;
2183 }
2184 mutex_unlock(&text_mutex);
2185 }
2186
2187 /*
2188 * Return 1 if the address range is reserved for SMP-alternatives.
2189 * Must hold text_mutex.
2190 */
2191	int alternatives_text_reserved(void *start, void *end)
2192 {
2193 struct smp_alt_module *mod;
2194 const s32 *poff;
2195 u8 *text_start = start;
2196 u8 *text_end = end;
2197
2198 lockdep_assert_held(&text_mutex);
2199
2200 list_for_each_entry(mod, &smp_alt_modules, next) {
2201 if (mod->text > text_end || mod->text_end < text_start)
2202 continue;
2203 for (poff = mod->locks; poff < mod->locks_end; poff++) {
2204 const u8 *ptr = (const u8 *)poff + *poff;
2205
2206 if (text_start <= ptr && text_end > ptr)
2207 return 1;
2208 }
2209 }
2210
2211 return 0;
2212 }
2213 #endif /* CONFIG_SMP */
2214
2215 /*
2216 * Self-test for the INT3 based CALL emulation code.
2217 *
2218 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
2219 * properly and that there is a stack gap between the INT3 frame and the
2220 * previous context. Without this gap doing a virtual PUSH on the interrupted
2221 * stack would corrupt the INT3 IRET frame.
2222 *
2223 * See entry_{32,64}.S for more details.
2224 */
2225
2226 /*
2227 * We define the int3_magic() function in assembly to control the calling
2228 * convention such that we can 'call' it from assembly.
2229 */
2230
2231 extern void int3_magic(unsigned int *ptr); /* defined in asm */
2232
2233 asm (
2234 " .pushsection .init.text, \"ax\", @progbits\n"
2235 " .type int3_magic, @function\n"
2236 "int3_magic:\n"
2237 ANNOTATE_NOENDBR
2238 " movl $1, (%" _ASM_ARG1 ")\n"
2239 ASM_RET
2240 " .size int3_magic, .-int3_magic\n"
2241 " .popsection\n"
2242 );
2243
2244 extern void int3_selftest_ip(void); /* defined in asm below */
2245
2246 static int __init
2247	int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
2248 {
2249 unsigned long selftest = (unsigned long)&int3_selftest_ip;
2250 struct die_args *args = data;
2251 struct pt_regs *regs = args->regs;
2252
2253 OPTIMIZER_HIDE_VAR(selftest);
2254
2255 if (!regs || user_mode(regs))
2256 return NOTIFY_DONE;
2257
2258 if (val != DIE_INT3)
2259 return NOTIFY_DONE;
2260
2261 if (regs->ip - INT3_INSN_SIZE != selftest)
2262 return NOTIFY_DONE;
2263
2264 int3_emulate_call(regs, (unsigned long)&int3_magic);
2265 return NOTIFY_STOP;
2266 }
2267
2268 /* Must be noinline to ensure uniqueness of int3_selftest_ip. */
2269	static noinline void __init int3_selftest(void)
2270 {
2271 static __initdata struct notifier_block int3_exception_nb = {
2272 .notifier_call = int3_exception_notify,
2273 .priority = INT_MAX-1, /* last */
2274 };
2275 unsigned int val = 0;
2276
2277 BUG_ON(register_die_notifier(&int3_exception_nb));
2278
2279 /*
2280 * Basically: int3_magic(&val); but really complicated :-)
2281 *
2282 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
2283 * notifier above will emulate CALL for us.
2284 */
2285 asm volatile ("int3_selftest_ip:\n\t"
2286 ANNOTATE_NOENDBR
2287 " int3; nop; nop; nop; nop\n\t"
2288 : ASM_CALL_CONSTRAINT
2289 : __ASM_SEL_RAW(a, D) (&val)
2290 : "memory");
2291
2292 BUG_ON(val != 1);
2293
2294 unregister_die_notifier(&int3_exception_nb);
2295 }
2296
2297 static __initdata int __alt_reloc_selftest_addr;
2298
2299 extern void __init __alt_reloc_selftest(void *arg);
2300	__visible noinline void __init __alt_reloc_selftest(void *arg)
2301 {
2302 WARN_ON(arg != &__alt_reloc_selftest_addr);
2303 }
2304
2305	static noinline void __init alt_reloc_selftest(void)
2306 {
2307 /*
2308 * Tests apply_relocation().
2309 *
2310 * This has a relative immediate (CALL) in a place other than the first
2311 * instruction and additionally on x86_64 we get a RIP-relative LEA:
2312 *
2313 * lea 0x0(%rip),%rdi # 5d0: R_X86_64_PC32 .init.data+0x5566c
2314 * call +0 # 5d5: R_X86_64_PLT32 __alt_reloc_selftest-0x4
2315 *
2316 * Getting this wrong will either crash and burn or tickle the WARN
2317 * above.
2318 */
2319 asm_inline volatile (
2320 ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
2321 : ASM_CALL_CONSTRAINT
2322 : [mem] "m" (__alt_reloc_selftest_addr)
2323 : _ASM_ARG1
2324 );
2325 }
2326
2327	void __init alternative_instructions(void)
2328 {
2329 u64 ibt;
2330
2331 int3_selftest();
2332
2333 /*
2334 * The patching is not fully atomic, so try to avoid local
2335	 * interruptions that might execute the code that is about to be patched.
2336 * Other CPUs are not running.
2337 */
2338 stop_nmi();
2339
2340 /*
2341 * Don't stop machine check exceptions while patching.
2342 * MCEs only happen when something got corrupted and in this
2343 * case we must do something about the corruption.
2344 * Ignoring it is worse than an unlikely patching race.
2345 * Also machine checks tend to be broadcast and if one CPU
2346 * goes into machine check the others follow quickly, so we don't
2347	 * expect a machine check to cause undue problems during code
2348 * patching.
2349 */
2350
2351 /*
2352 * Make sure to set (artificial) features depending on used paravirt
2353 * functions which can later influence alternative patching.
2354 */
2355 paravirt_set_cap();
2356
2357 /* Keep CET-IBT disabled until caller/callee are patched */
2358 ibt = ibt_save(/*disable*/ true);
2359
2360 __apply_fineibt(__retpoline_sites, __retpoline_sites_end,
2361 __cfi_sites, __cfi_sites_end, true);
2362
2363 /*
2364 * Rewrite the retpolines, must be done before alternatives since
2365 * those can rewrite the retpoline thunks.
2366 */
2367 apply_retpolines(__retpoline_sites, __retpoline_sites_end);
2368 apply_returns(__return_sites, __return_sites_end);
2369
2370 /*
2371 * Adjust all CALL instructions to point to func()-10, including
2372 * those in .altinstr_replacement.
2373 */
2374 callthunks_patch_builtin_calls();
2375
2376 apply_alternatives(__alt_instructions, __alt_instructions_end);
2377
2378 /*
2379 * Seal all functions that do not have their address taken.
2380 */
2381 apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
2382
2383 ibt_restore(ibt);
2384
2385 #ifdef CONFIG_SMP
2386 /* Patch to UP if other cpus not imminent. */
2387 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
2388 uniproc_patched = true;
2389 alternatives_smp_module_add(NULL, "core kernel",
2390 __smp_locks, __smp_locks_end,
2391 _text, _etext);
2392 }
2393
2394 if (!uniproc_patched || num_possible_cpus() == 1) {
2395 free_init_pages("SMP alternatives",
2396 (unsigned long)__smp_locks,
2397 (unsigned long)__smp_locks_end);
2398 }
2399 #endif
2400
2401 restart_nmi();
2402 alternatives_patched = 1;
2403
2404 alt_reloc_selftest();
2405 }
2406
2407 /**
2408 * text_poke_early - Update instructions on a live kernel at boot time
2409 * @addr: address to modify
2410 * @opcode: source of the copy
2411 * @len: length to copy
2412 *
2413 * When you use this code to patch more than one byte of an instruction
2414 * you need to make sure that other CPUs cannot execute this code in parallel.
2415	 * Also, no thread may currently be preempted in the middle of these
2416 * instructions. And on the local CPU you need to be protected against NMI or
2417 * MCE handlers seeing an inconsistent instruction while you patch.
2418 */
2419	void __init_or_module text_poke_early(void *addr, const void *opcode,
2420 size_t len)
2421 {
2422 unsigned long flags;
2423
2424 if (boot_cpu_has(X86_FEATURE_NX) &&
2425 is_module_text_address((unsigned long)addr)) {
2426 /*
2427	 * Module text is initially marked as non-executable, so the
2428 * code cannot be running and speculative code-fetches are
2429 * prevented. Just change the code.
2430 */
2431 memcpy(addr, opcode, len);
2432 } else {
2433 local_irq_save(flags);
2434 memcpy(addr, opcode, len);
2435 sync_core();
2436 local_irq_restore(flags);
2437
2438 /*
2439 * Could also do a CLFLUSH here to speed up CPU recovery; but
2440 * that causes hangs on some VIA CPUs.
2441 */
2442 }
2443 }
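/*
 * Illustrative boot-time usage only (the 'site' address is hypothetical):
 *
 *	u8 buf[5];
 *
 *	memcpy(buf, x86_nops[5], 5);
 *	text_poke_early(site, buf, 5);	// other CPUs not yet running
 */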
2444
2445 typedef struct {
2446 struct mm_struct *mm;
2447 } temp_mm_state_t;
2448
2449 /*
2450	 * Using a temporary mm allows us to set temporary mappings that are not accessible
2451	 * by other CPUs. Such mappings are needed to perform sensitive memory writes
2452	 * that override the kernel memory protections (e.g., W^X), without exposing the
2453	 * temporary page-table mappings that are required for these write operations to
2454	 * other CPUs. Using a temporary mm also allows us to avoid TLB shootdowns when the
2455 * mapping is torn down.
2456 *
2457 * Context: The temporary mm needs to be used exclusively by a single core. To
2458 * harden security IRQs must be disabled while the temporary mm is
2459 * loaded, thereby preventing interrupt handler bugs from overriding
2460 * the kernel memory protection.
2461 */
2462	static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
2463 {
2464 temp_mm_state_t temp_state;
2465
2466 lockdep_assert_irqs_disabled();
2467
2468 /*
2469 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
2470 * with a stale address space WITHOUT being in lazy mode after
2471 * restoring the previous mm.
2472 */
2473 if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
2474 leave_mm();
2475
2476 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
2477 switch_mm_irqs_off(NULL, mm, current);
2478
2479 /*
2480 * If breakpoints are enabled, disable them while the temporary mm is
2481 * used. Userspace might set up watchpoints on addresses that are used
2482 * in the temporary mm, which would lead to wrong signals being sent or
2483 * crashes.
2484 *
2485 * Note that breakpoints are not disabled selectively, which also causes
2486 * kernel breakpoints (e.g., perf's) to be disabled. This might be
2487 * undesirable, but still seems reasonable as the code that runs in the
2488 * temporary mm should be short.
2489 */
2490 if (hw_breakpoint_active())
2491 hw_breakpoint_disable();
2492
2493 return temp_state;
2494 }
2495
2496 __ro_after_init struct mm_struct *poking_mm;
2497 __ro_after_init unsigned long poking_addr;
2498
2499	static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
2500 {
2501 lockdep_assert_irqs_disabled();
2502
2503 switch_mm_irqs_off(NULL, prev_state.mm, current);
2504
2505 /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
2506 cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
2507
2508 /*
2509 * Restore the breakpoints if they were disabled before the temporary mm
2510 * was loaded.
2511 */
2512 if (hw_breakpoint_active())
2513 hw_breakpoint_restore();
2514 }
2515
2516	static void text_poke_memcpy(void *dst, const void *src, size_t len)
2517 {
2518 memcpy(dst, src, len);
2519 }
2520
2521	static void text_poke_memset(void *dst, const void *src, size_t len)
2522 {
2523 int c = *(const int *)src;
2524
2525 memset(dst, c, len);
2526 }
2527
2528 typedef void text_poke_f(void *dst, const void *src, size_t len);
2529
2530	static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
2531 {
2532 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
2533 struct page *pages[2] = {NULL};
2534 temp_mm_state_t prev;
2535 unsigned long flags;
2536 pte_t pte, *ptep;
2537 spinlock_t *ptl;
2538 pgprot_t pgprot;
2539
2540 /*
2541	 * While the boot memory allocator is running we cannot use struct pages, as
2542 * they are not yet initialized. There is no way to recover.
2543 */
2544 BUG_ON(!after_bootmem);
2545
2546 if (!core_kernel_text((unsigned long)addr)) {
2547 pages[0] = vmalloc_to_page(addr);
2548 if (cross_page_boundary)
2549 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
2550 } else {
2551 pages[0] = virt_to_page(addr);
2552 WARN_ON(!PageReserved(pages[0]));
2553 if (cross_page_boundary)
2554 pages[1] = virt_to_page(addr + PAGE_SIZE);
2555 }
2556 /*
2557 * If something went wrong, crash and burn since recovery paths are not
2558 * implemented.
2559 */
2560 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
2561
2562 /*
2563 * Map the page without the global bit, as TLB flushing is done with
2564 * flush_tlb_mm_range(), which is intended for non-global PTEs.
2565 */
2566 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
2567
2568 /*
2569	 * The lock is not really needed, but this allows us to avoid open-coding.
2570 */
2571 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
2572
2573 /*
2574 * This must not fail; preallocated in poking_init().
2575 */
2576 VM_BUG_ON(!ptep);
2577
2578 local_irq_save(flags);
2579
2580 pte = mk_pte(pages[0], pgprot);
2581 set_pte_at(poking_mm, poking_addr, ptep, pte);
2582
2583 if (cross_page_boundary) {
2584 pte = mk_pte(pages[1], pgprot);
2585 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
2586 }
2587
2588 /*
2589 * Loading the temporary mm behaves as a compiler barrier, which
2590 * guarantees that the PTE will be set at the time memcpy() is done.
2591 */
2592 prev = use_temporary_mm(poking_mm);
2593
2594 kasan_disable_current();
2595 func((u8 *)poking_addr + offset_in_page(addr), src, len);
2596 kasan_enable_current();
2597
2598 /*
2599 * Ensure that the PTE is only cleared after the instructions of memcpy
2600 * were issued by using a compiler barrier.
2601 */
2602 barrier();
2603
2604 pte_clear(poking_mm, poking_addr, ptep);
2605 if (cross_page_boundary)
2606 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
2607
2608 /*
2609 * Loading the previous page-table hierarchy requires a serializing
2610 * instruction that already allows the core to see the updated version.
2611 * Xen-PV is assumed to serialize execution in a similar manner.
2612 */
2613 unuse_temporary_mm(prev);
2614
2615 /*
2616 * Flushing the TLB might involve IPIs, which would require enabled
2617	 * IRQs, but not if the mm is unused, as it is at this point.
2618 */
2619 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
2620 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
2621 PAGE_SHIFT, false);
2622
2623 if (func == text_poke_memcpy) {
2624 /*
2625 * If the text does not match what we just wrote then something is
2626 * fundamentally screwy; there's nothing we can really do about that.
2627 */
2628 BUG_ON(memcmp(addr, src, len));
2629 }
2630
2631 local_irq_restore(flags);
2632 pte_unmap_unlock(ptep, ptl);
2633 return addr;
2634 }
2635
2636 /**
2637 * text_poke - Update instructions on a live kernel
2638 * @addr: address to modify
2639 * @opcode: source of the copy
2640 * @len: length to copy
2641 *
2642 * Only atomic text poke/set should be allowed when not doing early patching.
2643 * It means the size must be writable atomically and the address must be aligned
2644 * in a way that permits an atomic write. It also makes sure we fit on a single
2645 * page.
2646 *
2647 * Note that the caller must ensure that if the modified code is part of a
2648 * module, the module would not be removed during poking. This can be achieved
2649 * by registering a module notifier, and ordering module removal and patching
2650 * through a mutex.
2651 */
2652	void *text_poke(void *addr, const void *opcode, size_t len)
2653 {
2654 lockdep_assert_held(&text_mutex);
2655
2656 return __text_poke(text_poke_memcpy, addr, opcode, len);
2657 }
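/*
 * A minimal sketch of the module-notifier pattern described above; the
 * names client_lock and client_forget_module() are hypothetical:
 *
 *	static int client_module_notify(struct notifier_block *nb,
 *					unsigned long state, void *mod)
 *	{
 *		if (state != MODULE_STATE_GOING)
 *			return NOTIFY_DONE;
 *		mutex_lock(&client_lock);
 *		client_forget_module(mod);	// drop poke targets in 'mod'
 *		mutex_unlock(&client_lock);
 *		return NOTIFY_DONE;
 *	}
 *
 * Patching then takes client_lock around text_poke(), so module removal
 * and poking are strictly ordered.
 */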
2658
2659 /**
2660 * text_poke_kgdb - Update instructions on a live kernel by kgdb
2661 * @addr: address to modify
2662 * @opcode: source of the copy
2663 * @len: length to copy
2664 *
2665 * Only atomic text poke/set should be allowed when not doing early patching.
2666 * It means the size must be writable atomically and the address must be aligned
2667 * in a way that permits an atomic write. It also makes sure we fit on a single
2668 * page.
2669 *
2670 * Context: should only be used by kgdb, which ensures no other core is running,
2671	 * despite the fact that it does not hold the text_mutex.
2672 */
2673	void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
2674 {
2675 return __text_poke(text_poke_memcpy, addr, opcode, len);
2676 }
2677
2678	void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
2679 bool core_ok)
2680 {
2681 unsigned long start = (unsigned long)addr;
2682 size_t patched = 0;
2683
2684 if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
2685 return NULL;
2686
2687 while (patched < len) {
2688 unsigned long ptr = start + patched;
2689 size_t s;
2690
2691 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2692
2693 __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
2694 patched += s;
2695 }
2696 return addr;
2697 }
2698
2699 /**
2700 * text_poke_copy - Copy instructions into (an unused part of) RX memory
2701 * @addr: address to modify
2702 * @opcode: source of the copy
2703 * @len: length to copy, could be more than 2x PAGE_SIZE
2704 *
2705 * Not safe against concurrent execution; useful for JITs to dump
2706 * new code blocks into unused regions of RX memory. Can be used in
2707 * conjunction with synchronize_rcu_tasks() to wait for existing
2708 * execution to quiesce after having made sure no existing functions
2709	 * execution to quiesce after having made sure no existing function
2710 */
2711	void *text_poke_copy(void *addr, const void *opcode, size_t len)
2712 {
2713 mutex_lock(&text_mutex);
2714 addr = text_poke_copy_locked(addr, opcode, len, false);
2715 mutex_unlock(&text_mutex);
2716 return addr;
2717 }
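/*
 * Illustrative JIT-style usage (rx_buf, code and code_len are
 * hypothetical): after dropping every pointer into the old contents of
 * rx_buf,
 *
 *	synchronize_rcu_tasks();	// wait for in-flight execution
 *	text_poke_copy(rx_buf, code, code_len);
 *
 * the region can safely be refilled with new instructions.
 */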
2718
2719 /**
2720 * text_poke_set - memset into (an unused part of) RX memory
2721 * @addr: address to modify
2722 * @c: the byte to fill the area with
2723 * @len: length to copy, could be more than 2x PAGE_SIZE
2724 *
2725 * This is useful to overwrite unused regions of RX memory with illegal
2726 * instructions.
2727 */
2728	void *text_poke_set(void *addr, int c, size_t len)
2729 {
2730 unsigned long start = (unsigned long)addr;
2731 size_t patched = 0;
2732
2733 if (WARN_ON_ONCE(core_kernel_text(start)))
2734 return NULL;
2735
2736 mutex_lock(&text_mutex);
2737 while (patched < len) {
2738 unsigned long ptr = start + patched;
2739 size_t s;
2740
2741 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
2742
2743 __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
2744 patched += s;
2745 }
2746 mutex_unlock(&text_mutex);
2747 return addr;
2748 }
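/*
 * E.g., poisoning a retired JIT region with INT3 (illustrative only;
 * rx_buf and rx_len are hypothetical):
 *
 *	text_poke_set(rx_buf, INT3_INSN_OPCODE, rx_len);
 */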
2749
2750	static void do_sync_core(void *info)
2751 {
2752 sync_core();
2753 }
2754
2755	void text_poke_sync(void)
2756 {
2757 on_each_cpu(do_sync_core, NULL, 1);
2758 }
2759
2760 /*
2761	 * NOTE: crazy scheme to allow patching Jcc.d32 without increasing the size of
2762	 * this thing. When len == 6 everything is prefixed with 0x0f and we map the
2763	 * opcode to Jcc.d8, using len to distinguish.
2764 */
2765 struct text_poke_loc {
2766 /* addr := _stext + rel_addr */
2767 s32 rel_addr;
2768 s32 disp;
2769 u8 len;
2770 u8 opcode;
2771 const u8 text[POKE_MAX_OPCODE_SIZE];
2772 /* see text_poke_bp_batch() */
2773 u8 old;
2774 };
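/*
 * The two Jcc encodings involved in the scheme above:
 *
 *	Jcc.d8:		7x rel8		(2 bytes)
 *	Jcc.d32:	0f 8x rel32	(6 bytes)
 *
 * A len == 6 entry thus stores the short-form opcode (8x - 0x10 == 7x)
 * and the leading 0x0f is re-added when the instruction is written out.
 */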
2775
2776 struct bp_patching_desc {
2777 struct text_poke_loc *vec;
2778 int nr_entries;
2779 atomic_t refs;
2780 };
2781
2782 static struct bp_patching_desc bp_desc;
2783
2784 static __always_inline
2785	struct bp_patching_desc *try_get_desc(void)
2786 {
2787 struct bp_patching_desc *desc = &bp_desc;
2788
2789 if (!raw_atomic_inc_not_zero(&desc->refs))
2790 return NULL;
2791
2792 return desc;
2793 }
2794
2795	static __always_inline void put_desc(void)
2796 {
2797 struct bp_patching_desc *desc = &bp_desc;
2798
2799 smp_mb__before_atomic();
2800 raw_atomic_dec(&desc->refs);
2801 }
2802
2803	static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
2804 {
2805 return _stext + tp->rel_addr;
2806 }
2807
2808	static __always_inline int patch_cmp(const void *key, const void *elt)
2809 {
2810 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
2811
2812 if (key < text_poke_addr(tp))
2813 return -1;
2814 if (key > text_poke_addr(tp))
2815 return 1;
2816 return 0;
2817 }
2818
2819	noinstr int poke_int3_handler(struct pt_regs *regs)
2820 {
2821 struct bp_patching_desc *desc;
2822 struct text_poke_loc *tp;
2823 int ret = 0;
2824 void *ip;
2825
2826 if (user_mode(regs))
2827 return 0;
2828
2829 /*
2830 * Having observed our INT3 instruction, we now must observe
2831 * bp_desc with non-zero refcount:
2832 *
2833 * bp_desc.refs = 1 INT3
2834 * WMB RMB
2835 * write INT3 if (bp_desc.refs != 0)
2836 */
2837 smp_rmb();
2838
2839 desc = try_get_desc();
2840 if (!desc)
2841 return 0;
2842
2843 /*
2844 * Discount the INT3. See text_poke_bp_batch().
2845 */
2846 ip = (void *) regs->ip - INT3_INSN_SIZE;
2847
2848 /*
2849 * Skip the binary search if there is a single member in the vector.
2850 */
2851 if (unlikely(desc->nr_entries > 1)) {
2852 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
2853 sizeof(struct text_poke_loc),
2854 patch_cmp);
2855 if (!tp)
2856 goto out_put;
2857 } else {
2858 tp = desc->vec;
2859 if (text_poke_addr(tp) != ip)
2860 goto out_put;
2861 }
2862
2863 ip += tp->len;
2864
2865 switch (tp->opcode) {
2866 case INT3_INSN_OPCODE:
2867 /*
2868 * Someone poked an explicit INT3, they'll want to handle it,
2869 * do not consume.
2870 */
2871 goto out_put;
2872
2873 case RET_INSN_OPCODE:
2874 int3_emulate_ret(regs);
2875 break;
2876
2877 case CALL_INSN_OPCODE:
2878 int3_emulate_call(regs, (long)ip + tp->disp);
2879 break;
2880
2881 case JMP32_INSN_OPCODE:
2882 case JMP8_INSN_OPCODE:
2883 int3_emulate_jmp(regs, (long)ip + tp->disp);
2884 break;
2885
2886 case 0x70 ... 0x7f: /* Jcc */
2887 int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
2888 break;
2889
2890 default:
2891 BUG();
2892 }
2893
2894 ret = 1;
2895
2896 out_put:
2897 put_desc();
2898 return ret;
2899 }
2900
2901 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
2902 static struct text_poke_loc tp_vec[TP_VEC_MAX];
2903 static int tp_vec_nr;
2904
2905 /**
2906 * text_poke_bp_batch() -- update instructions on live kernel on SMP
2907 * @tp: vector of instructions to patch
2908 * @nr_entries: number of entries in the vector
2909 *
2910	 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
2911 * We completely avoid stop_machine() here, and achieve the
2912 * synchronization using int3 breakpoint.
2913 *
2914 * The way it is done:
2915 * - For each entry in the vector:
2916	 *	- add an int3 trap to the address that will be patched
2917 * - sync cores
2918 * - For each entry in the vector:
2919 * - update all but the first byte of the patched range
2920 * - sync cores
2921 * - For each entry in the vector:
2922	 *	- replace the first byte (int3) with the first byte of the
2923	 *	  replacement opcode
2924 * - sync cores
2925 */
2926	static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2927 {
2928 unsigned char int3 = INT3_INSN_OPCODE;
2929 unsigned int i;
2930 int do_sync;
2931
2932 lockdep_assert_held(&text_mutex);
2933
2934 bp_desc.vec = tp;
2935 bp_desc.nr_entries = nr_entries;
2936
2937 /*
2938 * Corresponds to the implicit memory barrier in try_get_desc() to
2939	 * ensure reading a non-zero refcount provides up-to-date bp_desc data.
2940 */
2941 atomic_set_release(&bp_desc.refs, 1);
2942
2943 /*
2944 * Function tracing can enable thousands of places that need to be
2945 * updated. This can take quite some time, and with full kernel debugging
2946 * enabled, this could cause the softlockup watchdog to trigger.
2947 * This function gets called every 256 entries added to be patched.
2948 * Call cond_resched() here to make sure that other tasks can get scheduled
2949 * while processing all the functions being patched.
2950 */
2951 cond_resched();
2952
2953 /*
2954 * Corresponding read barrier in int3 notifier for making sure the
2955 * nr_entries and handler are correctly ordered wrt. patching.
2956 */
2957 smp_wmb();
2958
2959 /*
2960	 * First step: add an int3 trap to the address that will be patched.
2961 */
2962 for (i = 0; i < nr_entries; i++) {
2963 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2964 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2965 }
2966
2967 text_poke_sync();
2968
2969 /*
2970 * Second step: update all but the first byte of the patched range.
2971 */
2972 for (do_sync = 0, i = 0; i < nr_entries; i++) {
2973 u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2974 u8 _new[POKE_MAX_OPCODE_SIZE+1];
2975 const u8 *new = tp[i].text;
2976 int len = tp[i].len;
2977
2978 if (len - INT3_INSN_SIZE > 0) {
2979 memcpy(old + INT3_INSN_SIZE,
2980 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2981 len - INT3_INSN_SIZE);
2982
2983 if (len == 6) {
2984 _new[0] = 0x0f;
2985 memcpy(_new + 1, new, 5);
2986 new = _new;
2987 }
2988
2989 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2990 new + INT3_INSN_SIZE,
2991 len - INT3_INSN_SIZE);
2992
2993 do_sync++;
2994 }
2995
2996 /*
2997 * Emit a perf event to record the text poke, primarily to
2998 * support Intel PT decoding which must walk the executable code
2999 * to reconstruct the trace. The flow up to here is:
3000 * - write INT3 byte
3001 * - IPI-SYNC
3002 * - write instruction tail
3003 * At this point the actual control flow will be through the
3004 * INT3 and handler and not hit the old or new instruction.
3005 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
3006 * can still be decoded. Subsequently:
3007 * - emit RECORD_TEXT_POKE with the new instruction
3008 * - IPI-SYNC
3009 * - write first byte
3010 * - IPI-SYNC
3011 * So before the text poke event timestamp, the decoder will see
3012 * either the old instruction flow or FUP/TIP of INT3. After the
3013 * text poke event timestamp, the decoder will see either the
3014 * new instruction flow or FUP/TIP of INT3. Thus decoders can
3015 * use the timestamp as the point at which to modify the
3016 * executable code.
3017 * The old instruction is recorded so that the event can be
3018 * processed forwards or backwards.
3019 */
3020 perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
3021 }
3022
3023 if (do_sync) {
3024 /*
3025 * According to Intel, this core syncing is very likely
3026 * not necessary and we'd be safe even without it. But
3027 * better safe than sorry (plus there's not only Intel).
3028 */
3029 text_poke_sync();
3030 }
3031
3032 /*
3033	 * Third step: replace the first byte (int3) with the first byte of the
3034	 * replacement opcode.
3035 */
3036 for (do_sync = 0, i = 0; i < nr_entries; i++) {
3037 u8 byte = tp[i].text[0];
3038
3039 if (tp[i].len == 6)
3040 byte = 0x0f;
3041
3042 if (byte == INT3_INSN_OPCODE)
3043 continue;
3044
3045 text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
3046 do_sync++;
3047 }
3048
3049 if (do_sync)
3050 text_poke_sync();
3051
3052 /*
3053 * Remove and wait for refs to be zero.
3054 */
3055 if (!atomic_dec_and_test(&bp_desc.refs))
3056 atomic_cond_read_acquire(&bp_desc.refs, !VAL);
3057 }
3058
3059	static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
3060 const void *opcode, size_t len, const void *emulate)
3061 {
3062 struct insn insn;
3063 int ret, i = 0;
3064
3065 if (len == 6)
3066 i = 1;
3067 memcpy((void *)tp->text, opcode+i, len-i);
3068 if (!emulate)
3069 emulate = opcode;
3070
3071 ret = insn_decode_kernel(&insn, emulate);
3072 BUG_ON(ret < 0);
3073
3074 tp->rel_addr = addr - (void *)_stext;
3075 tp->len = len;
3076 tp->opcode = insn.opcode.bytes[0];
3077
3078 if (is_jcc32(&insn)) {
3079 /*
3080 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
3081 */
3082 tp->opcode = insn.opcode.bytes[1] - 0x10;
3083 }
3084
3085 switch (tp->opcode) {
3086 case RET_INSN_OPCODE:
3087 case JMP32_INSN_OPCODE:
3088 case JMP8_INSN_OPCODE:
3089 /*
3090 * Control flow instructions without implied execution of the
3091 * next instruction can be padded with INT3.
3092 */
3093 for (i = insn.length; i < len; i++)
3094 BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
3095 break;
3096
3097 default:
3098 BUG_ON(len != insn.length);
3099 }
3100
3101 switch (tp->opcode) {
3102 case INT3_INSN_OPCODE:
3103 case RET_INSN_OPCODE:
3104 break;
3105
3106 case CALL_INSN_OPCODE:
3107 case JMP32_INSN_OPCODE:
3108 case JMP8_INSN_OPCODE:
3109 case 0x70 ... 0x7f: /* Jcc */
3110 tp->disp = insn.immediate.value;
3111 break;
3112
3113 default: /* assume NOP */
3114 switch (len) {
3115 case 2: /* NOP2 -- emulate as JMP8+0 */
3116 BUG_ON(memcmp(emulate, x86_nops[len], len));
3117 tp->opcode = JMP8_INSN_OPCODE;
3118 tp->disp = 0;
3119 break;
3120
3121 case 5: /* NOP5 -- emulate as JMP32+0 */
3122 BUG_ON(memcmp(emulate, x86_nops[len], len));
3123 tp->opcode = JMP32_INSN_OPCODE;
3124 tp->disp = 0;
3125 break;
3126
3127 default: /* unknown instruction */
3128 BUG();
3129 }
3130 break;
3131 }
3132 }
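/*
 * Note on the NOP emulation above: a relative JMP with displacement 0
 * simply falls through to the next instruction, so while the INT3 byte
 * is live, emulating NOP2 as JMP8+0 and NOP5 as JMP32+0 is behaviorally
 * identical to executing the same-length NOP.
 */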
3133
3134 /*
3135	 * We rely hard on tp_vec being ordered; ensure this is so by flushing
3136 * early if needed.
3137 */
3138	static bool tp_order_fail(void *addr)
3139 {
3140 struct text_poke_loc *tp;
3141
3142 if (!tp_vec_nr)
3143 return false;
3144
3145 if (!addr) /* force */
3146 return true;
3147
3148 tp = &tp_vec[tp_vec_nr - 1];
3149 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
3150 return true;
3151
3152 return false;
3153 }
3154
3155	static void text_poke_flush(void *addr)
3156 {
3157 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
3158 text_poke_bp_batch(tp_vec, tp_vec_nr);
3159 tp_vec_nr = 0;
3160 }
3161 }
3162
3163	void text_poke_finish(void)
3164 {
3165 text_poke_flush(NULL);
3166 }
3167
3168	void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
3169 {
3170 struct text_poke_loc *tp;
3171
3172 text_poke_flush(addr);
3173
3174 tp = &tp_vec[tp_vec_nr++];
3175 text_poke_loc_init(tp, addr, opcode, len, emulate);
3176 }
3177
3178 /**
3179 * text_poke_bp() -- update instructions on live kernel on SMP
3180 * @addr: address to patch
3181 * @opcode: opcode of new instruction
3182 * @len: length to copy
3183 * @emulate: instruction to be emulated
3184 *
3185 * Update a single instruction with the vector in the stack, avoiding
3186 * dynamically allocated memory. This function should be used when it is
3187 * not possible to allocate memory.
3188 */
3189	void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
3190 {
3191 struct text_poke_loc tp;
3192
3193 text_poke_loc_init(&tp, addr, opcode, len, emulate);
3194 text_poke_bp_batch(&tp, 1);
3195 }
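/*
 * Illustrative usage, turning a 5-byte NOP site into a CALL (the 'site'
 * and 'target' addresses are hypothetical):
 *
 *	u8 insn[5] = { CALL_INSN_OPCODE, };
 *	s32 disp = (long)target - (long)site - CALL_INSN_SIZE;
 *
 *	memcpy(insn + 1, &disp, sizeof(disp));
 *	text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
 */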
3196