/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, both of which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE 5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe only if NMIs and MCE handlers are
 * stopped and no thread can be preempted in the instructions being modified
 * (no iret to an invalid instruction possible), or if the instructions are
 * changed from one consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok);
extern void *text_poke_set(void *addr, int c, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);

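/*
 * Usage sketch (illustrative only; 'site' and 'new_insn' are hypothetical):
 * text_poke_bp() patches a live instruction by first writing an INT3 byte,
 * then the remaining bytes, then the final opcode byte, with sync IPIs in
 * between; poke_int3_handler() emulates the target instruction for any CPU
 * that hits the temporary INT3.  A NULL 'emulate' argument means "emulate
 * the instruction just written".
 *
 *	text_poke_bp(site, new_insn, JMP32_INSN_SIZE, NULL);
 *
 * For many sites, queue the pokes and flush them as one batch:
 *
 *	text_poke_queue(site1, insn1, len1, NULL);
 *	text_poke_queue(site2, insn2, len2, NULL);
 *	text_poke_finish();
 */
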
#define INT3_INSN_SIZE 1
#define INT3_INSN_OPCODE 0xCC

#define RET_INSN_SIZE 1
#define RET_INSN_OPCODE 0xC3

#define CALL_INSN_SIZE 5
#define CALL_INSN_OPCODE 0xE8

#define JMP32_INSN_SIZE 5
#define JMP32_INSN_OPCODE 0xE9

#define JMP8_INSN_SIZE 2
#define JMP8_INSN_OPCODE 0xEB

#define DISP32_SIZE 4

/* Return the instruction length for a given single-byte opcode, 0 if unknown. */
static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn) \
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

/*
 * Byte-level view of a patchable instruction: one opcode byte optionally
 * followed by a 32-bit relative displacement.
 */
union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

static __always_inline
void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size)
{
	union text_poke_insn *insn = buf;

	BUG_ON(size < text_opcode_size(opcode));

	/*
	 * Hide the addresses to avoid the compiler folding in constants when
	 * referencing code; these can mess up annotations like
	 * ANNOTATE_NOENDBR.
	 */
	OPTIMIZER_HIDE_VAR(insn);
	OPTIMIZER_HIDE_VAR(addr);
	OPTIMIZER_HIDE_VAR(dest);

	insn->opcode = opcode;

	if (size > 1) {
		insn->disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn->disp >> 31) != (insn->disp >> 7));
		}
	}
}

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	__text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode));
	return &insn.text;
}

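/*
 * Usage sketch (illustrative; 'site' and 'target' are hypothetical): the
 * returned buffer is a single static instance, so consume it (e.g. poke it)
 * before generating the next instruction:
 *
 *	text_poke_bp(site, text_gen_insn(JMP32_INSN_OPCODE, site, target),
 *		     JMP32_INSN_SIZE, NULL);
 */
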
extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}

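/*
 * Emulation sketch (illustrative; 'opcode' and 'dest' are hypothetical
 * handler-side variables): when a CPU traps on the temporary INT3,
 * regs->ip points one byte past the INT3.  To make progress as if the new
 * CALL were already complete, push the return address and branch, which is
 * exactly int3_emulate_call():
 *
 *	if (opcode == CALL_INSN_OPCODE)
 *		int3_emulate_call(regs, (unsigned long)dest);
 *
 * regs->ip - INT3_INSN_SIZE rewinds to the start of the patched site, and
 * adding CALL_INSN_SIZE yields the byte after the full 5-byte call.
 */
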
static __always_inline
void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
{
	/*
	 * 'cc' is the low nibble of the Jcc opcode: bits 3-1 select which
	 * flag (or flag combination) to test, bit 0 inverts the test.
	 */
	static const unsigned long jcc_mask[6] = {
		[0] = X86_EFLAGS_OF,
		[1] = X86_EFLAGS_CF,
		[2] = X86_EFLAGS_ZF,
		[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
		[4] = X86_EFLAGS_SF,
		[5] = X86_EFLAGS_PF,
	};

	bool invert = cc & 1;
	bool match;

	if (cc < 0xc) {
		match = regs->flags & jcc_mask[cc >> 1];
	} else {
		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (cc >= 0xe)
			match = match || (regs->flags & X86_EFLAGS_ZF);
	}

	if ((match && !invert) || (!match && invert))
		ip += disp;

	int3_emulate_jmp(regs, ip);
}

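/*
 * Usage sketch (illustrative; 'next_ip' and 'disp' are hypothetical): to
 * emulate "jnz disp", whose condition nibble is 0x5, pass the fall-through
 * address as 'ip'.  Index 0x5 >> 1 == 2 selects X86_EFLAGS_ZF and the low
 * bit inverts the test, so 'disp' is added only when ZF is clear:
 *
 *	int3_emulate_jcc(regs, 0x5, next_ip, disp);
 */
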
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */