// SPDX-License-Identifier: GPL-2.0-only

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

#include "decode-insn.h"

#define UPROBE_TRAP_NR	UINT_MAX

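/*
 * With the C extension the breakpoint planted by uprobes is the 16-bit
 * c.ebreak, so only the low halfword of the fetched opcode is
 * significant; otherwise compare the full 32-bit ebreak.
 */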
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}

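/*
 * An ebreak exception leaves epc pointing at the breakpoint itself, so
 * the probed address is simply the saved instruction pointer.
 */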
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

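/*
 * Called when a probe is installed: record the real length of the
 * probed instruction (16-bit compressed or 32-bit) and decode it to
 * decide whether it can be simulated in the kernel (INSN_GOOD_NO_SLOT)
 * or must be single-stepped out of line from an XOL slot (INSN_GOOD).
 */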
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

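/*
 * Prepare to single-step the original instruction out of line: stash
 * bad_cause and overwrite it with the UPROBE_TRAP_NR sentinel so that a
 * fault taken during the step can be recognised, then point the PC at
 * the XOL slot.
 */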
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	return 0;
}

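/*
 * The instruction in the XOL slot has executed; restore bad_cause and
 * resume at the instruction following the probed address. If the
 * sentinel is gone, a trap during the step went unnoticed.
 */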
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
	current->thread.bad_cause = utask->autask.saved_cause;

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	return 0;
}

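/*
 * If bad_cause no longer holds the sentinel written by
 * arch_uprobe_pre_xol(), the single-stepped instruction trapped.
 */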
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.bad_cause != UPROBE_TRAP_NR)
		return true;

	return false;
}

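/*
 * For instructions the decoder marked as simulable, emulate them here
 * and return true so the core skips the out-of-line single step
 * entirely; the simulate handler takes care of advancing the PC.
 */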
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

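/*
 * Undo arch_uprobe_pre_xol() when the out-of-line step cannot be
 * completed.
 */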
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.bad_cause = utask->autask.saved_cause;
	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);
}

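/*
 * The stack grows down, so a return instance is still live only while
 * the current stack pointer has not risen above the one recorded when
 * the return address was hijacked; a chained call at the same level may
 * reuse exactly that stack pointer, hence the <=.
 */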
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

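/*
 * RISC-V keeps the return address in the ra register rather than on the
 * stack, so hijacking it for the uretprobe trampoline is a simple
 * register swap: return the original ra for the core to save away.
 */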
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra;

	ra = regs->ra;

	regs->ra = trampoline_vaddr;

	return ra;
}

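/*
 * Uprobe handling on RISC-V goes through the trap handlers below rather
 * than the notifier chain, so there is nothing to do here.
 */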
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

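/*
 * Called from the ebreak trap path: give the generic uprobes layer a
 * chance to claim the breakpoint; returns true if it was ours.
 */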
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	if (uprobe_pre_sstep_notifier(regs))
		return true;

	return false;
}

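/*
 * Handles the trap raised by the ebreak that arch_uprobe_copy_ixol()
 * places after the instruction in the XOL slot, completing the
 * simulated single step.
 */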
bool uprobe_single_step_handler(struct pt_regs *regs)
{
	if (uprobe_post_sstep_notifier(regs))
		return true;

	return false;
}

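/*
 * Populate the XOL slot. RISC-V has no hardware single-step, so besides
 * copying the probed instruction into the slot an ebreak is planted
 * right behind it; executing the slot then traps straight back into
 * uprobe_single_step_handler() above.
 */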
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak behind the opcode to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs a vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}