Lines matching "use", "guard", "pages" in kernel/kprobes.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
10 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
12 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
14 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
16 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
18 * <prasanna@in.ibm.com> added function-return probes.
58 * - Normal hlist traversal and RCU add/del while 'kprobe_mutex' is held.
60 * - RCU hlist traversal with preemption disabled (breakpoint handlers)
78 * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where
86 * single-stepped. x86_64, POWER4 and above have no-exec support and
101 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); in slots_per_page()
113 * Use execmem_alloc() so this page is within +/- 2GB of where the in alloc_insn_page()
116 * (e.g. x86-64 needs this to handle the %rip-relative fixups.) in alloc_insn_page()
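/*
 * A minimal sketch of the allocator this comment belongs to, assuming the
 * execmem API (execmem_alloc() with EXECMEM_KPROBES); the in-tree body may
 * differ between kernel releases.
 */
static void *alloc_insn_page(void)
{
	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}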
131 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
138 * __get_insn_slot() - Find a slot on an executable page for an instruction.
146 guard(mutex)(&c->mutex); in __get_insn_slot()
148 guard(rcu)(); in __get_insn_slot()
149 list_for_each_entry_rcu(kip, &c->pages, list) { in __get_insn_slot()
150 if (kip->nused < slots_per_page(c)) { in __get_insn_slot()
154 if (kip->slot_used[i] == SLOT_CLEAN) { in __get_insn_slot()
155 kip->slot_used[i] = SLOT_USED; in __get_insn_slot()
156 kip->nused++; in __get_insn_slot()
157 return kip->insns + (i * c->insn_size); in __get_insn_slot()
160 /* kip->nused is broken. Fix it. */ in __get_insn_slot()
161 kip->nused = slots_per_page(c); in __get_insn_slot()
166 } while (c->nr_garbage && collect_garbage_slots(c) == 0); in __get_insn_slot()
173 kip->insns = c->alloc(); in __get_insn_slot()
174 if (!kip->insns) { in __get_insn_slot()
178 INIT_LIST_HEAD(&kip->list); in __get_insn_slot()
179 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); in __get_insn_slot()
180 kip->slot_used[0] = SLOT_USED; in __get_insn_slot()
181 kip->nused = 1; in __get_insn_slot()
182 kip->ngarbage = 0; in __get_insn_slot()
183 kip->cache = c; in __get_insn_slot()
184 list_add_rcu(&kip->list, &c->pages); in __get_insn_slot()
187 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, in __get_insn_slot()
188 PAGE_SIZE, false, c->sym); in __get_insn_slot()
190 return kip->insns; in __get_insn_slot()
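/*
 * A hedged sketch of how architecture code typically consumes this slot
 * cache when preparing a probe for out-of-line single-stepping. The function
 * name is illustrative and the copy is modeled on the x86
 * arch_prepare_kprobe(); error paths are trimmed.
 */
static int example_prepare_probe(struct kprobe *p)
{
	p->ainsn.insn = get_insn_slot();	/* wrapper around __get_insn_slot() */
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* Copy the probed instruction into the executable slot. */
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}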
196 kip->slot_used[idx] = SLOT_CLEAN; in collect_one_slot()
197 kip->nused--; in collect_one_slot()
198 if (kip->nused != 0) in collect_one_slot()
202 * Page is no longer in use. Free it unless in collect_one_slot()
207 if (!list_is_singular(&kip->list)) { in collect_one_slot()
213 (unsigned long)kip->insns, PAGE_SIZE, true, in collect_one_slot()
214 kip->cache->sym); in collect_one_slot()
215 list_del_rcu(&kip->list); in collect_one_slot()
217 kip->cache->free(kip->insns); in collect_one_slot()
227 /* Ensure no task is still running on the garbage slots */ in collect_garbage_slots()
230 list_for_each_entry_safe(kip, next, &c->pages, list) { in collect_garbage_slots()
233 if (kip->ngarbage == 0) in collect_garbage_slots()
235 kip->ngarbage = 0; /* we will collect all garbage slots */ in collect_garbage_slots()
237 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) in collect_garbage_slots()
241 c->nr_garbage = 0; in collect_garbage_slots()
251 guard(rcu)(); in __find_insn_page()
252 list_for_each_entry_rcu(kip, &c->pages, list) { in __find_insn_page()
253 idx = ((long)slot - (long)kip->insns) / in __find_insn_page()
254 (c->insn_size * sizeof(kprobe_opcode_t)); in __find_insn_page()
263 return -1; in __find_insn_page()
272 guard(mutex)(&c->mutex); in __free_insn_slot()
277 WARN_ON(kip->slot_used[idx] != SLOT_USED); in __free_insn_slot()
279 kip->slot_used[idx] = SLOT_DIRTY; in __free_insn_slot()
280 kip->ngarbage++; in __free_insn_slot()
281 if (++c->nr_garbage > slots_per_page(c)) in __free_insn_slot()
300 list_for_each_entry_rcu(kip, &c->pages, list) { in __is_insn_slot_addr()
301 if (addr >= (unsigned long)kip->insns && in __is_insn_slot_addr()
302 addr < (unsigned long)kip->insns + PAGE_SIZE) { in __is_insn_slot_addr()
316 int ret = -ERANGE; in kprobe_cache_get_kallsym()
319 list_for_each_entry_rcu(kip, &c->pages, list) { in kprobe_cache_get_kallsym()
320 if ((*symnum)--) in kprobe_cache_get_kallsym()
322 strscpy(sym, c->sym, KSYM_NAME_LEN); in kprobe_cache_get_kallsym()
324 *value = (unsigned long)kip->insns; in kprobe_cache_get_kallsym()
350 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
357 /* We have preemption disabled, so it is safe to use the __ versions */
370 * - under the 'kprobe_mutex' - during kprobe_[un]register().
372 * - with preemption disabled - from architecture specific code.
382 if (p->addr == addr) in get_kprobe()
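/*
 * A hedged sketch of a lookup from a context that holds neither
 * 'kprobe_mutex' nor runs in a breakpoint handler: open an RCU read-side
 * critical section around get_kprobe(). The helper name is illustrative.
 */
static bool example_addr_has_live_probe(void *addr)
{
	struct kprobe *p;
	bool live;

	rcu_read_lock();
	p = get_kprobe(addr);
	live = p && !kprobe_disabled(p);
	rcu_read_unlock();
	return live;
}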
395 return p->pre_handler == aggr_pre_handler; in kprobe_aggrprobe()
402 list_empty(&p->list); in kprobe_unused()
408 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); in copy_kprobe()
409 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); in copy_kprobe()
418 * This must be called from the arch-dependent optimized caller.
424 list_for_each_entry_rcu(kp, &p->list, list) { in opt_pre_handler()
425 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { in opt_pre_handler()
427 kp->pre_handler(kp, regs); in opt_pre_handler()
452 return arch_prepared_optinsn(&op->optinsn); in kprobe_optready()
469 return kprobe_disabled(p) && list_empty(&op->list); in kprobe_disarmed()
479 if (!list_empty(&op->list)) in kprobe_queued()
497 p = get_kprobe(addr - i); in get_optimized_kprobe()
526 * stop_machine() and cpu-hotplug modifies the 'online_cpus'. in do_optimize_kprobes()
527 * And same time, 'text_mutex' will be held in cpu-hotplug and here. in do_optimize_kprobes()
528 * This combination can cause a deadlock (cpu-hotplug tries to lock in do_optimize_kprobes()
531 * To avoid this deadlock, caller must have locked cpu-hotplug in do_optimize_kprobes()
532 * for preventing cpu-hotplug outside of 'text_mutex' locking. in do_optimize_kprobes()
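/*
 * A hedged sketch of the lock ordering the comment above prescribes:
 * cpus_read_lock() is taken before 'text_mutex', so a concurrent CPU-hotplug
 * operation cannot invert the order and deadlock.
 */
	cpus_read_lock();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();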
562 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in do_unoptimize_kprobes()
564 if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp)) in do_unoptimize_kprobes()
565 arch_disarm_kprobe(&op->kp); in do_unoptimize_kprobes()
566 if (kprobe_unused(&op->kp)) { in do_unoptimize_kprobes()
572 hlist_del_rcu(&op->kp.hlist); in do_unoptimize_kprobes()
574 list_del_init(&op->list); in do_unoptimize_kprobes()
584 list_del_init(&op->list); in do_free_cleaned_kprobes()
585 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { in do_free_cleaned_kprobes()
588 * still in use, keep it on kprobes hash list. in do_free_cleaned_kprobes()
592 free_aggr_kprobe(&op->kp); in do_free_cleaned_kprobes()
605 guard(mutex)(&kprobe_mutex); in kprobe_optimizer()
608 guard(mutex)(&text_mutex); in kprobe_optimizer()
621 * to the 2nd-Nth byte of a jump instruction. This wait avoids that. in kprobe_optimizer()
622 * Note that on a non-preemptive kernel, this is transparently converted in kprobe_optimizer()
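/*
 * The wait described above is done with RCU-tasks: once this call returns,
 * every task has passed a voluntary context switch, so none can still be
 * executing on the middle bytes of an optimized jump.
 */
	synchronize_rcu_tasks();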
658 guard(mutex)(&kprobe_mutex); in wait_for_kprobe_optimizer()
686 if (p->post_handler) in optimize_kprobe()
696 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { in optimize_kprobe()
699 list_del_init(&op->list); in optimize_kprobe()
703 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; in optimize_kprobe()
709 if (WARN_ON_ONCE(!list_empty(&op->list))) in optimize_kprobe()
712 list_add(&op->list, &optimizing_list); in optimize_kprobe()
721 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in force_unoptimize_kprobe()
736 if (!list_empty(&op->list)) { in unoptimize_kprobe()
745 list_move(&op->list, &freeing_list); in unoptimize_kprobe()
749 list_del_init(&op->list); in unoptimize_kprobe()
750 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in unoptimize_kprobe()
760 list_add(&op->list, &unoptimizing_list); in unoptimize_kprobe()
775 WARN_ON_ONCE(list_empty(&op->list)); in reuse_unused_kprobe()
777 ap->flags &= ~KPROBE_FLAG_DISABLED; in reuse_unused_kprobe()
778 /* Optimize it again. (remove from 'op->list') */ in reuse_unused_kprobe()
780 return -EINVAL; in reuse_unused_kprobe()
792 if (!list_empty(&op->list)) in kill_optimized_kprobe()
794 list_del_init(&op->list); in kill_optimized_kprobe()
795 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in kill_optimized_kprobe()
804 list_move(&op->list, &freeing_list); in kill_optimized_kprobe()
836 INIT_LIST_HEAD(&op->list); in alloc_aggr_kprobe()
837 op->kp.addr = p->addr; in alloc_aggr_kprobe()
840 return &op->kp; in alloc_aggr_kprobe()
854 /* It is impossible to optimize an ftrace-based kprobe. */ in try_to_optimize_kprobe()
859 guard(cpus_read_lock)(); in try_to_optimize_kprobe()
860 guard(jump_label_lock)(); in try_to_optimize_kprobe()
861 guard(mutex)(&text_mutex); in try_to_optimize_kprobe()
868 if (!arch_prepared_optinsn(&op->optinsn)) { in try_to_optimize_kprobe()
885 guard(mutex)(&kprobe_mutex); in optimize_all_kprobes()
899 pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); in optimize_all_kprobes()
909 guard(mutex)(&kprobe_mutex); in unoptimize_all_kprobes()
926 pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); in unoptimize_all_kprobes()
937 guard(mutex)(&kprobe_sysctl_mutex); in proc_kprobes_optimization_handler()
951 .procname = "kprobes-optimization",
975 _p = get_optimized_kprobe(p->addr); in __arm_kprobe()
996 /* If another kprobe was blocked, re-optimize it. */ in __disarm_kprobe()
997 _p = get_optimized_kprobe(p->addr); in __disarm_kprobe()
1004 * unoptimized because of this probe here. It should be re-optimized in __disarm_kprobe()
1031 return -EINVAL; in reuse_unused_kprobe()
1068 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); in __arm_kprobe_ftrace()
1069 if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) in __arm_kprobe_ftrace()
1074 if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) { in __arm_kprobe_ftrace()
1079 ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); in __arm_kprobe_ftrace()
1090 bool ipmodify = (p->post_handler != NULL); in arm_kprobe_ftrace()
1106 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret)) in __disarm_kprobe_ftrace()
1110 (*cnt)--; in __disarm_kprobe_ftrace()
1112 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); in __disarm_kprobe_ftrace()
1113 WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n", in __disarm_kprobe_ftrace()
1114 p->addr, ret); in __disarm_kprobe_ftrace()
1120 bool ipmodify = (p->post_handler != NULL); in disarm_kprobe_ftrace()
1134 return -ENODEV; in arm_kprobe_ftrace()
1139 return -ENODEV; in disarm_kprobe_ftrace()
1145 /* Must ensure p->addr is really on ftrace */ in prepare_kprobe()
1157 guard(cpus_read_lock)(); in arm_kprobe()
1158 guard(mutex)(&text_mutex); in arm_kprobe()
1168 guard(cpus_read_lock)(); in disarm_kprobe()
1169 guard(mutex)(&text_mutex); in disarm_kprobe()
1175 * Aggregate handlers for multiple-kprobe support - these handlers
1176 * take care of invoking the individual kprobe handlers on 'p->list'.
1182 list_for_each_entry_rcu(kp, &p->list, list) { in aggr_pre_handler()
1183 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { in aggr_pre_handler()
1185 if (kp->pre_handler(kp, regs)) in aggr_pre_handler()
1199 list_for_each_entry_rcu(kp, &p->list, list) { in aggr_post_handler()
1200 if (kp->post_handler && likely(!kprobe_disabled(kp))) { in aggr_post_handler()
1202 kp->post_handler(kp, regs, flags); in aggr_post_handler()
1215 p->nmissed++; in kprobes_inc_nmissed_count()
1217 list_for_each_entry_rcu(kp, &p->list, list) in kprobes_inc_nmissed_count()
1218 kp->nmissed++; in kprobes_inc_nmissed_count()
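/*
 * A hedged sketch of when the aggregate handlers come into play: a second
 * kprobe registered at an already-probed address turns the registered probe
 * into an aggrprobe, and aggr_pre_handler() above then walks 'p->list' to
 * invoke each pre_handler in turn. All names below are illustrative.
 */
static int pre_a(struct kprobe *p, struct pt_regs *regs) { return 0; }
static int pre_b(struct kprobe *p, struct pt_regs *regs) { return 0; }

static struct kprobe kp_a = { .symbol_name = "kernel_clone", .pre_handler = pre_a };
static struct kprobe kp_b = { .symbol_name = "kernel_clone", .pre_handler = pre_b };

static int __init aggr_example_init(void)
{
	register_kprobe(&kp_a);
	return register_kprobe(&kp_b);	/* the second one creates the aggrprobe */
}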
1234 kcb->kprobe_status = KPROBE_HIT_ACTIVE; in kprobe_busy_begin()
1243 /* Add the new probe to 'ap->list'. */
1246 if (p->post_handler) in add_new_kprobe()
1249 list_add_rcu(&p->list, &ap->list); in add_new_kprobe()
1250 if (p->post_handler && !ap->post_handler) in add_new_kprobe()
1251 ap->post_handler = aggr_post_handler; in add_new_kprobe()
1265 ap->addr = p->addr; in init_aggr_kprobe()
1266 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; in init_aggr_kprobe()
1267 ap->pre_handler = aggr_pre_handler; in init_aggr_kprobe()
1269 if (p->post_handler && !kprobe_gone(p)) in init_aggr_kprobe()
1270 ap->post_handler = aggr_post_handler; in init_aggr_kprobe()
1272 INIT_LIST_HEAD(&ap->list); in init_aggr_kprobe()
1273 INIT_HLIST_NODE(&ap->hlist); in init_aggr_kprobe()
1275 list_add_rcu(&p->list, &ap->list); in init_aggr_kprobe()
1276 hlist_replace_rcu(&p->hlist, &ap->hlist); in init_aggr_kprobe()
1289 guard(jump_label_lock)(); in register_aggr_kprobe()
1290 guard(mutex)(&text_mutex); in register_aggr_kprobe()
1296 return -ENOMEM; in register_aggr_kprobe()
1328 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) in register_aggr_kprobe()
1338 ap->flags &= ~KPROBE_FLAG_DISABLED; in register_aggr_kprobe()
1343 ap->flags |= KPROBE_FLAG_DISABLED; in register_aggr_kprobe()
1344 list_del_rcu(&p->list); in register_aggr_kprobe()
1370 if (addr >= ent->start_addr && addr < ent->end_addr) in __within_kprobe_blacklist()
1383 /* Check if the address is on a suffixed symbol */ in within_kprobe_blacklist()
1397 * arch_adjust_kprobe_addr - adjust the address
1429 return ERR_PTR(-EINVAL); in _kprobe_addr()
1441 return ERR_PTR(-ENOENT); in _kprobe_addr()
1450 return ERR_PTR(-ENOENT); in _kprobe_addr()
1451 addr = (void *)addr - offset; in _kprobe_addr()
1454 * Then ask the architecture to re-combine them, taking care of in _kprobe_addr()
1460 return ERR_PTR(-EINVAL); in _kprobe_addr()
1469 return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); in kprobe_addr()
1482 ap = get_kprobe(p->addr); in __get_valid_kprobe()
1489 list_for_each_entry(list_p, &ap->list, list) in __get_valid_kprobe()
1498 * Warn and return error if the kprobe is being re-registered since
1503 guard(mutex)(&kprobe_mutex); in warn_kprobe_rereg()
1506 return -EINVAL; in warn_kprobe_rereg()
1513 unsigned long addr = (unsigned long)p->addr; in check_ftrace_location()
1517 p->flags |= KPROBE_FLAG_FTRACE; in check_ftrace_location()
1519 return -EINVAL; in check_ftrace_location()
1545 guard(jump_label_lock)(); in check_kprobe_address_safe()
1549 if (!core_kernel_text((unsigned long) p->addr)) { in check_kprobe_address_safe()
1550 guard(rcu)(); in check_kprobe_address_safe()
1551 *probed_mod = __module_text_address((unsigned long) p->addr); in check_kprobe_address_safe()
1553 return -EINVAL; in check_kprobe_address_safe()
1560 return -ENOENT; in check_kprobe_address_safe()
1563 if (in_gate_area_no_mm((unsigned long) p->addr) || in check_kprobe_address_safe()
1564 within_kprobe_blacklist((unsigned long) p->addr) || in check_kprobe_address_safe()
1565 jump_label_text_reserved(p->addr, p->addr) || in check_kprobe_address_safe()
1566 static_call_text_reserved(p->addr, p->addr) || in check_kprobe_address_safe()
1567 find_bug((unsigned long)p->addr) || in check_kprobe_address_safe()
1568 is_cfi_preamble_symbol((unsigned long)p->addr)) { in check_kprobe_address_safe()
1570 return -EINVAL; in check_kprobe_address_safe()
1579 if (within_module_init((unsigned long)p->addr, *probed_mod) && in check_kprobe_address_safe()
1582 return -ENOENT; in check_kprobe_address_safe()
1594 guard(mutex)(&kprobe_mutex); in __register_kprobe()
1596 old_p = get_kprobe(p->addr); in __register_kprobe()
1603 guard(mutex)(&text_mutex); in __register_kprobe()
1609 INIT_HLIST_NODE(&p->hlist); in __register_kprobe()
1610 hlist_add_head_rcu(&p->hlist, in __register_kprobe()
1611 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); in __register_kprobe()
1616 hlist_del_rcu(&p->hlist); in __register_kprobe()
1634 addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); in register_kprobe()
1637 p->addr = addr; in register_kprobe()
1644 p->flags &= KPROBE_FLAG_DISABLED; in register_kprobe()
1646 p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; in register_kprobe()
1647 p->nmissed = 0; in register_kprobe()
1648 INIT_LIST_HEAD(&p->list); in register_kprobe()
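/*
 * A hedged usage sketch for the registration path above, modeled on
 * samples/kprobes/kprobe_example.c; the probed symbol is an assumption
 * chosen for illustration.
 */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %pS\n", p->addr);
	return 0;	/* 0 lets the probed instruction execute */
}

static struct kprobe example_kp = {
	.symbol_name	= "kernel_clone",	/* assumed symbol */
	.pre_handler	= example_pre,
};

static int __init example_kp_init(void)
{
	return register_kprobe(&example_kp);	/* 0 on success, -errno on failure */
}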
1670 list_for_each_entry(kp, &ap->list, list) in aggr_kprobe_disabled()
1691 return ERR_PTR(-EINVAL); in __disable_kprobe()
1698 p->flags |= KPROBE_FLAG_DISABLED; in __disable_kprobe()
1711 p->flags &= ~KPROBE_FLAG_DISABLED; in __disable_kprobe()
1715 orig_p->flags |= KPROBE_FLAG_DISABLED; in __disable_kprobe()
1736 * If the probe is an independent (and non-optimized) kprobe in __unregister_kprobe_top()
1741 (list_is_singular(&ap->list) && kprobe_disarmed(ap))) { in __unregister_kprobe_top()
1746 hlist_del_rcu(&ap->hlist); in __unregister_kprobe_top()
1751 if (p->post_handler && !kprobe_gone(p)) { in __unregister_kprobe_top()
1752 list_for_each_entry(list_p, &ap->list, list) { in __unregister_kprobe_top()
1753 if ((list_p != p) && (list_p->post_handler)) in __unregister_kprobe_top()
1757 if (list_entry_is_head(list_p, &ap->list, list)) { in __unregister_kprobe_top()
1759 * For the kprobe-on-ftrace case, we keep the in __unregister_kprobe_top()
1764 ap->post_handler = NULL; in __unregister_kprobe_top()
1772 list_del_rcu(&p->list); in __unregister_kprobe_top()
1787 if (list_empty(&p->list)) in __unregister_kprobe_bottom()
1790 else if (list_is_singular(&p->list)) { in __unregister_kprobe_bottom()
1792 ap = list_entry(p->list.next, struct kprobe, list); in __unregister_kprobe_bottom()
1793 list_del(&p->list); in __unregister_kprobe_bottom()
1804 return -EINVAL; in register_kprobes()
1832 kps[i]->addr = NULL; in unregister_kprobes()
1836 if (kps[i]->addr) in unregister_kprobes()
1862 ri->rph = context; in kretprobe_init_inst()
1874 struct kretprobe_holder *rph = ri->rph; in free_rp_inst_rcu()
1876 objpool_drop(ri, &rph->pool); in free_rp_inst_rcu()
1885 objpool_push(ri, &rp->rph->pool); in recycle_rp_inst()
1887 call_rcu(&ri->rcu, free_rp_inst_rcu); in recycle_rp_inst()
1908 node = __llist_del_all(&tk->kretprobe_instances); in kprobe_flush_task()
1911 node = node->next; in kprobe_flush_task()
1922 struct kretprobe_holder *rph = rp->rph; in free_rp_inst()
1926 rp->rph = NULL; in free_rp_inst()
1927 objpool_fini(&rph->pool); in free_rp_inst()
1938 node = tsk->kretprobe_instances.first; in __kretprobe_find_ret_addr()
1940 node = node->next; in __kretprobe_find_ret_addr()
1944 if (ri->ret_addr != kretprobe_trampoline_addr()) { in __kretprobe_find_ret_addr()
1946 return ri->ret_addr; in __kretprobe_find_ret_addr()
1948 node = node->next; in __kretprobe_find_ret_addr()
1955 * kretprobe_find_ret_addr -- Find the correct return address modified by kretprobe
1964 * to get the correct return address - which is compared with the
1983 } while (ri->fp != fp); in kretprobe_find_ret_addr()
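/*
 * A hedged sketch of how a stack unwinder consumes the lookup above;
 * 'tsk' and 'fp' stand for the unwound task and its frame pointer.
 */
	struct llist_node *cur = NULL;
	unsigned long ret_addr = kretprobe_find_ret_addr(tsk, fp, &cur);

	if (ret_addr)	/* report this instead of the trampoline address */
		pr_info("real return address: %pS\n", (void *)ret_addr);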
2021 first = current->kretprobe_instances.first; in __kretprobe_trampoline_handler()
2025 if (WARN_ON_ONCE(ri->fp != frame_pointer)) in __kretprobe_trampoline_handler()
2029 if (rp && rp->handler) { in __kretprobe_trampoline_handler()
2032 __this_cpu_write(current_kprobe, &rp->kp); in __kretprobe_trampoline_handler()
2033 ri->ret_addr = correct_ret_addr; in __kretprobe_trampoline_handler()
2034 rp->handler(ri, regs); in __kretprobe_trampoline_handler()
2040 first = first->next; in __kretprobe_trampoline_handler()
2046 first = current->kretprobe_instances.first; in __kretprobe_trampoline_handler()
2047 current->kretprobe_instances.first = node->next; in __kretprobe_trampoline_handler()
2048 node->next = NULL; in __kretprobe_trampoline_handler()
2053 first = first->next; in __kretprobe_trampoline_handler()
2069 struct kretprobe_holder *rph = rp->rph; in NOKPROBE_SYMBOL()
2072 ri = objpool_pop(&rph->pool); in NOKPROBE_SYMBOL()
2074 rp->nmissed++; in NOKPROBE_SYMBOL()
2078 if (rp->entry_handler && rp->entry_handler(ri, regs)) { in NOKPROBE_SYMBOL()
2079 objpool_push(ri, &rph->pool); in NOKPROBE_SYMBOL()
2085 __llist_add(&ri->llist, &current->kretprobe_instances); in NOKPROBE_SYMBOL()
2101 rhn = rethook_try_get(rp->rh); in pre_handler_kretprobe()
2103 rp->nmissed++; in pre_handler_kretprobe()
2109 if (rp->entry_handler && rp->entry_handler(ri, regs)) in pre_handler_kretprobe()
2127 if (WARN_ON_ONCE(!data) || !rp->handler) in kretprobe_rethook_handler()
2130 __this_cpu_write(current_kprobe, &rp->kp); in kretprobe_rethook_handler()
2132 kcb->kprobe_status = KPROBE_HIT_ACTIVE; in kretprobe_rethook_handler()
2135 rp->handler(ri, regs); in kretprobe_rethook_handler()
2144 * kprobe_on_func_entry() -- check whether the given address is a function entry
2151 * This returns 0 if the address is the function entry, or -EINVAL if it is not.
2152 * It also returns -ENOENT if the symbol or address lookup fails.
2154 * returns -EINVAL.
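/*
 * A hedged sketch of the return convention documented above; the symbol
 * name is an assumption.
 */
	int ret = kprobe_on_func_entry(NULL, "vfs_read", 0);

	if (!ret)
		pr_info("resolves to the function entry\n");
	else if (ret == -EINVAL)
		pr_info("resolves inside the function body\n");
	else	/* -ENOENT */
		pr_info("symbol or address lookup failed\n");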
2165 return -EINVAL; in kprobe_on_func_entry()
2176 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); in register_kretprobe()
2180 /* If only 'rp->kp.addr' is specified, check reregistering kprobes */ in register_kretprobe()
2181 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp)) in register_kretprobe()
2182 return -EINVAL; in register_kretprobe()
2185 addr = kprobe_addr(&rp->kp); in register_kretprobe()
2191 return -EINVAL; in register_kretprobe()
2195 if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) in register_kretprobe()
2196 return -E2BIG; in register_kretprobe()
2198 rp->kp.pre_handler = pre_handler_kretprobe; in register_kretprobe()
2199 rp->kp.post_handler = NULL; in register_kretprobe()
2201 /* Pre-allocate memory for max kretprobe instances */ in register_kretprobe()
2202 if (rp->maxactive <= 0) in register_kretprobe()
2203 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); in register_kretprobe()
2206 rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler, in register_kretprobe()
2208 rp->data_size, rp->maxactive); in register_kretprobe()
2209 if (IS_ERR(rp->rh)) in register_kretprobe()
2210 return PTR_ERR(rp->rh); in register_kretprobe()
2212 rp->nmissed = 0; in register_kretprobe()
2214 ret = register_kprobe(&rp->kp); in register_kretprobe()
2216 rethook_free(rp->rh); in register_kretprobe()
2217 rp->rh = NULL; in register_kretprobe()
2220 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); in register_kretprobe()
2221 if (!rp->rph) in register_kretprobe()
2222 return -ENOMEM; in register_kretprobe()
2224 if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size + in register_kretprobe()
2226 rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) { in register_kretprobe()
2227 kfree(rp->rph); in register_kretprobe()
2228 rp->rph = NULL; in register_kretprobe()
2229 return -ENOMEM; in register_kretprobe()
2231 rcu_assign_pointer(rp->rph->rp, rp); in register_kretprobe()
2232 rp->nmissed = 0; in register_kretprobe()
2234 ret = register_kprobe(&rp->kp); in register_kretprobe()
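/*
 * A hedged usage sketch for the kretprobe path above, modeled on
 * samples/kprobes/kretprobe_example.c; the probed symbol and names are
 * assumptions.
 */
static int example_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("kernel_clone returned %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe example_krp = {
	.kp.symbol_name	= "kernel_clone",
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* <= 0 picks the default computed above */
};

static int __init example_krp_init(void)
{
	return register_kretprobe(&example_krp);
}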
2247 return -EINVAL; in register_kretprobes()
2273 guard(mutex)(&kprobe_mutex); in unregister_kretprobes()
2275 if (__unregister_kprobe_top(&rps[i]->kp) < 0) in unregister_kretprobes()
2276 rps[i]->kp.addr = NULL; in unregister_kretprobes()
2278 rethook_free(rps[i]->rh); in unregister_kretprobes()
2280 rcu_assign_pointer(rps[i]->rph->rp, NULL); in unregister_kretprobes()
2286 if (rps[i]->kp.addr) { in unregister_kretprobes()
2287 __unregister_kprobe_bottom(&rps[i]->kp); in unregister_kretprobes()
2299 return -EOPNOTSUPP; in register_kretprobe()
2305 return -EOPNOTSUPP; in register_kretprobes()
2342 p->flags |= KPROBE_FLAG_GONE; in kill_kprobe()
2348 list_for_each_entry(kp, &p->list, list) in kill_kprobe()
2349 kp->flags |= KPROBE_FLAG_GONE; in kill_kprobe()
2350 p->post_handler = NULL; in kill_kprobe()
2365 guard(mutex)(&kprobe_mutex); in disable_kprobe()
2380 guard(mutex)(&kprobe_mutex); in enable_kprobe()
2385 return -EINVAL; in enable_kprobe()
2389 return -EINVAL; in enable_kprobe()
2392 kp->flags &= ~KPROBE_FLAG_DISABLED; in enable_kprobe()
2395 p->flags &= ~KPROBE_FLAG_DISABLED; in enable_kprobe()
2398 p->flags |= KPROBE_FLAG_DISABLED; in enable_kprobe()
2400 kp->flags |= KPROBE_FLAG_DISABLED; in enable_kprobe()
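/*
 * A hedged sketch of toggling a registered probe with the pair above;
 * 'example_kp' refers to the illustrative kprobe sketched earlier.
 */
	ret = disable_kprobe(&example_kp);	/* handlers stop firing; still registered */
	if (!ret)
		ret = enable_kprobe(&example_kp);	/* re-arm; -EINVAL if the probe is gone */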
2411 kp->symbol_name, kp->offset, kp->addr); in dump_kprobe()
2422 return -EINVAL; in kprobe_add_ksym_blacklist()
2426 return -ENOMEM; in kprobe_add_ksym_blacklist()
2427 ent->start_addr = entry; in kprobe_add_ksym_blacklist()
2428 ent->end_addr = entry + size; in kprobe_add_ksym_blacklist()
2429 INIT_LIST_HEAD(&ent->list); in kprobe_add_ksym_blacklist()
2430 list_add_tail(&ent->list, &kprobe_blacklist); in kprobe_add_ksym_blacklist()
2454 return -ERANGE; in arch_kprobe_get_kallsym()
2470 return -ERANGE; in kprobe_get_kallsym()
2496 if (ret == -EINVAL) in populate_kprobe_blacklist()
2522 if (ent->start_addr < start || ent->start_addr >= end) in kprobe_remove_area_blacklist()
2524 list_del(&ent->list); in kprobe_remove_area_blacklist()
2539 if (mod->kprobe_blacklist) { in add_module_kprobe_blacklist()
2540 for (i = 0; i < mod->num_kprobe_blacklist; i++) in add_module_kprobe_blacklist()
2541 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]); in add_module_kprobe_blacklist()
2544 start = (unsigned long)mod->kprobes_text_start; in add_module_kprobe_blacklist()
2546 end = start + mod->kprobes_text_size; in add_module_kprobe_blacklist()
2550 start = (unsigned long)mod->noinstr_text_start; in add_module_kprobe_blacklist()
2552 end = start + mod->noinstr_text_size; in add_module_kprobe_blacklist()
2562 if (mod->kprobe_blacklist) { in remove_module_kprobe_blacklist()
2563 for (i = 0; i < mod->num_kprobe_blacklist; i++) in remove_module_kprobe_blacklist()
2564 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]); in remove_module_kprobe_blacklist()
2567 start = (unsigned long)mod->kprobes_text_start; in remove_module_kprobe_blacklist()
2569 end = start + mod->kprobes_text_size; in remove_module_kprobe_blacklist()
2573 start = (unsigned long)mod->noinstr_text_start; in remove_module_kprobe_blacklist()
2575 end = start + mod->noinstr_text_size; in remove_module_kprobe_blacklist()
2590 guard(mutex)(&kprobe_mutex); in kprobes_module_callback()
2607 if (within_module_init((unsigned long)p->addr, mod) || in kprobes_module_callback()
2609 within_module_core((unsigned long)p->addr, mod))) { in kprobes_module_callback()
2653 guard(mutex)(&kprobe_mutex); in kprobe_free_init_mem()
2659 if (start <= (void *)p->addr && (void *)p->addr < end) in kprobe_free_init_mem()
2714 * Enable kprobe optimization - this kicks the optimizer which in init_optprobes()
2730 void *addr = p->addr; in report_probe()
2732 if (p->pre_handler == pre_handler_kretprobe) in report_probe()
2737 if (!kallsyms_show_value(pi->file->f_cred)) in report_probe()
2744 else /* try to use %pS */ in report_probe()
2746 addr, kprobe_type, p->addr); in report_probe()
2787 sym = kallsyms_lookup((unsigned long)p->addr, NULL, in show_kprobe_addr()
2790 list_for_each_entry_rcu(kp, &p->list, list) in show_kprobe_addr()
2808 /* kprobes/blacklist -- shows which functions cannot be probed */
2829 if (!kallsyms_show_value(m->file->f_cred)) in kprobe_blacklist_seq_show()
2830 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, in kprobe_blacklist_seq_show()
2831 (void *)ent->start_addr); in kprobe_blacklist_seq_show()
2833 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, in kprobe_blacklist_seq_show()
2834 (void *)ent->end_addr, (void *)ent->start_addr); in kprobe_blacklist_seq_show()
2858 guard(mutex)(&kprobe_mutex); in arm_all_kprobes()
2873 /* Arm all kprobes on a best-effort basis */ in arm_all_kprobes()
2902 guard(mutex)(&kprobe_mutex); in disarm_all_kprobes()
2912 /* Disarm all kprobes on a best-effort basis */ in disarm_all_kprobes()