Lines matching "attr", "cnt", "name" in kernel/trace/bpf_trace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
18 #include <linux/error-injection.h>
57 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) in bpf_get_raw_tracepoint_module() argument
65 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { in bpf_get_raw_tracepoint_module()
66 btp = &btm->module->bpf_raw_events[i]; in bpf_get_raw_tracepoint_module()
67 if (!strcmp(btp->tp->name, name)) { in bpf_get_raw_tracepoint_module()
68 if (try_module_get(btm->module)) in bpf_get_raw_tracepoint_module()
79 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) in bpf_get_raw_tracepoint_module() argument
98 * trace_call_bpf - invoke BPF program
107 * 0 - return from kprobe (event is filtered out)
108 * 1 - store kprobe event into ring buffer
121 * and don't send kprobe event into ring-buffer, in trace_call_bpf()
125 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array)); in trace_call_bpf()
134 * whether call->prog_array is empty or not, which is in trace_call_bpf()
138 * non-NULL, we go into trace_call_bpf() and do the actual in trace_call_bpf()
147 ret = bpf_prog_run_array(rcu_dereference(call->prog_array), in trace_call_bpf()
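The 0/1 contract documented above is what a perf-attached kprobe program returns. A minimal sketch (probe target and pid value are made-up examples):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(filter_open)
{
	/* Keep the event only for one pid of interest (example value):
	 * returning 0 filters it out, 1 stores it into the ring buffer,
	 * exactly as the trace_call_bpf() kernel-doc above describes.
	 */
	if ((bpf_get_current_pid_tgid() >> 32) != 1234)
		return 0;
	return 1;
}

char LICENSE[] SEC("license") = "GPL";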
210 * strncpy_from_user() does long-sized strides in the fast path. If the in bpf_probe_read_user_str_common()
334 * access_ok() should prevent writing to non-user memory, but in in BPF_CALL_3()
344 current->flags & (PF_KTHREAD | PF_EXITING))) in BPF_CALL_3()
345 return -EPERM; in BPF_CALL_3()
347 return -EPERM; in BPF_CALL_3()
367 current->comm, task_pid_nr(current)); in bpf_get_probe_write_proto()
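bpf_probe_write_user() sits behind the checks above (no kthreads or exiting tasks, -EPERM otherwise) and logs the one-time warning with current->comm. A hedged sketch of the call shape only; the probe target and the choice of destination are illustrative, and using this helper taints the kernel:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(poke_user, int dfd, const char *filename)
{
	char one = 'x';

	/* Overwrites one byte of the user buffer the task passed in;
	 * fails with -EPERM in the cases checked above. For
	 * experimentation only, never production.
	 */
	bpf_probe_write_user((void *)filename, &one, sizeof(one));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";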
438 return -EINVAL; in BPF_CALL_4()
480 return -EINVAL; in BPF_CALL_5()
491 return seq_has_overflowed(m) ? -EOVERFLOW : 0; in BPF_CALL_5()
510 return seq_write(m, data, len) ? -EOVERFLOW : 0; in BPF_CALL_3()
534 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); in BPF_CALL_4()
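These seq helpers back BPF iterators: the program writes into the iterator's seq_file, and the -EOVERFLOW above makes the iterator core retry with a larger buffer rather than failing the program. A minimal task-iterator sketch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;

	if (!task)
		return 0;

	/* Expands to bpf_seq_printf(); buffer overflow is handled by
	 * the iterator core, not by the program.
	 */
	BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";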
558 return -EINVAL; in get_map_perf_counter()
561 if (unlikely(index >= array->map.max_entries)) in get_map_perf_counter()
562 return -E2BIG; in get_map_perf_counter()
564 ee = READ_ONCE(array->ptrs[index]); in get_map_perf_counter()
566 return -ENOENT; in get_map_perf_counter()
568 return perf_event_read_local(ee->event, value, enabled, running); in get_map_perf_counter()
578 * this api is ugly since we miss [-22..-2] range of valid in BPF_CALL_2()
597 int err = -EINVAL; in BPF_CALL_4()
601 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, in BPF_CALL_4()
602 &buf->running); in BPF_CALL_4()
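get_map_perf_counter() above serves bpf_perf_event_read_value(), which fills counter/enabled/running from the perf event installed in a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot. A sketch (map and probe names are examples); the -E2BIG and -ENOENT returns above surface directly to the caller:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} counters SEC(".maps");

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(read_counter)
{
	struct bpf_perf_event_value val;
	long err;

	/* Reads the counter that userspace installed for this CPU */
	err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
					&val, sizeof(val));
	if (!err)
		bpf_printk("counter=%llu enabled=%llu", val.counter, val.enabled);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";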
633 if (unlikely(index >= array->map.max_entries)) in __bpf_perf_event_output()
634 return -E2BIG; in __bpf_perf_event_output()
636 ee = READ_ONCE(array->ptrs[index]); in __bpf_perf_event_output()
638 return -ENOENT; in __bpf_perf_event_output()
640 event = ee->event; in __bpf_perf_event_output()
641 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || in __bpf_perf_event_output()
642 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) in __bpf_perf_event_output()
643 return -EINVAL; in __bpf_perf_event_output()
645 if (unlikely(event->oncpu != cpu)) in __bpf_perf_event_output()
646 return -EOPNOTSUPP; in __bpf_perf_event_output()
678 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { in BPF_CALL_5()
679 err = -EBUSY; in BPF_CALL_5()
683 sd = &sds->sds[nest_level - 1]; in BPF_CALL_5()
686 err = -EINVAL; in BPF_CALL_5()
744 ret = -EBUSY; in bpf_event_output()
747 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); in bpf_event_output()
748 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); in bpf_event_output()
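The output path above requires the map slot for the current CPU to hold a PERF_COUNT_SW_BPF_OUTPUT software event opened on that same CPU, or it returns -EINVAL / -EOPNOTSUPP. A minimal BPF-side sketch (probe target and payload layout are examples):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

struct event {
	__u32 pid;
};

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(emit)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
	};

	/* Lands in __bpf_perf_event_output() above; the per-CPU nesting
	 * counters guard against recursion through NMI/irq contexts.
	 */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";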
806 if (unlikely(idx >= array->map.max_entries)) in BPF_CALL_2()
807 return -E2BIG; in BPF_CALL_2()
809 cgrp = READ_ONCE(array->ptrs[idx]); in BPF_CALL_2()
811 return -EAGAIN; in BPF_CALL_2()
838 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); in do_bpf_send_signal()
839 put_task_struct(work->task); in do_bpf_send_signal()
851 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) in bpf_send_signal_common()
852 return -EPERM; in bpf_send_signal_common()
854 return -EPERM; in bpf_send_signal_common()
857 return -EPERM; in bpf_send_signal_common()
864 return -EINVAL; in bpf_send_signal_common()
867 if (irq_work_is_busy(&work->irq_work)) in bpf_send_signal_common()
868 return -EBUSY; in bpf_send_signal_common()
874 work->task = get_task_struct(current); in bpf_send_signal_common()
875 work->sig = sig; in bpf_send_signal_common()
876 work->type = type; in bpf_send_signal_common()
877 irq_work_queue(&work->irq_work); in bpf_send_signal_common()
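bpf_send_signal_common() above either delivers directly or, from contexts that cannot, defers through the per-CPU irq_work shown in do_bpf_send_signal(). A sketch of the helper side (probe target is an example; 10 is SIGUSR1 on x86):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(signal_opener)
{
	/* Sends signal 10 to the current process; -EPERM for kthreads
	 * and exiting tasks, -EBUSY while the irq_work is still queued.
	 */
	bpf_send_signal(10);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";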
930 len = buf + sz - p; in BPF_CALL_3()
955 if (prog->type == BPF_PROG_TYPE_TRACING && in BTF_ID()
956 prog->expected_attach_type == BPF_TRACE_ITER) in BTF_ID()
959 if (prog->type == BPF_PROG_TYPE_LSM) in BTF_ID()
960 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); in BTF_ID()
963 prog->aux->attach_btf_id); in BTF_ID()
989 return -EINVAL; in bpf_btf_printf_prepare()
992 return -EINVAL; in bpf_btf_printf_prepare()
997 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; in bpf_btf_printf_prepare()
999 if (ptr->type_id > 0) in bpf_btf_printf_prepare()
1000 *btf_id = ptr->type_id; in bpf_btf_printf_prepare()
1002 return -EINVAL; in bpf_btf_printf_prepare()
1007 return -ENOENT; in bpf_btf_printf_prepare()
1023 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, in BPF_CALL_5()
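bpf_btf_printf_prepare() above resolves the BTF type behind a struct btf_ptr, and bpf_snprintf_btf() renders the object as text. A sketch, assuming CO-RE macros from libbpf (probe target and buffer size are examples):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char out[512];

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(show_task)
{
	struct btf_ptr p = {
		.ptr = (void *)(long)bpf_get_current_task(),
		.type_id = bpf_core_type_id_kernel(struct task_struct),
	};

	/* type_id must be nonzero or the prepare step above returns
	 * -EINVAL; the output is a textual dump of the struct.
	 */
	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";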
1041 return ((u64 *)ctx)[-2]; in BPF_CALL_1()
1056 /* Being extra safe in here in case entry ip is on the page-edge. */ in get_entry_ip()
1057 if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1)) in get_entry_ip()
1060 fentry_ip -= ENDBR_INSN_SIZE; in get_entry_ip()
1073 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); in BPF_CALL_1()
1074 if (run_ctx->is_uprobe) in BPF_CALL_1()
1075 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr; in BPF_CALL_1()
1080 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) in BPF_CALL_1()
1083 return get_entry_ip((uintptr_t)kp->addr); in BPF_CALL_1()
1095 return bpf_kprobe_multi_entry_ip(current->bpf_ctx); in BPF_CALL_1()
1107 return bpf_kprobe_multi_cookie(current->bpf_ctx); in BPF_CALL_1()
1119 return bpf_uprobe_multi_entry_ip(current->bpf_ctx); in BPF_CALL_1()
1131 return bpf_uprobe_multi_cookie(current->bpf_ctx); in BPF_CALL_1()
1145 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); in BPF_CALL_1()
1146 return run_ctx->bpf_cookie; in BPF_CALL_1()
1158 return ctx->event->bpf_cookie; in BPF_CALL_1()
1172 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); in BPF_CALL_1()
1173 return run_ctx->bpf_cookie; in BPF_CALL_1()
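The run_ctx plumbing above is what bpf_get_attach_cookie() reads back. A sketch for the kprobe flavor (probe target and cookie value are examples; the cookie itself would be supplied at attach time, e.g. via the bpf_cookie field of libbpf's bpf_kprobe_opts):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(with_cookie)
{
	/* Returns the u64 stored at attach time, 0 if none was set */
	__u64 cookie = bpf_get_attach_cookie(ctx);

	return cookie == 42;	/* example: store the event only for cookie 42 */
}

char LICENSE[] SEC("license") = "GPL";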
1186 return -ENOENT; in BPF_CALL_3()
1194 return -EINVAL; in BPF_CALL_3()
1197 return -ENOENT; in BPF_CALL_3()
1214 u64 nr_args = ((u64 *)ctx)[-1]; in BPF_CALL_3()
1217 return -EINVAL; in BPF_CALL_3()
1233 u64 nr_args = ((u64 *)ctx)[-1]; in BPF_CALL_2()
1249 return ((u64 *)ctx)[-1]; in BPF_CALL_1()
1262 * bpf_lookup_user_key - lookup a key by its serial
1264 * @flags: lookup-specific flags
1275 * one of the available key-specific kfuncs.
1308 bkey->key = key_ref_to_ptr(key_ref); in bpf_lookup_user_key()
1309 bkey->has_ref = true; in bpf_lookup_user_key()
1315 * bpf_lookup_system_key - lookup a key by a system-defined ID
1333 * pre-determined ID on success, a NULL pointer otherwise
1346 bkey->key = (struct key *)(unsigned long)id; in bpf_lookup_system_key()
1347 bkey->has_ref = false; in bpf_lookup_system_key()
1353 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1361 if (bkey->has_ref) in bpf_key_put()
1362 key_put(bkey->key); in bpf_key_put()
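The key-lookup kfuncs above are callable from sleepable programs. A sketch using the system-key variant (hook choice is an example; ID 1 corresponds to VERIFY_USE_SECONDARY_KEYRING):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;

SEC("lsm.s/bpf")
int BPF_PROG(use_system_key, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_key *bkey;

	/* System keys take no reference, so has_ref stays false and
	 * bpf_key_put() skips key_put(), matching the code above.
	 */
	bkey = bpf_lookup_system_key(1);
	if (!bkey)
		return 0;
	bpf_key_put(bkey);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";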
1369 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1387 if (trusted_keyring->has_ref) { in bpf_verify_pkcs7_signature()
1396 ret = key_validate(trusted_keyring->key); in bpf_verify_pkcs7_signature()
1407 trusted_keyring->key, in bpf_verify_pkcs7_signature()
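Verification combines the two kfunc families: look up a trusted keyring, hand it to bpf_verify_pkcs7_signature() together with dynptrs over data and signature, then drop the reference. A sketch (key serial, buffer sizes, and hook are examples):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
				      struct bpf_dynptr *sig_ptr,
				      struct bpf_key *trusted_keyring) __ksym;

char data[4096];
char sig[1024];

SEC("lsm.s/bpf")
int BPF_PROG(verify, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_dynptr data_ptr, sig_ptr;
	struct bpf_key *tk;
	int ret;

	bpf_dynptr_from_mem(data, sizeof(data), 0, &data_ptr);
	bpf_dynptr_from_mem(sig, sizeof(sig), 0, &sig_ptr);

	tk = bpf_lookup_user_key(0x123, 0);	/* example key serial */
	if (!tk)
		return 0;
	/* key_validate() runs on referenced keys, per the has_ref branch above */
	ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, tk);
	bpf_key_put(tk);
	return ret;
}

char LICENSE[] SEC("license") = "GPL";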
1442 * bpf_get_file_xattr - get xattr of a file
1444 * @name__str: name of the xattr
1462 return -EPERM; in bpf_get_file_xattr()
1467 return -EINVAL; in bpf_get_file_xattr()
1470 ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ); in bpf_get_file_xattr()
1473 return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len); in bpf_get_file_xattr()
1488 return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0; in BTF_SET8_END()
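The filter above restricts bpf_get_file_xattr() to LSM programs, and the -EPERM check limits it to "user." xattrs. A sketch from a sleepable file_open hook (the xattr name and buffer size are examples):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern int bpf_get_file_xattr(struct file *file, const char *name,
			      struct bpf_dynptr *value_ptr) __ksym;

char value[64];

SEC("lsm.s/file_open")
int BPF_PROG(read_xattr, struct file *file)
{
	struct bpf_dynptr value_ptr;

	bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);

	/* Only "user."-prefixed names pass the -EPERM check above;
	 * inode_permission(MAY_READ) is enforced before the read.
	 */
	bpf_get_file_xattr(file, "user.myattr", &value_ptr);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";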
1651 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) in kprobe_prog_func_proto()
1653 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) in kprobe_prog_func_proto()
1657 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) in kprobe_prog_func_proto()
1659 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) in kprobe_prog_func_proto()
1805 int err = -EINVAL; in BPF_CALL_3()
1809 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, in BPF_CALL_3()
1810 &buf->running); in BPF_CALL_3()
1832 struct perf_branch_stack *br_stack = ctx->data->br_stack; in BPF_CALL_4()
1836 return -EINVAL; in BPF_CALL_4()
1838 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) in BPF_CALL_4()
1839 return -ENOENT; in BPF_CALL_4()
1842 return -ENOENT; in BPF_CALL_4()
1845 return br_stack->nr * br_entry_size; in BPF_CALL_4()
1848 return -EINVAL; in BPF_CALL_4()
1850 to_copy = min_t(u32, br_stack->nr * br_entry_size, size); in BPF_CALL_4()
1851 memcpy(buf, br_stack->entries, to_copy); in BPF_CALL_4()
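bpf_read_branch_records() above supports a size query via flags and enforces that the buffer size is a multiple of the entry size. A perf_event-program sketch (array length is an example; the event must be opened with PERF_SAMPLE_BRANCH_STACK or -ENOENT is returned, as above):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct perf_branch_entry entries[32];

SEC("perf_event")
int read_branches(void *ctx)
{
	long total, copied;

	/* With NULL buf and size 0, returns nr * sizeof(entry) */
	total = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
	if (total < 0)
		return 0;

	/* size must stay a multiple of the entry size, per the
	 * -EINVAL check above; copies min(available, size) bytes.
	 */
	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";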
1905 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { in get_bpf_raw_tp_regs()
1907 return ERR_PTR(-EBUSY); in get_bpf_raw_tp_regs()
1910 return &tp_regs->regs[nest_level - 1]; in get_bpf_raw_tp_regs()
2053 return prog->expected_attach_type == BPF_TRACE_ITER ? in tracing_prog_func_proto()
2057 return prog->expected_attach_type == BPF_TRACE_ITER ? in tracing_prog_func_proto()
2061 return prog->expected_attach_type == BPF_TRACE_ITER ? in tracing_prog_func_proto()
2076 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) in tracing_prog_func_proto()
2102 return -ENOTSUPP; in bpf_prog_test_run_tracing()
2133 info->reg_type = PTR_TO_TP_BUFFER; in raw_tp_writable_prog_is_valid_access()
2191 switch (si->off) { in pe_prog_convert_ctx_access()
2194 data), si->dst_reg, si->src_reg, in pe_prog_convert_ctx_access()
2196 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, in pe_prog_convert_ctx_access()
2202 data), si->dst_reg, si->src_reg, in pe_prog_convert_ctx_access()
2204 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, in pe_prog_convert_ctx_access()
2210 regs), si->dst_reg, si->src_reg, in pe_prog_convert_ctx_access()
2212 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, in pe_prog_convert_ctx_access()
2213 si->off); in pe_prog_convert_ctx_access()
2217 return insn - insn_buf; in pe_prog_convert_ctx_access()
2239 int ret = -EEXIST; in perf_event_attach_bpf_prog()
2243 * and only if they are on the opt-in list. in perf_event_attach_bpf_prog()
2245 if (prog->kprobe_override && in perf_event_attach_bpf_prog()
2246 (!trace_kprobe_on_func_entry(event->tp_event) || in perf_event_attach_bpf_prog()
2247 !trace_kprobe_error_injectable(event->tp_event))) in perf_event_attach_bpf_prog()
2248 return -EINVAL; in perf_event_attach_bpf_prog()
2252 if (event->prog) in perf_event_attach_bpf_prog()
2255 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); in perf_event_attach_bpf_prog()
2258 ret = -E2BIG; in perf_event_attach_bpf_prog()
2266 /* set the new array to event->tp_event and set event->prog */ in perf_event_attach_bpf_prog()
2267 event->prog = prog; in perf_event_attach_bpf_prog()
2268 event->bpf_cookie = bpf_cookie; in perf_event_attach_bpf_prog()
2269 rcu_assign_pointer(event->tp_event->prog_array, new_array); in perf_event_attach_bpf_prog()
2285 if (!event->prog) in perf_event_detach_bpf_prog()
2288 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); in perf_event_detach_bpf_prog()
2289 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); in perf_event_detach_bpf_prog()
2290 if (ret == -ENOENT) in perf_event_detach_bpf_prog()
2293 bpf_prog_array_delete_safe(old_array, event->prog); in perf_event_detach_bpf_prog()
2295 rcu_assign_pointer(event->tp_event->prog_array, new_array); in perf_event_detach_bpf_prog()
2299 bpf_prog_put(event->prog); in perf_event_detach_bpf_prog()
2300 event->prog = NULL; in perf_event_detach_bpf_prog()
2315 return -EPERM; in perf_event_query_prog_array()
2316 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_event_query_prog_array()
2317 return -EINVAL; in perf_event_query_prog_array()
2319 return -EFAULT; in perf_event_query_prog_array()
2323 return -E2BIG; in perf_event_query_prog_array()
2326 return -ENOMEM; in perf_event_query_prog_array()
2329 * is required when user only wants to check for uquery->prog_cnt. in perf_event_query_prog_array()
2335 progs = bpf_event_rcu_dereference(event->tp_event->prog_array); in perf_event_query_prog_array()
2339 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || in perf_event_query_prog_array()
2340 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) in perf_event_query_prog_array()
2341 ret = -EFAULT; in perf_event_query_prog_array()
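perf_event_query_prog_array() above implements the PERF_EVENT_IOC_QUERY_BPF ioctl. A userspace sketch, assuming perf_fd is an open PERF_TYPE_TRACEPOINT event fd (other types fail with -EINVAL, as above):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void query_attached_progs(int perf_fd)
{
	__u32 ids_len = 16;	/* capped by the kernel; -E2BIG if too large */
	struct perf_event_query_bpf *q;

	q = calloc(1, sizeof(*q) + ids_len * sizeof(__u32));
	if (!q)
		return;
	q->ids_len = ids_len;

	/* prog_cnt is copied back even with ids_len == 0, matching the
	 * comment above about checking uquery->prog_cnt only.
	 */
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
		printf("%u program(s) attached\n", q->prog_cnt);
	free(q);
}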
2350 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) in bpf_get_raw_tracepoint() argument
2355 if (!strcmp(btp->tp->name, name)) in bpf_get_raw_tracepoint()
2359 return bpf_get_raw_tracepoint_module(name); in bpf_get_raw_tracepoint()
2376 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { in __bpf_trace_run()
2384 this_cpu_dec(*(prog->active)); in __bpf_trace_run()
2434 struct tracepoint *tp = btp->tp; in __bpf_probe_register()
2440 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) in __bpf_probe_register()
2441 return -EINVAL; in __bpf_probe_register()
2443 if (prog->aux->max_tp_access > btp->writable_size) in __bpf_probe_register()
2444 return -EINVAL; in __bpf_probe_register()
2446 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, in __bpf_probe_register()
2457 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); in bpf_probe_unregister()
2469 prog = event->prog; in bpf_get_perf_event_info()
2471 return -ENOENT; in bpf_get_perf_event_info()
2474 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) in bpf_get_perf_event_info()
2475 return -EOPNOTSUPP; in bpf_get_perf_event_info()
2477 *prog_id = prog->aux->id; in bpf_get_perf_event_info()
2478 flags = event->tp_event->flags; in bpf_get_perf_event_info()
2480 is_syscall_tp = is_syscall_trace_event(event->tp_event); in bpf_get_perf_event_info()
2483 *buf = is_tracepoint ? event->tp_event->tp->name in bpf_get_perf_event_info()
2484 : event->tp_event->name; in bpf_get_perf_event_info()
2494 err = -EOPNOTSUPP; in bpf_get_perf_event_info()
2499 event->attr.type == PERF_TYPE_TRACEPOINT); in bpf_get_perf_event_info()
2505 event->attr.type == PERF_TYPE_TRACEPOINT); in bpf_get_perf_event_info()
2519 init_irq_work(&work->irq_work, do_bpf_send_signal); in send_signal_irq_work_init()
2534 if (mod->num_bpf_raw_events == 0 || in bpf_event_notify()
2544 btm->module = module; in bpf_event_notify()
2545 list_add(&btm->list, &bpf_trace_modules); in bpf_event_notify()
2547 ret = -ENOMEM; in bpf_event_notify()
2552 if (btm->module == module) { in bpf_event_notify()
2553 list_del(&btm->list); in bpf_event_notify()
2586 u32 cnt; member
2603 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) in copy_user_syms() argument
2608 int err = -ENOMEM; in copy_user_syms()
2611 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); in copy_user_syms()
2615 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); in copy_user_syms()
2619 for (p = buf, i = 0; i < cnt; i++) { in copy_user_syms()
2621 err = -EFAULT; in copy_user_syms()
2626 err = -E2BIG; in copy_user_syms()
2633 us->syms = syms; in copy_user_syms()
2634 us->buf = buf; in copy_user_syms()
2645 static void kprobe_multi_put_modules(struct module **mods, u32 cnt) in kprobe_multi_put_modules() argument
2649 for (i = 0; i < cnt; i++) in kprobe_multi_put_modules()
2655 kvfree(us->syms); in free_user_syms()
2656 kvfree(us->buf); in free_user_syms()
2664 unregister_fprobe(&kmulti_link->fp); in bpf_kprobe_multi_link_release()
2665 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); in bpf_kprobe_multi_link_release()
2673 kvfree(kmulti_link->addrs); in bpf_kprobe_multi_link_dealloc()
2674 kvfree(kmulti_link->cookies); in bpf_kprobe_multi_link_dealloc()
2675 kfree(kmulti_link->mods); in bpf_kprobe_multi_link_dealloc()
2682 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); in bpf_kprobe_multi_link_fill_link_info()
2684 u32 ucount = info->kprobe_multi.count; in bpf_kprobe_multi_link_fill_link_info()
2688 return -EINVAL; in bpf_kprobe_multi_link_fill_link_info()
2691 info->kprobe_multi.count = kmulti_link->cnt; in bpf_kprobe_multi_link_fill_link_info()
2692 info->kprobe_multi.flags = kmulti_link->flags; in bpf_kprobe_multi_link_fill_link_info()
2693 info->kprobe_multi.missed = kmulti_link->fp.nmissed; in bpf_kprobe_multi_link_fill_link_info()
2697 if (ucount < kmulti_link->cnt) in bpf_kprobe_multi_link_fill_link_info()
2698 err = -ENOSPC; in bpf_kprobe_multi_link_fill_link_info()
2700 ucount = kmulti_link->cnt; in bpf_kprobe_multi_link_fill_link_info()
2703 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64))) in bpf_kprobe_multi_link_fill_link_info()
2704 return -EFAULT; in bpf_kprobe_multi_link_fill_link_info()
2708 return -EFAULT; in bpf_kprobe_multi_link_fill_link_info()
2726 cookie_a = link->cookies + (addr_a - link->addrs); in bpf_kprobe_multi_cookie_swap()
2727 cookie_b = link->cookies + (addr_b - link->addrs); in bpf_kprobe_multi_cookie_swap()
2740 return *addr_a < *addr_b ? -1 : 1; in bpf_kprobe_multi_addrs_cmp()
2757 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); in bpf_kprobe_multi_cookie()
2758 link = run_ctx->link; in bpf_kprobe_multi_cookie()
2759 if (!link->cookies) in bpf_kprobe_multi_cookie()
2761 entry_ip = run_ctx->entry_ip; in bpf_kprobe_multi_cookie()
2762 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), in bpf_kprobe_multi_cookie()
2766 cookie = link->cookies + (addr - link->addrs); in bpf_kprobe_multi_cookie()
2774 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); in bpf_kprobe_multi_entry_ip()
2775 return run_ctx->entry_ip; in bpf_kprobe_multi_entry_ip()
2790 bpf_prog_inc_misses_counter(link->link.prog); in kprobe_multi_link_prog_run()
2798 err = bpf_prog_run(link->link.prog, regs); in kprobe_multi_link_prog_run()
2852 if (data->cookies) { in symbols_swap_r()
2855 cookie_a = data->cookies + (name_a - data->funcs); in symbols_swap_r()
2856 cookie_b = data->cookies + (name_b - data->funcs); in symbols_swap_r()
2871 if (arr->mods_cnt == arr->mods_cap) { in add_module()
2872 arr->mods_cap = max(16, arr->mods_cap * 3 / 2); in add_module()
2873 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); in add_module()
2875 return -ENOMEM; in add_module()
2876 arr->mods = mods; in add_module()
2879 arr->mods[arr->mods_cnt] = mod; in add_module()
2880 arr->mods_cnt++; in add_module()
2888 for (i = arr->mods_cnt - 1; i >= 0; i--) { in has_module()
2889 if (arr->mods[i] == mod) in has_module()
2911 err = -EINVAL; in get_modules_for_addrs()
2934 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) in addrs_check_error_injection_list() argument
2938 for (i = 0; i < cnt; i++) { in addrs_check_error_injection_list()
2940 return -EINVAL; in addrs_check_error_injection_list()
2945 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) in bpf_kprobe_multi_link_attach() argument
2951 u32 flags, cnt, size; in bpf_kprobe_multi_link_attach() local
2959 return -EOPNOTSUPP; in bpf_kprobe_multi_link_attach()
2961 if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) in bpf_kprobe_multi_link_attach()
2962 return -EINVAL; in bpf_kprobe_multi_link_attach()
2964 flags = attr->link_create.kprobe_multi.flags; in bpf_kprobe_multi_link_attach()
2966 return -EINVAL; in bpf_kprobe_multi_link_attach()
2968 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); in bpf_kprobe_multi_link_attach()
2969 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); in bpf_kprobe_multi_link_attach()
2971 return -EINVAL; in bpf_kprobe_multi_link_attach()
2973 cnt = attr->link_create.kprobe_multi.cnt; in bpf_kprobe_multi_link_attach()
2974 if (!cnt) in bpf_kprobe_multi_link_attach()
2975 return -EINVAL; in bpf_kprobe_multi_link_attach()
2976 if (cnt > MAX_KPROBE_MULTI_CNT) in bpf_kprobe_multi_link_attach()
2977 return -E2BIG; in bpf_kprobe_multi_link_attach()
2979 size = cnt * sizeof(*addrs); in bpf_kprobe_multi_link_attach()
2980 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); in bpf_kprobe_multi_link_attach()
2982 return -ENOMEM; in bpf_kprobe_multi_link_attach()
2984 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); in bpf_kprobe_multi_link_attach()
2986 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); in bpf_kprobe_multi_link_attach()
2988 err = -ENOMEM; in bpf_kprobe_multi_link_attach()
2992 err = -EFAULT; in bpf_kprobe_multi_link_attach()
2999 err = -EFAULT; in bpf_kprobe_multi_link_attach()
3008 err = copy_user_syms(&us, usyms, cnt); in bpf_kprobe_multi_link_attach()
3015 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, in bpf_kprobe_multi_link_attach()
3018 err = ftrace_lookup_symbols(us.syms, cnt, addrs); in bpf_kprobe_multi_link_attach()
3024 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { in bpf_kprobe_multi_link_attach()
3025 err = -EINVAL; in bpf_kprobe_multi_link_attach()
3031 err = -ENOMEM; in bpf_kprobe_multi_link_attach()
3035 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, in bpf_kprobe_multi_link_attach()
3038 err = bpf_link_prime(&link->link, &link_primer); in bpf_kprobe_multi_link_attach()
3043 link->fp.exit_handler = kprobe_multi_link_exit_handler; in bpf_kprobe_multi_link_attach()
3045 link->fp.entry_handler = kprobe_multi_link_handler; in bpf_kprobe_multi_link_attach()
3047 link->addrs = addrs; in bpf_kprobe_multi_link_attach()
3048 link->cookies = cookies; in bpf_kprobe_multi_link_attach()
3049 link->cnt = cnt; in bpf_kprobe_multi_link_attach()
3050 link->flags = flags; in bpf_kprobe_multi_link_attach()
3059 sort_r(addrs, cnt, sizeof(*addrs), in bpf_kprobe_multi_link_attach()
3065 err = get_modules_for_addrs(&link->mods, addrs, cnt); in bpf_kprobe_multi_link_attach()
3070 link->mods_cnt = err; in bpf_kprobe_multi_link_attach()
3072 err = register_fprobe_ips(&link->fp, addrs, cnt); in bpf_kprobe_multi_link_attach()
3074 kprobe_multi_put_modules(link->mods, link->mods_cnt); in bpf_kprobe_multi_link_attach()
3088 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) in bpf_kprobe_multi_link_attach() argument
3090 return -EOPNOTSUPP; in bpf_kprobe_multi_link_attach()
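Userspace typically reaches bpf_kprobe_multi_link_attach() through libbpf. A sketch, assuming prog was loaded from a SEC("kprobe.multi/...") program and using example symbols and cookies:

#include <bpf/libbpf.h>

static struct bpf_link *attach_kprobe_multi(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	const char *syms[] = { "vfs_read", "vfs_write" };
	__u64 cookies[] = { 1, 2 };

	opts.syms = syms;
	opts.cookies = cookies;
	opts.cnt = 2;

	/* The kernel copies and resolves the names (copy_user_syms(),
	 * ftrace_lookup_symbols() above), then sorts addrs and cookies
	 * together so bpf_get_attach_cookie() can bsearch by entry ip.
	 */
	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}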
3116 u32 cnt; member
3129 u32 cnt) in bpf_uprobe_unregister() argument
3133 for (i = 0; i < cnt; i++) { in bpf_uprobe_unregister()
3134 uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset, in bpf_uprobe_unregister()
3144 bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt); in bpf_uprobe_multi_link_release()
3152 if (umulti_link->task) in bpf_uprobe_multi_link_dealloc()
3153 put_task_struct(umulti_link->task); in bpf_uprobe_multi_link_dealloc()
3154 path_put(&umulti_link->path); in bpf_uprobe_multi_link_dealloc()
3155 kvfree(umulti_link->uprobes); in bpf_uprobe_multi_link_dealloc()
3162 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets); in bpf_uprobe_multi_link_fill_link_info()
3163 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies); in bpf_uprobe_multi_link_fill_link_info()
3164 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets); in bpf_uprobe_multi_link_fill_link_info()
3165 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path); in bpf_uprobe_multi_link_fill_link_info()
3166 u32 upath_size = info->uprobe_multi.path_size; in bpf_uprobe_multi_link_fill_link_info()
3168 u32 ucount = info->uprobe_multi.count; in bpf_uprobe_multi_link_fill_link_info()
3173 return -EINVAL; in bpf_uprobe_multi_link_fill_link_info()
3176 return -EINVAL; in bpf_uprobe_multi_link_fill_link_info()
3179 info->uprobe_multi.count = umulti_link->cnt; in bpf_uprobe_multi_link_fill_link_info()
3180 info->uprobe_multi.flags = umulti_link->flags; in bpf_uprobe_multi_link_fill_link_info()
3181 info->uprobe_multi.pid = umulti_link->task ? in bpf_uprobe_multi_link_fill_link_info()
3182 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; in bpf_uprobe_multi_link_fill_link_info()
3191 return -ENOMEM; in bpf_uprobe_multi_link_fill_link_info()
3192 p = d_path(&umulti_link->path, buf, upath_size); in bpf_uprobe_multi_link_fill_link_info()
3197 upath_size = buf + upath_size - p; in bpf_uprobe_multi_link_fill_link_info()
3201 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3202 info->uprobe_multi.path_size = upath_size; in bpf_uprobe_multi_link_fill_link_info()
3208 if (ucount < umulti_link->cnt) in bpf_uprobe_multi_link_fill_link_info()
3209 err = -ENOSPC; in bpf_uprobe_multi_link_fill_link_info()
3211 ucount = umulti_link->cnt; in bpf_uprobe_multi_link_fill_link_info()
3215 put_user(umulti_link->uprobes[i].offset, uoffsets + i)) in bpf_uprobe_multi_link_fill_link_info()
3216 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3218 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) in bpf_uprobe_multi_link_fill_link_info()
3219 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3221 put_user(umulti_link->uprobes[i].cookie, ucookies + i)) in bpf_uprobe_multi_link_fill_link_info()
3222 return -EFAULT; in bpf_uprobe_multi_link_fill_link_info()
3238 struct bpf_uprobe_multi_link *link = uprobe->link; in uprobe_prog_run()
3243 struct bpf_prog *prog = link->link.prog; in uprobe_prog_run()
3244 bool sleepable = prog->aux->sleepable; in uprobe_prog_run()
3248 if (link->task && current != link->task) in uprobe_prog_run()
3259 err = bpf_prog_run(link->link.prog, regs); in uprobe_prog_run()
3278 return uprobe->link->task->mm == mm; in uprobe_multi_link_filter()
3303 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); in bpf_uprobe_multi_entry_ip()
3304 return run_ctx->entry_ip; in bpf_uprobe_multi_entry_ip()
3311 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); in bpf_uprobe_multi_cookie()
3312 return run_ctx->uprobe->cookie; in bpf_uprobe_multi_cookie()
3315 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) in bpf_uprobe_multi_link_attach() argument
3325 u32 flags, cnt, i; in bpf_uprobe_multi_link_attach() local
3327 char *name; in bpf_uprobe_multi_link_attach() local
3333 return -EOPNOTSUPP; in bpf_uprobe_multi_link_attach()
3335 if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI) in bpf_uprobe_multi_link_attach()
3336 return -EINVAL; in bpf_uprobe_multi_link_attach()
3338 flags = attr->link_create.uprobe_multi.flags; in bpf_uprobe_multi_link_attach()
3340 return -EINVAL; in bpf_uprobe_multi_link_attach()
3343 * path, offsets and cnt are mandatory, in bpf_uprobe_multi_link_attach()
3346 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); in bpf_uprobe_multi_link_attach()
3347 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); in bpf_uprobe_multi_link_attach()
3348 cnt = attr->link_create.uprobe_multi.cnt; in bpf_uprobe_multi_link_attach()
3350 if (!upath || !uoffsets || !cnt) in bpf_uprobe_multi_link_attach()
3351 return -EINVAL; in bpf_uprobe_multi_link_attach()
3352 if (cnt > MAX_UPROBE_MULTI_CNT) in bpf_uprobe_multi_link_attach()
3353 return -E2BIG; in bpf_uprobe_multi_link_attach()
3355 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); in bpf_uprobe_multi_link_attach()
3356 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); in bpf_uprobe_multi_link_attach()
3358 name = strndup_user(upath, PATH_MAX); in bpf_uprobe_multi_link_attach()
3359 if (IS_ERR(name)) { in bpf_uprobe_multi_link_attach()
3360 err = PTR_ERR(name); in bpf_uprobe_multi_link_attach()
3364 err = kern_path(name, LOOKUP_FOLLOW, &path); in bpf_uprobe_multi_link_attach()
3365 kfree(name); in bpf_uprobe_multi_link_attach()
3370 err = -EBADF; in bpf_uprobe_multi_link_attach()
3374 pid = attr->link_create.uprobe_multi.pid; in bpf_uprobe_multi_link_attach()
3380 err = -ESRCH; in bpf_uprobe_multi_link_attach()
3385 err = -ENOMEM; in bpf_uprobe_multi_link_attach()
3388 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); in bpf_uprobe_multi_link_attach()
3393 for (i = 0; i < cnt; i++) { in bpf_uprobe_multi_link_attach()
3395 err = -EFAULT; in bpf_uprobe_multi_link_attach()
3399 err = -EINVAL; in bpf_uprobe_multi_link_attach()
3403 err = -EFAULT; in bpf_uprobe_multi_link_attach()
3407 err = -EFAULT; in bpf_uprobe_multi_link_attach()
3422 link->cnt = cnt; in bpf_uprobe_multi_link_attach()
3423 link->uprobes = uprobes; in bpf_uprobe_multi_link_attach()
3424 link->path = path; in bpf_uprobe_multi_link_attach()
3425 link->task = task; in bpf_uprobe_multi_link_attach()
3426 link->flags = flags; in bpf_uprobe_multi_link_attach()
3428 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, in bpf_uprobe_multi_link_attach()
3431 for (i = 0; i < cnt; i++) { in bpf_uprobe_multi_link_attach()
3432 err = uprobe_register_refctr(d_real_inode(link->path.dentry), in bpf_uprobe_multi_link_attach()
3442 err = bpf_link_prime(&link->link, &link_primer); in bpf_uprobe_multi_link_attach()
3458 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) in bpf_uprobe_multi_link_attach() argument
3460 return -EOPNOTSUPP; in bpf_uprobe_multi_link_attach()
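The uprobe-multi attach path has a matching libbpf entry point. A sketch, assuming prog sits in a SEC("uprobe.multi/...") section; the binary path and symbol names are examples:

#include <bpf/libbpf.h>

static struct bpf_link *attach_uprobe_multi(struct bpf_program *prog, pid_t pid)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[] = { "malloc", "free" };

	opts.syms = syms;
	opts.cnt = 2;

	/* A nonzero pid makes the kernel hold that task and filter in
	 * uprobe_prog_run() via link->task, as the code above shows;
	 * 0 leaves the kernel-side task filter unset.
	 */
	return bpf_program__attach_uprobe_multi(prog, pid, "/usr/lib/libc.so.6",
						NULL, &opts);
}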