Lines Matching +full:async +full:- +full:enum

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
6 #include <linux/bpf-cgroup.h>
30 * inside its own verifier_ops->get_func_proto() callback it should return
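For context: the comment above is about how each program type advertises the helpers it may call. The verifier asks the program type's bpf_verifier_ops->get_func_proto() hook for a bpf_func_proto, and the hook returns one of the *_proto objects defined in this file. A minimal hedged sketch of such a hook; the name my_prog_func_proto is made up for illustration, while bpf_base_func_proto() and the *_proto symbols are the real ones that appear further down in this listing:

static const struct bpf_func_proto *
my_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	default:
		/* Fall back to the generic helpers exported by this file. */
		return bpf_base_func_proto(func_id, prog);
	}
}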
42 return (unsigned long) map->ops->map_lookup_elem(map, key); in BPF_CALL_2()
59 return map->ops->map_update_elem(map, key, value, flags); in BPF_CALL_4()
77 return map->ops->map_delete_elem(map, key); in BPF_CALL_2()
91 return map->ops->map_push_elem(map, value, flags); in BPF_CALL_3()
106 return map->ops->map_pop_elem(map, value); in BPF_CALL_2()
119 return map->ops->map_peek_elem(map, value); in BPF_CALL_2()
133 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); in BPF_CALL_3()
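The wrappers at lines 42-133 are the kernel side of the generic map helpers; a BPF program reaches them through bpf_map_lookup_elem(), bpf_map_update_elem() and friends. An illustrative libbpf-style program (map name, section and sizes are arbitrary):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_writes(void *ctx)
{
	__u32 tgid = bpf_get_current_pid_tgid() >> 32;
	__u64 init = 1, *val;

	val = bpf_map_lookup_elem(&counts, &tgid);	/* ends up in map->ops->map_lookup_elem() */
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counts, &tgid, &init, BPF_NOEXIST);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";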
227 return -EINVAL; in BPF_CALL_0()
229 return (u64) task->tgid << 32 | task->pid; in BPF_CALL_0()
245 return -EINVAL; in BPF_CALL_0()
266 strscpy_pad(buf, task->comm, size); in BPF_CALL_2()
270 return -EINVAL; in BPF_CALL_2()
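Line 229 shows how bpf_get_current_pid_tgid() packs the two IDs, and lines 266-270 the strscpy_pad() behind bpf_get_current_comm(). A typical consumer-side sketch (same includes as the previous snippet; attach point is arbitrary):

SEC("kprobe/do_sys_openat2")
int probe_open(void *ctx)
{
	__u64 id = bpf_get_current_pid_tgid();
	__u32 tgid = id >> 32;		/* upper 32 bits: task->tgid */
	__u32 pid = (__u32)id;		/* lower 32 bits: task->pid  */
	char comm[16];			/* TASK_COMM_LEN */

	bpf_get_current_comm(comm, sizeof(comm));
	bpf_printk("%s tgid=%u pid=%u", comm, tgid, pid);
	return 0;
}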
381 lock = src + map->record->spin_lock_off; in copy_map_value_locked()
383 lock = dst + map->record->spin_lock_off; in copy_map_value_locked()
458 return -EINVAL; in __bpf_strtoull()
461 return -EINVAL; in __bpf_strtoull()
464 return -EINVAL; in __bpf_strtoull()
469 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); in __bpf_strtoull()
473 consumed = cur_buf - buf; in __bpf_strtoull()
474 cur_len -= consumed; in __bpf_strtoull()
476 return -EINVAL; in __bpf_strtoull()
478 cur_len = min(cur_len, sizeof(str) - 1); in __bpf_strtoull()
487 return -ERANGE; in __bpf_strtoull()
490 return -EINVAL; in __bpf_strtoull()
493 consumed += cur_buf - str; in __bpf_strtoull()
509 if ((long long)-_res > 0) in __bpf_strtoll()
510 return -ERANGE; in __bpf_strtoll()
511 *res = -_res; in __bpf_strtoll()
514 return -ERANGE; in __bpf_strtoll()
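__bpf_strtoull()/__bpf_strtoll() are the backend of the bpf_strtol() and bpf_strtoul() helpers. A hedged fragment of program-side use, e.g. on a string obtained from a sysctl or user buffer:

	char num[] = "-42";
	long res;

	/* flags == 0: base is auto-detected (0x.. hex, 0.. octal, else decimal).
	 * On success the helper returns the number of characters consumed,
	 * otherwise -EINVAL/-ERANGE as in the code above. */
	if (bpf_strtol(num, sizeof(num) - 1, 0, &res) < 0)
		return 0;
	/* res == -42 here */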
557 return -EINVAL; in BPF_CALL_4()
592 int err = -EINVAL; in BPF_CALL_4()
605 err = -ENOENT; in BPF_CALL_4()
609 if (!ns_match(&pidns->ns, (dev_t)dev, ino)) in BPF_CALL_4()
612 nsdata->pid = task_pid_nr_ns(task, pidns); in BPF_CALL_4()
613 nsdata->tgid = task_tgid_nr_ns(task, pidns); in BPF_CALL_4()
640 return -EINVAL; in BPF_CALL_5()
663 ret = -EFAULT; in BPF_CALL_3()
686 return -EINVAL; in BPF_CALL_5()
696 /* Return -EFAULT for partial read */ in BPF_CALL_5()
697 return ret < 0 ? ret : -EFAULT; in BPF_CALL_5()
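The partial-read handling above belongs to the sleepable copy-from-user family of helpers (bpf_copy_from_user() and bpf_copy_from_user_task()): on a short read the destination is zeroed and -EFAULT is returned. A sketch loosely following the selftests' sleepable task-iterator pattern; it assumes vmlinux.h plus bpf_helpers.h, bpf_tracing.h and bpf_core_read.h:

SEC("iter.s/task")			/* sleepable task iterator */
int dump_cmdline(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	char buf[32] = {};
	void *arg_start;

	if (!task)
		return 0;
	arg_start = (void *)BPF_CORE_READ(task, mm, arg_start);
	if (!arg_start)
		return 0;
	/* Partial reads zero 'buf' and return -EFAULT, as above. */
	if (bpf_copy_from_user_task(buf, sizeof(buf), arg_start, task, 0))
		return 0;
	BPF_SEQ_PRINTF(seq, "%d: %s\n", task->pid, buf);
	return 0;
}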
761 return -EINVAL; in bpf_trace_copy_string()
764 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
788 return -EBUSY; in try_get_buffers()
790 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); in try_get_buffers()
797 if (!data->bin_args && !data->buf) in bpf_bprintf_cleanup()
806 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
811 * - Format string verification only: when data->get_bin_args is false
812 * - Arguments preparation: in addition to the above verification, it writes in
813 * data->bin_args a binary representation of arguments usable by bstr_printf
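Callers such as the trace_printk-style helpers use the two modes roughly like this. A hedged sketch of the caller-side pattern (cf. bpf_trace_printk() in kernel/trace/bpf_trace.c), not a verbatim copy of any one caller; the wrapper name example_bprintf is invented:

static int example_bprintf(char *out, size_t out_sz, char *fmt, u32 fmt_size,
			   u64 *raw_args, u32 num_args)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,	/* false would mean "verify the format string only" */
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(out, out_sz, fmt, data.bin_args);

	/* Always paired with a successful prepare; releases the per-cpu buffers. */
	bpf_bprintf_cleanup(&data);
	return ret;
}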
822 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; in bpf_bprintf_prepare()
832 return -EINVAL; in bpf_bprintf_prepare()
833 fmt_size = fmt_end - fmt; in bpf_bprintf_prepare()
836 return -EBUSY; in bpf_bprintf_prepare()
838 if (data->get_bin_args) { in bpf_bprintf_prepare()
840 tmp_buf = buffers->bin_args; in bpf_bprintf_prepare()
842 data->bin_args = (u32 *)tmp_buf; in bpf_bprintf_prepare()
845 if (data->get_buf) in bpf_bprintf_prepare()
846 data->buf = buffers->buf; in bpf_bprintf_prepare()
850 err = -EINVAL; in bpf_bprintf_prepare()
863 err = -EINVAL; in bpf_bprintf_prepare()
867 /* The string is zero-terminated so if fmt[i] != 0, we can in bpf_bprintf_prepare()
872 /* skip optional "[0 +-][num]" width formatting field */ in bpf_bprintf_prepare()
873 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || in bpf_bprintf_prepare()
906 (tmp_buf_end - tmp_buf), in bpf_bprintf_prepare()
920 err = -EINVAL; in bpf_bprintf_prepare()
929 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { in bpf_bprintf_prepare()
930 err = -ENOSPC; in bpf_bprintf_prepare()
941 * pre-formatted as strings, ironically, the easiest way in bpf_bprintf_prepare()
944 ip_spec[2] = fmt[i - 1]; in bpf_bprintf_prepare()
946 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, in bpf_bprintf_prepare()
959 err = -EINVAL; in bpf_bprintf_prepare()
967 err = -ENOSPC; in bpf_bprintf_prepare()
974 tmp_buf_end - tmp_buf); in bpf_bprintf_prepare()
989 err = -ENOSPC; in bpf_bprintf_prepare()
1013 err = -EINVAL; in bpf_bprintf_prepare()
1022 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { in bpf_bprintf_prepare()
1023 err = -ENOSPC; in bpf_bprintf_prepare()
1055 return -EINVAL; in BPF_CALL_5()
1058 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we in BPF_CALL_5()
1105 * ops->map_release_uref callback is responsible for cancelling the timers,
1108 * Inner maps can contain bpf timers as well. ops->map_release_uref is
1137 enum bpf_async_type {
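From the BPF side, the async timer machinery above is driven with bpf_timer_init(), bpf_timer_set_callback() and bpf_timer_start(), with the struct bpf_timer embedded in a map value exactly as the comments describe. An illustrative program (map layout, attach point and the 1 ms period are arbitrary; assumes vmlinux.h, bpf_helpers.h and bpf_tracing.h):

#define CLOCK_MONOTONIC 1		/* from uapi <linux/time.h> */

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
	bpf_printk("timer fired");
	return 0;			/* 0: do not re-arm from the callback */
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(arm_timer)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

	if (!e)
		return 0;
	bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&e->t, timer_cb);
	bpf_timer_start(&e->t, 1000 * 1000 /* 1 ms */, 0);
	return 0;
}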
1144 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) in bpf_timer_cb()
1147 struct bpf_map *map = t->cb.map; in bpf_timer_cb()
1148 void *value = t->cb.value; in bpf_timer_cb()
1154 callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held()); in bpf_timer_cb()
1165 if (map->map_type == BPF_MAP_TYPE_ARRAY) { in bpf_timer_cb()
1169 idx = ((char *)value - array->value) / array->elem_size; in bpf_timer_cb()
1172 key = value - round_up(map->key_size, 8); in bpf_timer_cb()
1186 struct bpf_async_cb *cb = &w->cb; in bpf_wq_work()
1187 struct bpf_map *map = cb->map; in bpf_wq_work()
1189 void *value = cb->value; in bpf_wq_work()
1195 callback_fn = READ_ONCE(cb->callback_fn); in bpf_wq_work()
1199 if (map->map_type == BPF_MAP_TYPE_ARRAY) { in bpf_wq_work()
1203 idx = ((char *)value - array->value) / array->elem_size; in bpf_wq_work()
1206 key = value - round_up(map->key_size, 8); in bpf_wq_work()
1222 cancel_work_sync(&w->work); in bpf_wq_delete_work()
1233 * kfree_rcu(t) right after for both preallocated and non-preallocated in bpf_timer_delete_work()
1234 * maps. The async->cb = NULL was already done and no code path can see in bpf_timer_delete_work()
1238 hrtimer_cancel(&t->timer); in bpf_timer_delete_work()
1242 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, in __bpf_async_init() argument
1243 enum bpf_async_type type) in __bpf_async_init()
1253 return -EOPNOTSUPP; in __bpf_async_init()
1263 return -EINVAL; in __bpf_async_init()
1266 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_init()
1267 t = async->timer; in __bpf_async_init()
1269 ret = -EBUSY; in __bpf_async_init()
1274 cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); in __bpf_async_init()
1276 ret = -ENOMEM; in __bpf_async_init()
1282 clockid = flags & (MAX_CLOCKS - 1); in __bpf_async_init()
1285 atomic_set(&t->cancelling, 0); in __bpf_async_init()
1286 INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); in __bpf_async_init()
1287 hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT); in __bpf_async_init()
1288 cb->value = (void *)async - map->record->timer_off; in __bpf_async_init()
1293 INIT_WORK(&w->work, bpf_wq_work); in __bpf_async_init()
1294 INIT_WORK(&w->delete_work, bpf_wq_delete_work); in __bpf_async_init()
1295 cb->value = (void *)async - map->record->wq_off; in __bpf_async_init()
1298 cb->map = map; in __bpf_async_init()
1299 cb->prog = NULL; in __bpf_async_init()
1300 cb->flags = flags; in __bpf_async_init()
1301 rcu_assign_pointer(cb->callback_fn, NULL); in __bpf_async_init()
1303 WRITE_ONCE(async->cb, cb); in __bpf_async_init()
1304 /* Guarantee the order between async->cb and map->usercnt. So in __bpf_async_init()
1306 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL in __bpf_async_init()
1310 if (!atomic64_read(&map->usercnt)) { in __bpf_async_init()
1314 WRITE_ONCE(async->cb, NULL); in __bpf_async_init()
1316 ret = -EPERM; in __bpf_async_init()
1319 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_init()
1326 clock_t clockid = flags & (MAX_CLOCKS - 1); in BPF_CALL_3()
1337 return -EINVAL; in BPF_CALL_3()
1351 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, in __bpf_async_set_callback() argument
1353 enum bpf_async_type type) in __bpf_async_set_callback()
1355 struct bpf_prog *prev, *prog = aux->prog; in __bpf_async_set_callback()
1360 return -EOPNOTSUPP; in __bpf_async_set_callback()
1361 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_set_callback()
1362 cb = async->cb; in __bpf_async_set_callback()
1364 ret = -EINVAL; in __bpf_async_set_callback()
1367 if (!atomic64_read(&cb->map->usercnt)) { in __bpf_async_set_callback()
1373 ret = -EPERM; in __bpf_async_set_callback()
1376 prev = cb->prog; in __bpf_async_set_callback()
1379 * can pick different callback_fn-s within the same prog. in __bpf_async_set_callback()
1389 cb->prog = prog; in __bpf_async_set_callback()
1391 rcu_assign_pointer(cb->callback_fn, callback_fn); in __bpf_async_set_callback()
1393 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_set_callback()
1415 enum hrtimer_mode mode; in BPF_CALL_3()
1418 return -EOPNOTSUPP; in BPF_CALL_3()
1420 return -EINVAL; in BPF_CALL_3()
1421 __bpf_spin_lock_irqsave(&timer->lock); in BPF_CALL_3()
1422 t = timer->timer; in BPF_CALL_3()
1423 if (!t || !t->cb.prog) { in BPF_CALL_3()
1424 ret = -EINVAL; in BPF_CALL_3()
1436 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); in BPF_CALL_3()
1438 __bpf_spin_unlock_irqrestore(&timer->lock); in BPF_CALL_3()
1451 static void drop_prog_refcnt(struct bpf_async_cb *async) in drop_prog_refcnt() argument
1453 struct bpf_prog *prog = async->prog; in drop_prog_refcnt()
1457 async->prog = NULL; in drop_prog_refcnt()
1458 rcu_assign_pointer(async->callback_fn, NULL); in drop_prog_refcnt()
1469 return -EOPNOTSUPP; in BPF_CALL_1()
1471 __bpf_spin_lock_irqsave(&timer->lock); in BPF_CALL_1()
1472 t = timer->timer; in BPF_CALL_1()
1474 ret = -EINVAL; in BPF_CALL_1()
1484 ret = -EDEADLK; in BPF_CALL_1()
1488 /* Only account in-flight cancellations when invoked from a timer in BPF_CALL_1()
1490 * are waiting on us, to avoid introducing lockups. Non-callback paths in BPF_CALL_1()
1495 atomic_inc(&t->cancelling); in BPF_CALL_1()
1499 if (atomic_read(&cur_t->cancelling)) { in BPF_CALL_1()
1508 ret = -EDEADLK; in BPF_CALL_1()
1512 drop_prog_refcnt(&t->cb); in BPF_CALL_1()
1514 __bpf_spin_unlock_irqrestore(&timer->lock); in BPF_CALL_1()
1518 ret = ret ?: hrtimer_cancel(&t->timer); in BPF_CALL_1()
1520 atomic_dec(&t->cancelling); in BPF_CALL_1()
1532 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async) in __bpf_async_cancel_and_free() argument
1536 /* Performance optimization: read async->cb without lock first. */ in __bpf_async_cancel_and_free()
1537 if (!READ_ONCE(async->cb)) in __bpf_async_cancel_and_free()
1540 __bpf_spin_lock_irqsave(&async->lock); in __bpf_async_cancel_and_free()
1541 /* re-read it under lock */ in __bpf_async_cancel_and_free()
1542 cb = async->cb; in __bpf_async_cancel_and_free()
1549 WRITE_ONCE(async->cb, NULL); in __bpf_async_cancel_and_free()
1551 __bpf_spin_unlock_irqrestore(&async->lock); in __bpf_async_cancel_and_free()
1556 * by ops->map_release_uref when the user space reference to a map reaches zero.
1569 * just return -1). Though callback_fn is still running on this cpu it's in bpf_timer_cancel_and_free()
1572 * since async->cb = NULL was already done. The timer will be in bpf_timer_cancel_and_free()
1596 queue_work(system_unbound_wq, &t->cb.delete_work); in bpf_timer_cancel_and_free()
1606 if (hrtimer_try_to_cancel(&t->timer) >= 0) in bpf_timer_cancel_and_free()
1609 queue_work(system_unbound_wq, &t->cb.delete_work); in bpf_timer_cancel_and_free()
1611 bpf_timer_delete_work(&t->cb.delete_work); in bpf_timer_cancel_and_free()
1616 * by ops->map_release_uref when the user space reference to a map reaches zero.
1632 schedule_work(&work->delete_work); in bpf_wq_cancel_and_free()
1657 /* Since the upper 8 bits of dynptr->size are reserved, the
1658 * maximum supported size is 2^24 - 1.
1660 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
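From the accessors that follow (__bpf_dynptr_size(), bpf_dynptr_get_type(), __bpf_dynptr_is_rdonly()), the size word of struct bpf_dynptr_kern packs three fields. Assumed layout, with the bit positions taken from the DYNPTR_* defines in helpers.c at the time of writing; treat the exact positions as illustrative:

/*
 *   bits  0..23  size          (DYNPTR_SIZE_MASK, hence DYNPTR_MAX_SIZE = 2^24 - 1)
 *   bits 28..30  dynptr type   (enum bpf_dynptr_type << DYNPTR_TYPE_SHIFT)
 *   bit      31  read-only     (DYNPTR_RDONLY_BIT)
 */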
1667 return ptr->size & DYNPTR_RDONLY_BIT; in __bpf_dynptr_is_rdonly()
1672 ptr->size |= DYNPTR_RDONLY_BIT; in bpf_dynptr_set_rdonly()
1675 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) in bpf_dynptr_set_type()
1677 ptr->size |= type << DYNPTR_TYPE_SHIFT; in bpf_dynptr_set_type()
1680 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) in bpf_dynptr_get_type()
1682 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; in bpf_dynptr_get_type()
1687 return ptr->size & DYNPTR_SIZE_MASK; in __bpf_dynptr_size()
1692 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; in bpf_dynptr_set_size()
1694 ptr->size = new_size | metadata; in bpf_dynptr_set_size()
1699 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; in bpf_dynptr_check_size()
1703 enum bpf_dynptr_type type, u32 offset, u32 size) in bpf_dynptr_init()
1705 ptr->data = data; in bpf_dynptr_init()
1706 ptr->offset = offset; in bpf_dynptr_init()
1707 ptr->size = size; in bpf_dynptr_init()
1720 if (len > size || offset > size - len) in bpf_dynptr_check_off_len()
1721 return -E2BIG; in bpf_dynptr_check_off_len()
1738 err = -EINVAL; in BPF_CALL_4()
1764 enum bpf_dynptr_type type; in __bpf_dynptr_read()
1767 if (!src->data || flags) in __bpf_dynptr_read()
1768 return -EINVAL; in __bpf_dynptr_read()
1783 memmove(dst, src->data + src->offset + offset, len); in __bpf_dynptr_read()
1786 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); in __bpf_dynptr_read()
1788 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); in __bpf_dynptr_read()
1791 return -EFAULT; in __bpf_dynptr_read()
1815 enum bpf_dynptr_type type; in __bpf_dynptr_write()
1818 if (!dst->data || __bpf_dynptr_is_rdonly(dst)) in __bpf_dynptr_write()
1819 return -EINVAL; in __bpf_dynptr_write()
1831 return -EINVAL; in __bpf_dynptr_write()
1836 memmove(dst->data + dst->offset + offset, src, len); in __bpf_dynptr_write()
1839 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, in __bpf_dynptr_write()
1843 return -EINVAL; in __bpf_dynptr_write()
1844 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); in __bpf_dynptr_write()
1847 return -EFAULT; in __bpf_dynptr_write()
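__bpf_dynptr_read()/__bpf_dynptr_write() back the bpf_dynptr_read() and bpf_dynptr_write() helpers. An illustrative producer using a ringbuf dynptr (LOCAL/RINGBUF dynptrs take the memmove path above); map name, section and size are arbitrary:

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tp/sched/sched_switch")
int produce(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 ts = bpf_ktime_get_ns();

	if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(ts), 0, &ptr)) {
		bpf_ringbuf_discard_dynptr(&ptr, 0);	/* the dynptr must still be released */
		return 0;
	}
	bpf_dynptr_write(&ptr, 0, &ts, sizeof(ts), 0);
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}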
1870 enum bpf_dynptr_type type; in BPF_CALL_3()
1873 if (!ptr->data) in BPF_CALL_3()
1888 return (unsigned long)(ptr->data + ptr->offset + offset); in BPF_CALL_3()
1917 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) in bpf_base_func_proto()
1972 if (!bpf_token_capable(prog->aux->token, CAP_BPF)) in bpf_base_func_proto()
2030 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) in bpf_base_func_proto()
2081 if (!head->next || list_empty(head)) in bpf_list_head_free()
2083 head = head->next; in bpf_list_head_free()
2091 obj -= field->graph_root.node_offset; in bpf_list_head_free()
2092 head = head->next; in bpf_list_head_free()
2096 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); in bpf_list_head_free()
2130 obj -= field->graph_root.node_offset; in bpf_rb_root_free()
2133 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); in bpf_rb_root_free()
2149 bpf_obj_init(meta->record, p); in bpf_obj_new_impl()
2166 if (rec && rec->refcount_off >= 0 && in __bpf_obj_drop_impl()
2167 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { in __bpf_obj_drop_impl()
2189 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false); in bpf_obj_drop_impl()
2206 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); in bpf_refcount_acquire_impl()
2220 struct list_head *n = &node->list_head, *h = (void *)head; in __bpf_list_add()
2222 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't in __bpf_list_add()
2225 if (unlikely(!h->next)) in __bpf_list_add()
2228 /* node->owner != NULL implies !list_empty(n), no need to separately in __bpf_list_add()
2231 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { in __bpf_list_add()
2233 __bpf_obj_drop_impl((void *)n - off, rec, false); in __bpf_list_add()
2234 return -EINVAL; in __bpf_list_add()
2238 WRITE_ONCE(node->owner, head); in __bpf_list_add()
2250 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); in bpf_list_push_front_impl()
2260 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); in bpf_list_push_back_impl()
2268 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't in __bpf_list_del()
2271 if (unlikely(!h->next)) in __bpf_list_del()
2276 n = tail ? h->prev : h->next; in __bpf_list_del()
2278 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) in __bpf_list_del()
2282 WRITE_ONCE(node->owner, NULL); in __bpf_list_del()
2301 struct rb_node *n = &node_internal->rb_node; in bpf_rbtree_remove()
2303 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or in bpf_rbtree_remove()
2306 if (READ_ONCE(node_internal->owner) != root) in bpf_rbtree_remove()
2311 WRITE_ONCE(node_internal->owner, NULL); in bpf_rbtree_remove()
2322 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; in __bpf_rbtree_add()
2323 struct rb_node *parent = NULL, *n = &node->rb_node; in __bpf_rbtree_add()
2327 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately in __bpf_rbtree_add()
2330 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { in __bpf_rbtree_add()
2332 __bpf_obj_drop_impl((void *)n - off, rec, false); in __bpf_rbtree_add()
2333 return -EINVAL; in __bpf_rbtree_add()
2339 link = &parent->rb_left; in __bpf_rbtree_add()
2341 link = &parent->rb_right; in __bpf_rbtree_add()
2348 WRITE_ONCE(node->owner, root); in __bpf_rbtree_add()
2359 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); in bpf_rbtree_add_impl()
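The list/rbtree kfuncs above are used from BPF programs together with bpf_obj_new() and a bpf_spin_lock protecting the root; the wrapper macros and the __contains() annotation come from the selftests' bpf_experimental.h. A rough sketch following that pattern (assumes vmlinux.h, bpf_helpers.h and bpf_experimental.h; the private() and container_of fallbacks mirror what the selftests define locally):

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
#ifndef container_of
#define container_of(ptr, type, member) ((type *)((void *)(ptr) - offsetof(type, member)))
#endif

struct node_data {
	long key;
	struct bpf_rb_node node;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na = container_of(a, struct node_data, node);
	struct node_data *nb = container_of(b, struct node_data, node);

	return na->key < nb->key;
}

SEC("tc")
int add_node(struct __sk_buff *skb)
{
	struct node_data *n = bpf_obj_new(typeof(*n));

	if (!n)
		return 0;
	n->key = skb->len;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, node_less);	/* expands to bpf_rbtree_add_impl() */
	bpf_spin_unlock(&glock);
	return 0;
}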
2370 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2377 if (refcount_inc_not_zero(&p->rcu_users)) in bpf_task_acquire()
2383 * bpf_task_release - Release the reference acquired on a task.
2399 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2410 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2428 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2438 if (level > cgrp->level || level < 0) in bpf_cgroup_ancestor()
2442 ancestor = cgrp->ancestors[level]; in bpf_cgroup_ancestor()
2449 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2465 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2490 if (unlikely(idx >= array->map.max_entries)) in BPF_CALL_2()
2491 return -E2BIG; in BPF_CALL_2()
2493 cgrp = READ_ONCE(array->ptrs[idx]); in BPF_CALL_2()
2495 return -EAGAIN; in BPF_CALL_2()
2509 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2529 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2548 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
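The acquire/release kfuncs documented above follow the usual reference-counting contract: every pointer returned by an *_acquire()/*_from_*() kfunc must be released before the program exits. A sketch assuming vmlinux.h and bpf_tracing.h; the __ksym externs mirror the kernel signatures and the attach point is arbitrary:

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(on_newtask, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;
	struct cgroup *cgrp;

	acquired = bpf_task_acquire(task);
	if (acquired) {
		bpf_printk("new task %d", acquired->pid);
		bpf_task_release(acquired);
	}

	cgrp = bpf_cgroup_from_id(1);		/* 1 is the root cgroup's id */
	if (cgrp)
		bpf_cgroup_release(cgrp);
	return 0;
}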
2567 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2570 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2574 * For non-skb and non-xdp type dynptrs, there is no difference between
2586 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2589 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2598 enum bpf_dynptr_type type; in bpf_dynptr_slice()
2602 if (!ptr->data) in bpf_dynptr_slice()
2614 return ptr->data + ptr->offset + offset; in bpf_dynptr_slice()
2617 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); in bpf_dynptr_slice()
2619 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); in bpf_dynptr_slice()
2622 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); in bpf_dynptr_slice()
2628 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); in bpf_dynptr_slice()
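Program-side use of bpf_dynptr_slice() matches the pattern described in the doc comment above: pass a stack buffer and use whichever pointer comes back. A hedged sketch for an XDP dynptr; the kfunc externs follow the selftests' bpf_kfuncs.h, and bpf_htons() comes from bpf_endian.h:

extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *p, __u32 offset,
			      void *buffer__opt, __u32 buffer__szk) __ksym;

SEC("xdp")
int parse_eth(struct xdp_md *xdp)
{
	struct bpf_dynptr ptr;
	struct ethhdr *eth, buf;

	bpf_dynptr_from_xdp(xdp, 0, &ptr);
	/* Direct pointer if the bytes are linear, otherwise a copy into 'buf';
	 * NULL if the dynptr is invalid or the range is out of bounds. */
	eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
	if (!eth)
		return XDP_DROP;
	return eth->h_proto == bpf_htons(0x0800 /* ETH_P_IP */) ? XDP_PASS : XDP_DROP;
}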
2638 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2641 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2645 * For non-skb and non-xdp type dynptrs, there is no difference between
2671 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2684 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) in bpf_dynptr_slice_rdwr()
2689 * For skb-type dynptrs, it is safe to write into the returned pointer in bpf_dynptr_slice_rdwr()
2717 if (!ptr->data || start > end) in bpf_dynptr_adjust()
2718 return -EINVAL; in bpf_dynptr_adjust()
2723 return -ERANGE; in bpf_dynptr_adjust()
2725 ptr->offset += start; in bpf_dynptr_adjust()
2726 bpf_dynptr_set_size(ptr, end - start); in bpf_dynptr_adjust()
2735 return !ptr->data; in bpf_dynptr_is_null()
2742 if (!ptr->data) in bpf_dynptr_is_rdonly()
2752 if (!ptr->data) in bpf_dynptr_size()
2753 return -EINVAL; in bpf_dynptr_size()
2764 if (!ptr->data) { in bpf_dynptr_clone()
2766 return -EINVAL; in bpf_dynptr_clone()
2775 * bpf_dynptr_copy() - Copy data from one dynptr to another.
2776 * @dst_ptr: Destination dynptr - where data should be copied to
2778 * @src_ptr: Source dynptr - where data should be copied from
2810 return -E2BIG; in bpf_dynptr_copy()
2814 u32 chunk_sz = min_t(u32, sizeof(buf), size - off); in bpf_dynptr_copy()
2862 return !ctx->cnt; in bpf_stack_walker()
2864 ctx->cnt++; in bpf_stack_walker()
2867 ctx->aux = prog->aux; in bpf_stack_walker()
2868 ctx->sp = sp; in bpf_stack_walker()
2869 ctx->bp = bp; in bpf_stack_walker()
2880 WARN_ON_ONCE(!ctx.aux->exception_boundary); in bpf_throw()
2888 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); in bpf_throw()
2894 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_init() local
2901 return -EINVAL; in bpf_wq_init()
2903 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); in bpf_wq_init()
2908 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_start() local
2912 return -EOPNOTSUPP; in bpf_wq_start()
2914 return -EINVAL; in bpf_wq_start()
2915 w = READ_ONCE(async->work); in bpf_wq_start()
2916 if (!w || !READ_ONCE(w->cb.prog)) in bpf_wq_start()
2917 return -EINVAL; in bpf_wq_start()
2919 schedule_work(&w->work); in bpf_wq_start()
2929 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; in bpf_wq_set_callback_impl() local
2932 return -EINVAL; in bpf_wq_set_callback_impl()
2934 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); in bpf_wq_set_callback_impl()
2962 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
2964 * return the same result, as both point to the same 8-byte area.
2966 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
2968 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
2969 * long is composed of bits 32-63 of the u64.
2971 * However, for 32-bit big-endian hosts, this is not the case. The first
2972 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
2986 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
2989 * @nr_words: The size of the specified memory area, measured in 8-byte units.
3012 kit->nr_bits = 0; in bpf_iter_bits_new()
3013 kit->bits_copy = 0; in bpf_iter_bits_new()
3014 kit->bit = -1; in bpf_iter_bits_new()
3017 return -EINVAL; in bpf_iter_bits_new()
3019 return -E2BIG; in bpf_iter_bits_new()
3023 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); in bpf_iter_bits_new()
3025 return -EFAULT; in bpf_iter_bits_new()
3027 swap_ulong_in_u64(&kit->bits_copy, nr_words); in bpf_iter_bits_new()
3029 kit->nr_bits = nr_bits; in bpf_iter_bits_new()
3034 return -E2BIG; in bpf_iter_bits_new()
3037 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); in bpf_iter_bits_new()
3038 if (!kit->bits) in bpf_iter_bits_new()
3039 return -ENOMEM; in bpf_iter_bits_new()
3041 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); in bpf_iter_bits_new()
3043 bpf_mem_free(&bpf_global_ma, kit->bits); in bpf_iter_bits_new()
3047 swap_ulong_in_u64(kit->bits, nr_words); in bpf_iter_bits_new()
3049 kit->nr_bits = nr_bits; in bpf_iter_bits_new()
3054 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3065 int bit = kit->bit, nr_bits = kit->nr_bits; in bpf_iter_bits_next()
3071 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; in bpf_iter_bits_next()
3074 kit->bit = bit; in bpf_iter_bits_next()
3078 kit->bit = bit; in bpf_iter_bits_next()
3079 return &kit->bit; in bpf_iter_bits_next()
3083 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3092 if (kit->nr_bits <= 64) in bpf_iter_bits_destroy()
3094 bpf_mem_free(&bpf_global_ma, kit->bits); in bpf_iter_bits_destroy()
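bpf_iter_bits_new/next/destroy form an open-coded iterator; BPF programs normally drive the trio through the bpf_for_each() macro from bpf_helpers.h, with struct bpf_iter_bits and the kfunc prototypes coming from vmlinux.h or declared as __ksym externs as below. A sketch counting the CPUs a new task may run on (attach point arbitrary):

extern int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
			     u32 nr_words) __ksym __weak;
extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;

SEC("tp_btf/task_newtask")
int BPF_PROG(count_allowed_cpus, struct task_struct *task, u64 clone_flags)
{
	int nr = 0, *bit;

	/* One u64 word, i.e. up to 64 CPUs, is enough for this sketch. */
	bpf_for_each(bits, bit, (const u64 *)task->cpus_ptr, 1)
		nr++;

	bpf_printk("task %d may run on %d cpus", task->pid, nr);
	return 0;
}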
3098 * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3105 * Copies a NUL-terminated string from userspace to BPF space. If user string is
3117 return -EINVAL; in bpf_copy_from_user_str()
3122 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1); in bpf_copy_from_user_str()
3131 memset((char *)dst + ret, 0, dst__sz - ret); in bpf_copy_from_user_str()
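bpf_copy_from_user_str() is sleepable-only; with BPF_F_PAD_ZEROS the tail of the buffer is zero-filled by the memset above, and the return value counts the copied bytes including the NUL terminator. A hedged sketch using a sleepable uprobe; the attach point is purely illustrative and the extern mirrors the kfunc signature:

int bpf_copy_from_user_str(void *dst, u32 dst__sz,
			   const void *unsafe_ptr__ign, u64 flags) __ksym;

SEC("uprobe.s//usr/bin/bash:readline")		/* sleepable uprobe */
int BPF_UPROBE(trace_readline, const char *prompt)
{
	char buf[64];
	int len;

	len = bpf_copy_from_user_str(buf, sizeof(buf), prompt, BPF_F_PAD_ZEROS);
	if (len < 0)
		return 0;
	bpf_printk("prompt: %s (%d bytes incl. NUL)", buf, len);
	return 0;
}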
3139 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
3164 return -EINVAL; in bpf_copy_from_user_task_str()
3177 memset(dst + ret, 0, dst__sz - ret); in bpf_copy_from_user_task_str()
3184 * an unsigned long always points to an 8-byte region on the stack, the kernel may only
3185 * read and write 4 bytes of it on 32-bit.