1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
26 #include <linux/bpf_verifier.h>
27 
28 #include "../../lib/kstrtox.h"
29 
/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an rcu lock
 * if a program is allowed to access maps, so check rcu_read_lock_held(),
 * rcu_read_lock_trace_held() or rcu_read_lock_bh_held() in all three
 * functions.
 */
39 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
40 {
41 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
42 		     !rcu_read_lock_bh_held());
43 	return (unsigned long) map->ops->map_lookup_elem(map, key);
44 }
45 
46 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
47 	.func		= bpf_map_lookup_elem,
48 	.gpl_only	= false,
49 	.pkt_access	= true,
50 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
51 	.arg1_type	= ARG_CONST_MAP_PTR,
52 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
53 };
54 
55 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
56 	   void *, value, u64, flags)
57 {
58 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
59 		     !rcu_read_lock_bh_held());
60 	return map->ops->map_update_elem(map, key, value, flags);
61 }
62 
63 const struct bpf_func_proto bpf_map_update_elem_proto = {
64 	.func		= bpf_map_update_elem,
65 	.gpl_only	= false,
66 	.pkt_access	= true,
67 	.ret_type	= RET_INTEGER,
68 	.arg1_type	= ARG_CONST_MAP_PTR,
69 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
70 	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
71 	.arg4_type	= ARG_ANYTHING,
72 };
73 
74 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
75 {
76 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
77 		     !rcu_read_lock_bh_held());
78 	return map->ops->map_delete_elem(map, key);
79 }
80 
81 const struct bpf_func_proto bpf_map_delete_elem_proto = {
82 	.func		= bpf_map_delete_elem,
83 	.gpl_only	= false,
84 	.pkt_access	= true,
85 	.ret_type	= RET_INTEGER,
86 	.arg1_type	= ARG_CONST_MAP_PTR,
87 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
88 };
89 
90 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
91 {
92 	return map->ops->map_push_elem(map, value, flags);
93 }
94 
95 const struct bpf_func_proto bpf_map_push_elem_proto = {
96 	.func		= bpf_map_push_elem,
97 	.gpl_only	= false,
98 	.pkt_access	= true,
99 	.ret_type	= RET_INTEGER,
100 	.arg1_type	= ARG_CONST_MAP_PTR,
101 	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
102 	.arg3_type	= ARG_ANYTHING,
103 };
104 
105 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
106 {
107 	return map->ops->map_pop_elem(map, value);
108 }
109 
110 const struct bpf_func_proto bpf_map_pop_elem_proto = {
111 	.func		= bpf_map_pop_elem,
112 	.gpl_only	= false,
113 	.ret_type	= RET_INTEGER,
114 	.arg1_type	= ARG_CONST_MAP_PTR,
115 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
116 };
117 
118 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
119 {
120 	return map->ops->map_peek_elem(map, value);
121 }
122 
123 const struct bpf_func_proto bpf_map_peek_elem_proto = {
124 	.func		= bpf_map_peek_elem,
125 	.gpl_only	= false,
126 	.ret_type	= RET_INTEGER,
127 	.arg1_type	= ARG_CONST_MAP_PTR,
128 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
129 };
130 
131 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
132 {
133 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
134 		     !rcu_read_lock_bh_held());
135 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
136 }
137 
138 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
139 	.func		= bpf_map_lookup_percpu_elem,
140 	.gpl_only	= false,
141 	.pkt_access	= true,
142 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
143 	.arg1_type	= ARG_CONST_MAP_PTR,
144 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
145 	.arg3_type	= ARG_ANYTHING,
146 };
147 
148 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
149 	.func		= bpf_user_rnd_u32,
150 	.gpl_only	= false,
151 	.ret_type	= RET_INTEGER,
152 };
153 
154 BPF_CALL_0(bpf_get_smp_processor_id)
155 {
156 	return smp_processor_id();
157 }
158 
159 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
160 	.func		= bpf_get_smp_processor_id,
161 	.gpl_only	= false,
162 	.ret_type	= RET_INTEGER,
163 	.allow_fastcall	= true,
164 };
165 
166 BPF_CALL_0(bpf_get_numa_node_id)
167 {
168 	return numa_node_id();
169 }
170 
171 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
172 	.func		= bpf_get_numa_node_id,
173 	.gpl_only	= false,
174 	.ret_type	= RET_INTEGER,
175 };
176 
177 BPF_CALL_0(bpf_ktime_get_ns)
178 {
179 	/* NMI safe access to clock monotonic */
180 	return ktime_get_mono_fast_ns();
181 }
182 
183 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
184 	.func		= bpf_ktime_get_ns,
185 	.gpl_only	= false,
186 	.ret_type	= RET_INTEGER,
187 };
188 
189 BPF_CALL_0(bpf_ktime_get_boot_ns)
190 {
191 	/* NMI safe access to clock boottime */
192 	return ktime_get_boot_fast_ns();
193 }
194 
195 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
196 	.func		= bpf_ktime_get_boot_ns,
197 	.gpl_only	= false,
198 	.ret_type	= RET_INTEGER,
199 };
200 
201 BPF_CALL_0(bpf_ktime_get_coarse_ns)
202 {
203 	return ktime_get_coarse_ns();
204 }
205 
206 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
207 	.func		= bpf_ktime_get_coarse_ns,
208 	.gpl_only	= false,
209 	.ret_type	= RET_INTEGER,
210 };
211 
212 BPF_CALL_0(bpf_ktime_get_tai_ns)
213 {
214 	/* NMI safe access to clock tai */
215 	return ktime_get_tai_fast_ns();
216 }
217 
218 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
219 	.func		= bpf_ktime_get_tai_ns,
220 	.gpl_only	= false,
221 	.ret_type	= RET_INTEGER,
222 };
223 
224 BPF_CALL_0(bpf_get_current_pid_tgid)
225 {
226 	struct task_struct *task = current;
227 
228 	if (unlikely(!task))
229 		return -EINVAL;
230 
231 	return (u64) task->tgid << 32 | task->pid;
232 }
233 
234 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
235 	.func		= bpf_get_current_pid_tgid,
236 	.gpl_only	= false,
237 	.ret_type	= RET_INTEGER,
238 };
239 
240 BPF_CALL_0(bpf_get_current_uid_gid)
241 {
242 	struct task_struct *task = current;
243 	kuid_t uid;
244 	kgid_t gid;
245 
246 	if (unlikely(!task))
247 		return -EINVAL;
248 
249 	current_uid_gid(&uid, &gid);
250 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
251 		     from_kuid(&init_user_ns, uid);
252 }
253 
254 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
255 	.func		= bpf_get_current_uid_gid,
256 	.gpl_only	= false,
257 	.ret_type	= RET_INTEGER,
258 };
259 
260 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
261 {
262 	struct task_struct *task = current;
263 
264 	if (unlikely(!task))
265 		goto err_clear;
266 
267 	/* Verifier guarantees that size > 0 */
268 	strscpy_pad(buf, task->comm, size);
269 	return 0;
270 err_clear:
271 	memset(buf, 0, size);
272 	return -EINVAL;
273 }
274 
275 const struct bpf_func_proto bpf_get_current_comm_proto = {
276 	.func		= bpf_get_current_comm,
277 	.gpl_only	= false,
278 	.ret_type	= RET_INTEGER,
279 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
280 	.arg2_type	= ARG_CONST_SIZE,
281 };
282 
283 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
284 
285 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
286 {
287 	arch_spinlock_t *l = (void *)lock;
288 	union {
289 		__u32 val;
290 		arch_spinlock_t lock;
291 	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
292 
293 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
294 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
295 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
296 	preempt_disable();
297 	arch_spin_lock(l);
298 }
299 
300 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
301 {
302 	arch_spinlock_t *l = (void *)lock;
303 
304 	arch_spin_unlock(l);
305 	preempt_enable();
306 }
307 
308 #else
309 
310 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
311 {
312 	atomic_t *l = (void *)lock;
313 
314 	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
315 	do {
316 		atomic_cond_read_relaxed(l, !VAL);
317 	} while (atomic_xchg(l, 1));
318 }
319 
320 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
321 {
322 	atomic_t *l = (void *)lock;
323 
324 	atomic_set_release(l, 0);
325 }
326 
327 #endif
328 
329 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
330 
331 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
332 {
333 	unsigned long flags;
334 
335 	local_irq_save(flags);
336 	__bpf_spin_lock(lock);
337 	__this_cpu_write(irqsave_flags, flags);
338 }
339 
340 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
341 {
342 	__bpf_spin_lock_irqsave(lock);
343 	return 0;
344 }
345 
346 const struct bpf_func_proto bpf_spin_lock_proto = {
347 	.func		= bpf_spin_lock,
348 	.gpl_only	= false,
349 	.ret_type	= RET_VOID,
350 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
351 	.arg1_btf_id    = BPF_PTR_POISON,
352 };
353 
354 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
355 {
356 	unsigned long flags;
357 
358 	flags = __this_cpu_read(irqsave_flags);
359 	__bpf_spin_unlock(lock);
360 	local_irq_restore(flags);
361 }
362 
363 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
364 {
365 	__bpf_spin_unlock_irqrestore(lock);
366 	return 0;
367 }
368 
369 const struct bpf_func_proto bpf_spin_unlock_proto = {
370 	.func		= bpf_spin_unlock,
371 	.gpl_only	= false,
372 	.ret_type	= RET_VOID,
373 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
374 	.arg1_btf_id    = BPF_PTR_POISON,
375 };
376 
377 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
378 			   bool lock_src)
379 {
380 	struct bpf_spin_lock *lock;
381 
382 	if (lock_src)
383 		lock = src + map->record->spin_lock_off;
384 	else
385 		lock = dst + map->record->spin_lock_off;
386 	preempt_disable();
387 	__bpf_spin_lock_irqsave(lock);
388 	copy_map_value(map, dst, src);
389 	__bpf_spin_unlock_irqrestore(lock);
390 	preempt_enable();
391 }
392 
393 BPF_CALL_0(bpf_jiffies64)
394 {
395 	return get_jiffies_64();
396 }
397 
398 const struct bpf_func_proto bpf_jiffies64_proto = {
399 	.func		= bpf_jiffies64,
400 	.gpl_only	= false,
401 	.ret_type	= RET_INTEGER,
402 };
403 
404 #ifdef CONFIG_CGROUPS
405 BPF_CALL_0(bpf_get_current_cgroup_id)
406 {
407 	struct cgroup *cgrp;
408 	u64 cgrp_id;
409 
410 	rcu_read_lock();
411 	cgrp = task_dfl_cgroup(current);
412 	cgrp_id = cgroup_id(cgrp);
413 	rcu_read_unlock();
414 
415 	return cgrp_id;
416 }
417 
418 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
419 	.func		= bpf_get_current_cgroup_id,
420 	.gpl_only	= false,
421 	.ret_type	= RET_INTEGER,
422 };
423 
424 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
425 {
426 	struct cgroup *cgrp;
427 	struct cgroup *ancestor;
428 	u64 cgrp_id;
429 
430 	rcu_read_lock();
431 	cgrp = task_dfl_cgroup(current);
432 	ancestor = cgroup_ancestor(cgrp, ancestor_level);
433 	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
434 	rcu_read_unlock();
435 
436 	return cgrp_id;
437 }
438 
439 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
440 	.func		= bpf_get_current_ancestor_cgroup_id,
441 	.gpl_only	= false,
442 	.ret_type	= RET_INTEGER,
443 	.arg1_type	= ARG_ANYTHING,
444 };
445 #endif /* CONFIG_CGROUPS */
446 
447 #define BPF_STRTOX_BASE_MASK 0x1F
448 
449 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
450 			  unsigned long long *res, bool *is_negative)
451 {
452 	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
453 	const char *cur_buf = buf;
454 	size_t cur_len = buf_len;
455 	unsigned int consumed;
456 	size_t val_len;
457 	char str[64];
458 
459 	if (!buf || !buf_len || !res || !is_negative)
460 		return -EINVAL;
461 
462 	if (base != 0 && base != 8 && base != 10 && base != 16)
463 		return -EINVAL;
464 
465 	if (flags & ~BPF_STRTOX_BASE_MASK)
466 		return -EINVAL;
467 
468 	while (cur_buf < buf + buf_len && isspace(*cur_buf))
469 		++cur_buf;
470 
471 	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
472 	if (*is_negative)
473 		++cur_buf;
474 
475 	consumed = cur_buf - buf;
476 	cur_len -= consumed;
477 	if (!cur_len)
478 		return -EINVAL;
479 
480 	cur_len = min(cur_len, sizeof(str) - 1);
481 	memcpy(str, cur_buf, cur_len);
482 	str[cur_len] = '\0';
483 	cur_buf = str;
484 
485 	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
486 	val_len = _parse_integer(cur_buf, base, res);
487 
488 	if (val_len & KSTRTOX_OVERFLOW)
489 		return -ERANGE;
490 
491 	if (val_len == 0)
492 		return -EINVAL;
493 
494 	cur_buf += val_len;
495 	consumed += cur_buf - str;
496 
497 	return consumed;
498 }
499 
500 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
501 			 long long *res)
502 {
503 	unsigned long long _res;
504 	bool is_negative;
505 	int err;
506 
507 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
508 	if (err < 0)
509 		return err;
510 	if (is_negative) {
511 		if ((long long)-_res > 0)
512 			return -ERANGE;
513 		*res = -_res;
514 	} else {
515 		if ((long long)_res < 0)
516 			return -ERANGE;
517 		*res = _res;
518 	}
519 	return err;
520 }
521 
522 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
523 	   s64 *, res)
524 {
525 	long long _res;
526 	int err;
527 
528 	*res = 0;
529 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
530 	if (err < 0)
531 		return err;
532 	*res = _res;
533 	return err;
534 }
535 
536 const struct bpf_func_proto bpf_strtol_proto = {
537 	.func		= bpf_strtol,
538 	.gpl_only	= false,
539 	.ret_type	= RET_INTEGER,
540 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
541 	.arg2_type	= ARG_CONST_SIZE,
542 	.arg3_type	= ARG_ANYTHING,
543 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
544 	.arg4_size	= sizeof(s64),
545 };
546 
547 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
548 	   u64 *, res)
549 {
550 	unsigned long long _res;
551 	bool is_negative;
552 	int err;
553 
554 	*res = 0;
555 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
556 	if (err < 0)
557 		return err;
558 	if (is_negative)
559 		return -EINVAL;
560 	*res = _res;
561 	return err;
562 }
563 
564 const struct bpf_func_proto bpf_strtoul_proto = {
565 	.func		= bpf_strtoul,
566 	.gpl_only	= false,
567 	.ret_type	= RET_INTEGER,
568 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
569 	.arg2_type	= ARG_CONST_SIZE,
570 	.arg3_type	= ARG_ANYTHING,
571 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
572 	.arg4_size	= sizeof(u64),
573 };
574 
575 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
576 {
577 	return strncmp(s1, s2, s1_sz);
578 }
579 
580 static const struct bpf_func_proto bpf_strncmp_proto = {
581 	.func		= bpf_strncmp,
582 	.gpl_only	= false,
583 	.ret_type	= RET_INTEGER,
584 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
585 	.arg2_type	= ARG_CONST_SIZE,
586 	.arg3_type	= ARG_PTR_TO_CONST_STR,
587 };
588 
589 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
590 	   struct bpf_pidns_info *, nsdata, u32, size)
591 {
592 	struct task_struct *task = current;
593 	struct pid_namespace *pidns;
594 	int err = -EINVAL;
595 
596 	if (unlikely(size != sizeof(struct bpf_pidns_info)))
597 		goto clear;
598 
599 	if (unlikely((u64)(dev_t)dev != dev))
600 		goto clear;
601 
602 	if (unlikely(!task))
603 		goto clear;
604 
605 	pidns = task_active_pid_ns(task);
606 	if (unlikely(!pidns)) {
607 		err = -ENOENT;
608 		goto clear;
609 	}
610 
611 	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
612 		goto clear;
613 
614 	nsdata->pid = task_pid_nr_ns(task, pidns);
615 	nsdata->tgid = task_tgid_nr_ns(task, pidns);
616 	return 0;
617 clear:
618 	memset((void *)nsdata, 0, (size_t) size);
619 	return err;
620 }
621 
622 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
623 	.func		= bpf_get_ns_current_pid_tgid,
624 	.gpl_only	= false,
625 	.ret_type	= RET_INTEGER,
626 	.arg1_type	= ARG_ANYTHING,
627 	.arg2_type	= ARG_ANYTHING,
628 	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
629 	.arg4_type      = ARG_CONST_SIZE,
630 };
631 
632 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
633 	.func		= bpf_get_raw_cpu_id,
634 	.gpl_only	= false,
635 	.ret_type	= RET_INTEGER,
636 };
637 
638 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
639 	   u64, flags, void *, data, u64, size)
640 {
641 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
642 		return -EINVAL;
643 
644 	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
645 }
646 
647 const struct bpf_func_proto bpf_event_output_data_proto =  {
648 	.func		= bpf_event_output_data,
649 	.gpl_only       = true,
650 	.ret_type       = RET_INTEGER,
651 	.arg1_type      = ARG_PTR_TO_CTX,
652 	.arg2_type      = ARG_CONST_MAP_PTR,
653 	.arg3_type      = ARG_ANYTHING,
654 	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
655 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
656 };
657 
658 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
659 	   const void __user *, user_ptr)
660 {
661 	int ret = copy_from_user(dst, user_ptr, size);
662 
663 	if (unlikely(ret)) {
664 		memset(dst, 0, size);
665 		ret = -EFAULT;
666 	}
667 
668 	return ret;
669 }
670 
671 const struct bpf_func_proto bpf_copy_from_user_proto = {
672 	.func		= bpf_copy_from_user,
673 	.gpl_only	= false,
674 	.might_sleep	= true,
675 	.ret_type	= RET_INTEGER,
676 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
677 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
678 	.arg3_type	= ARG_ANYTHING,
679 };
680 
681 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
682 	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
683 {
684 	int ret;
685 
686 	/* flags is not used yet */
687 	if (unlikely(flags))
688 		return -EINVAL;
689 
690 	if (unlikely(!size))
691 		return 0;
692 
693 	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
694 	if (ret == size)
695 		return 0;
696 
697 	memset(dst, 0, size);
698 	/* Return -EFAULT for partial read */
699 	return ret < 0 ? ret : -EFAULT;
700 }
701 
702 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
703 	.func		= bpf_copy_from_user_task,
704 	.gpl_only	= true,
705 	.might_sleep	= true,
706 	.ret_type	= RET_INTEGER,
707 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
708 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
709 	.arg3_type	= ARG_ANYTHING,
710 	.arg4_type	= ARG_PTR_TO_BTF_ID,
711 	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
712 	.arg5_type	= ARG_ANYTHING
713 };
714 
715 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
716 {
717 	if (cpu >= nr_cpu_ids)
718 		return (unsigned long)NULL;
719 
720 	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
721 }
722 
723 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
724 	.func		= bpf_per_cpu_ptr,
725 	.gpl_only	= false,
726 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
727 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
728 	.arg2_type	= ARG_ANYTHING,
729 };
730 
731 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
732 {
733 	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
734 }
735 
736 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
737 	.func		= bpf_this_cpu_ptr,
738 	.gpl_only	= false,
739 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
740 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
741 };
742 
743 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
744 		size_t bufsz)
745 {
746 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
747 
748 	buf[0] = 0;
749 
750 	switch (fmt_ptype) {
751 	case 's':
752 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
753 		if ((unsigned long)unsafe_ptr < TASK_SIZE)
754 			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
755 		fallthrough;
756 #endif
757 	case 'k':
758 		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
759 	case 'u':
760 		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
761 	}
762 
763 	return -EINVAL;
764 }
765 
766 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
767  * arguments representation.
768  */
769 #define MAX_BPRINTF_BIN_ARGS	512
770 
771 /* Support executing three nested bprintf helper calls on a given CPU */
772 #define MAX_BPRINTF_NEST_LEVEL	3
773 struct bpf_bprintf_buffers {
774 	char bin_args[MAX_BPRINTF_BIN_ARGS];
775 	char buf[MAX_BPRINTF_BUF];
776 };
777 
778 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
779 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
780 
781 static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
782 {
783 	int nest_level;
784 
785 	preempt_disable();
786 	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
787 	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
788 		this_cpu_dec(bpf_bprintf_nest_level);
789 		preempt_enable();
790 		return -EBUSY;
791 	}
792 	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
793 
794 	return 0;
795 }
796 
797 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
798 {
799 	if (!data->bin_args && !data->buf)
800 		return;
801 	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
802 		return;
803 	this_cpu_dec(bpf_bprintf_nest_level);
804 	preempt_enable();
805 }
806 
807 /*
808  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
809  *
810  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
811  *
812  * This can be used in two ways:
813  * - Format string verification only: when data->get_bin_args is false
814  * - Arguments preparation: in addition to the above verification, it writes in
815  *   data->bin_args a binary representation of arguments usable by bstr_printf
816  *   where pointers from BPF have been sanitized.
817  *
818  * In argument preparation mode, if 0 is returned, safe temporary buffers are
819  * allocated and bpf_bprintf_cleanup should be called to free them after use.
820  */
821 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
822 			u32 num_args, struct bpf_bprintf_data *data)
823 {
824 	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
825 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
826 	struct bpf_bprintf_buffers *buffers = NULL;
827 	size_t sizeof_cur_arg, sizeof_cur_ip;
828 	int err, i, num_spec = 0;
829 	u64 cur_arg;
830 	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
831 
832 	fmt_end = strnchr(fmt, fmt_size, 0);
833 	if (!fmt_end)
834 		return -EINVAL;
835 	fmt_size = fmt_end - fmt;
836 
837 	if (get_buffers && try_get_buffers(&buffers))
838 		return -EBUSY;
839 
840 	if (data->get_bin_args) {
841 		if (num_args)
842 			tmp_buf = buffers->bin_args;
843 		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
844 		data->bin_args = (u32 *)tmp_buf;
845 	}
846 
847 	if (data->get_buf)
848 		data->buf = buffers->buf;
849 
850 	for (i = 0; i < fmt_size; i++) {
851 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
852 			err = -EINVAL;
853 			goto out;
854 		}
855 
856 		if (fmt[i] != '%')
857 			continue;
858 
859 		if (fmt[i + 1] == '%') {
860 			i++;
861 			continue;
862 		}
863 
864 		if (num_spec >= num_args) {
865 			err = -EINVAL;
866 			goto out;
867 		}
868 
869 		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1]; in the worst case it will be a 0
871 		 */
872 		i++;
873 
874 		/* skip optional "[0 +-][num]" width formatting field */
875 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
876 		       fmt[i] == ' ')
877 			i++;
878 		if (fmt[i] >= '1' && fmt[i] <= '9') {
879 			i++;
880 			while (fmt[i] >= '0' && fmt[i] <= '9')
881 				i++;
882 		}
883 
884 		if (fmt[i] == 'p') {
885 			sizeof_cur_arg = sizeof(long);
886 
887 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
888 			    fmt[i + 2] == 's') {
889 				fmt_ptype = fmt[i + 1];
890 				i += 2;
891 				goto fmt_str;
892 			}
893 
894 			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
895 			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
896 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
897 			    fmt[i + 1] == 'S') {
898 				/* just kernel pointers */
899 				if (tmp_buf)
900 					cur_arg = raw_args[num_spec];
901 				i++;
902 				goto nocopy_fmt;
903 			}
904 
905 			if (fmt[i + 1] == 'B') {
906 				if (tmp_buf)  {
907 					err = snprintf(tmp_buf,
908 						       (tmp_buf_end - tmp_buf),
909 						       "%pB",
910 						       (void *)(long)raw_args[num_spec]);
911 					tmp_buf += (err + 1);
912 				}
913 
914 				i++;
915 				num_spec++;
916 				continue;
917 			}
918 
919 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
920 			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
921 			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
922 				err = -EINVAL;
923 				goto out;
924 			}
925 
926 			i += 2;
927 			if (!tmp_buf)
928 				goto nocopy_fmt;
929 
930 			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
931 			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
932 				err = -ENOSPC;
933 				goto out;
934 			}
935 
936 			unsafe_ptr = (char *)(long)raw_args[num_spec];
937 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
938 						       sizeof_cur_ip);
939 			if (err < 0)
940 				memset(cur_ip, 0, sizeof_cur_ip);
941 
942 			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings; ironically, the easiest way
944 			 * to do that is to call snprintf.
945 			 */
946 			ip_spec[2] = fmt[i - 1];
947 			ip_spec[3] = fmt[i];
948 			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
949 				       ip_spec, &cur_ip);
950 
951 			tmp_buf += err + 1;
952 			num_spec++;
953 
954 			continue;
955 		} else if (fmt[i] == 's') {
956 			fmt_ptype = fmt[i];
957 fmt_str:
958 			if (fmt[i + 1] != 0 &&
959 			    !isspace(fmt[i + 1]) &&
960 			    !ispunct(fmt[i + 1])) {
961 				err = -EINVAL;
962 				goto out;
963 			}
964 
965 			if (!tmp_buf)
966 				goto nocopy_fmt;
967 
968 			if (tmp_buf_end == tmp_buf) {
969 				err = -ENOSPC;
970 				goto out;
971 			}
972 
973 			unsafe_ptr = (char *)(long)raw_args[num_spec];
974 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
975 						    fmt_ptype,
976 						    tmp_buf_end - tmp_buf);
977 			if (err < 0) {
978 				tmp_buf[0] = '\0';
979 				err = 1;
980 			}
981 
982 			tmp_buf += err;
983 			num_spec++;
984 
985 			continue;
986 		} else if (fmt[i] == 'c') {
987 			if (!tmp_buf)
988 				goto nocopy_fmt;
989 
990 			if (tmp_buf_end == tmp_buf) {
991 				err = -ENOSPC;
992 				goto out;
993 			}
994 
995 			*tmp_buf = raw_args[num_spec];
996 			tmp_buf++;
997 			num_spec++;
998 
999 			continue;
1000 		}
1001 
1002 		sizeof_cur_arg = sizeof(int);
1003 
1004 		if (fmt[i] == 'l') {
1005 			sizeof_cur_arg = sizeof(long);
1006 			i++;
1007 		}
1008 		if (fmt[i] == 'l') {
1009 			sizeof_cur_arg = sizeof(long long);
1010 			i++;
1011 		}
1012 
1013 		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1014 		    fmt[i] != 'x' && fmt[i] != 'X') {
1015 			err = -EINVAL;
1016 			goto out;
1017 		}
1018 
1019 		if (tmp_buf)
1020 			cur_arg = raw_args[num_spec];
1021 nocopy_fmt:
1022 		if (tmp_buf) {
1023 			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1024 			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1025 				err = -ENOSPC;
1026 				goto out;
1027 			}
1028 
1029 			if (sizeof_cur_arg == 8) {
1030 				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
1031 				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1032 			} else {
1033 				*(u32 *)tmp_buf = (u32)(long)cur_arg;
1034 			}
1035 			tmp_buf += sizeof_cur_arg;
1036 		}
1037 		num_spec++;
1038 	}
1039 
1040 	err = 0;
1041 out:
1042 	if (err)
1043 		bpf_bprintf_cleanup(data);
1044 	return err;
1045 }
1046 
1047 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1048 	   const void *, args, u32, data_len)
1049 {
1050 	struct bpf_bprintf_data data = {
1051 		.get_bin_args	= true,
1052 	};
1053 	int err, num_args;
1054 
1055 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1056 	    (data_len && !args))
1057 		return -EINVAL;
1058 	num_args = data_len / 8;
1059 
1060 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1061 	 * can safely give an unbounded size.
1062 	 */
1063 	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1064 	if (err < 0)
1065 		return err;
1066 
1067 	err = bstr_printf(str, str_size, fmt, data.bin_args);
1068 
1069 	bpf_bprintf_cleanup(&data);
1070 
1071 	return err + 1;
1072 }
1073 
1074 const struct bpf_func_proto bpf_snprintf_proto = {
1075 	.func		= bpf_snprintf,
1076 	.gpl_only	= true,
1077 	.ret_type	= RET_INTEGER,
1078 	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
1079 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1080 	.arg3_type	= ARG_PTR_TO_CONST_STR,
1081 	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1082 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1083 };
1084 
1085 struct bpf_async_cb {
1086 	struct bpf_map *map;
1087 	struct bpf_prog *prog;
1088 	void __rcu *callback_fn;
1089 	void *value;
1090 	union {
1091 		struct rcu_head rcu;
1092 		struct work_struct delete_work;
1093 	};
1094 	u64 flags;
1095 };
1096 
/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it is zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf
 * callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref frees
 * the timers when an inner map is replaced or deleted by user space.
 */
1113 struct bpf_hrtimer {
1114 	struct bpf_async_cb cb;
1115 	struct hrtimer timer;
1116 	atomic_t cancelling;
1117 };
1118 
1119 struct bpf_work {
1120 	struct bpf_async_cb cb;
1121 	struct work_struct work;
1122 	struct work_struct delete_work;
1123 };
1124 
1125 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
1126 struct bpf_async_kern {
1127 	union {
1128 		struct bpf_async_cb *cb;
1129 		struct bpf_hrtimer *timer;
1130 		struct bpf_work *work;
1131 	};
1132 	/* bpf_spin_lock is used here instead of spinlock_t to make
1133 	 * sure that it always fits into space reserved by struct bpf_timer
1134 	 * regardless of LOCKDEP and spinlock debug flags.
1135 	 */
1136 	struct bpf_spin_lock lock;
1137 } __attribute__((aligned(8)));
1138 
1139 enum bpf_async_type {
1140 	BPF_ASYNC_TYPE_TIMER = 0,
1141 	BPF_ASYNC_TYPE_WQ,
1142 };
1143 
1144 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1145 
1146 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1147 {
1148 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1149 	struct bpf_map *map = t->cb.map;
1150 	void *value = t->cb.value;
1151 	bpf_callback_t callback_fn;
1152 	void *key;
1153 	u32 idx;
1154 
1155 	BTF_TYPE_EMIT(struct bpf_timer);
1156 	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
1157 	if (!callback_fn)
1158 		goto out;
1159 
1160 	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1161 	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1162 	 * Remember the timer this callback is servicing to prevent
1163 	 * deadlock if callback_fn() calls bpf_timer_cancel() or
1164 	 * bpf_map_delete_elem() on the same timer.
1165 	 */
1166 	this_cpu_write(hrtimer_running, t);
1167 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1168 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1169 
1170 		/* compute the key */
1171 		idx = ((char *)value - array->value) / array->elem_size;
1172 		key = &idx;
1173 	} else { /* hash or lru */
1174 		key = value - round_up(map->key_size, 8);
1175 	}
1176 
1177 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1178 	/* The verifier checked that return value is zero. */
1179 
1180 	this_cpu_write(hrtimer_running, NULL);
1181 out:
1182 	return HRTIMER_NORESTART;
1183 }
1184 
1185 static void bpf_wq_work(struct work_struct *work)
1186 {
1187 	struct bpf_work *w = container_of(work, struct bpf_work, work);
1188 	struct bpf_async_cb *cb = &w->cb;
1189 	struct bpf_map *map = cb->map;
1190 	bpf_callback_t callback_fn;
1191 	void *value = cb->value;
1192 	void *key;
1193 	u32 idx;
1194 
1195 	BTF_TYPE_EMIT(struct bpf_wq);
1196 
1197 	callback_fn = READ_ONCE(cb->callback_fn);
1198 	if (!callback_fn)
1199 		return;
1200 
1201 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1202 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1203 
1204 		/* compute the key */
1205 		idx = ((char *)value - array->value) / array->elem_size;
1206 		key = &idx;
1207 	} else { /* hash or lru */
1208 		key = value - round_up(map->key_size, 8);
1209 	}
1210 
	rcu_read_lock_trace();
	migrate_disable();
1213 
1214 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1215 
1216 	migrate_enable();
1217 	rcu_read_unlock_trace();
1218 }
1219 
1220 static void bpf_wq_delete_work(struct work_struct *work)
1221 {
1222 	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
1223 
1224 	cancel_work_sync(&w->work);
1225 
1226 	kfree_rcu(w, cb.rcu);
1227 }
1228 
1229 static void bpf_timer_delete_work(struct work_struct *work)
1230 {
1231 	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
1232 
1233 	/* Cancel the timer and wait for callback to complete if it was running.
1234 	 * If hrtimer_cancel() can be safely called it's safe to call
1235 	 * kfree_rcu(t) right after for both preallocated and non-preallocated
1236 	 * maps.  The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. Any timer armed on this bpf_hrtimer before
	 * bpf_timer_cancel_and_free() will be cancelled by the hrtimer_cancel()
	 * below.
1239 	 */
1240 	hrtimer_cancel(&t->timer);
1241 	kfree_rcu(t, cb.rcu);
1242 }
1243 
1244 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1245 			    enum bpf_async_type type)
1246 {
1247 	struct bpf_async_cb *cb;
1248 	struct bpf_hrtimer *t;
1249 	struct bpf_work *w;
1250 	clockid_t clockid;
1251 	size_t size;
1252 	int ret = 0;
1253 
1254 	if (in_nmi())
1255 		return -EOPNOTSUPP;
1256 
1257 	switch (type) {
1258 	case BPF_ASYNC_TYPE_TIMER:
1259 		size = sizeof(struct bpf_hrtimer);
1260 		break;
1261 	case BPF_ASYNC_TYPE_WQ:
1262 		size = sizeof(struct bpf_work);
1263 		break;
1264 	default:
1265 		return -EINVAL;
1266 	}
1267 
1268 	__bpf_spin_lock_irqsave(&async->lock);
1269 	t = async->timer;
1270 	if (t) {
1271 		ret = -EBUSY;
1272 		goto out;
1273 	}
1274 
1275 	/* allocate hrtimer via map_kmalloc to use memcg accounting */
1276 	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
1277 	if (!cb) {
1278 		ret = -ENOMEM;
1279 		goto out;
1280 	}
1281 
1282 	switch (type) {
1283 	case BPF_ASYNC_TYPE_TIMER:
1284 		clockid = flags & (MAX_CLOCKS - 1);
1285 		t = (struct bpf_hrtimer *)cb;
1286 
1287 		atomic_set(&t->cancelling, 0);
1288 		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
1289 		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
1290 		cb->value = (void *)async - map->record->timer_off;
1291 		break;
1292 	case BPF_ASYNC_TYPE_WQ:
1293 		w = (struct bpf_work *)cb;
1294 
1295 		INIT_WORK(&w->work, bpf_wq_work);
1296 		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
1297 		cb->value = (void *)async - map->record->wq_off;
1298 		break;
1299 	}
1300 	cb->map = map;
1301 	cb->prog = NULL;
1302 	cb->flags = flags;
1303 	rcu_assign_pointer(cb->callback_fn, NULL);
1304 
1305 	WRITE_ONCE(async->cb, cb);
	/* Guarantee the ordering between async->cb and map->usercnt, so that
	 * when a uref release races with bpf timer init, either
	 * bpf_timer_cancel_and_free() called by the uref release reads a
	 * non-NULL timer, or atomic64_read() below returns a zero usercnt.
1310 	 */
1311 	smp_mb();
1312 	if (!atomic64_read(&map->usercnt)) {
1313 		/* maps with timers must be either held by user space
1314 		 * or pinned in bpffs.
1315 		 */
1316 		WRITE_ONCE(async->cb, NULL);
1317 		kfree(cb);
1318 		ret = -EPERM;
1319 	}
1320 out:
1321 	__bpf_spin_unlock_irqrestore(&async->lock);
1322 	return ret;
1323 }
1324 
1325 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1326 	   u64, flags)
1327 {
1328 	clock_t clockid = flags & (MAX_CLOCKS - 1);
1329 
1330 	BUILD_BUG_ON(MAX_CLOCKS != 16);
1331 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1332 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1333 
1334 	if (flags >= MAX_CLOCKS ||
1335 	    /* similar to timerfd except _ALARM variants are not supported */
1336 	    (clockid != CLOCK_MONOTONIC &&
1337 	     clockid != CLOCK_REALTIME &&
1338 	     clockid != CLOCK_BOOTTIME))
1339 		return -EINVAL;
1340 
1341 	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1342 }
1343 
1344 static const struct bpf_func_proto bpf_timer_init_proto = {
1345 	.func		= bpf_timer_init,
1346 	.gpl_only	= true,
1347 	.ret_type	= RET_INTEGER,
1348 	.arg1_type	= ARG_PTR_TO_TIMER,
1349 	.arg2_type	= ARG_CONST_MAP_PTR,
1350 	.arg3_type	= ARG_ANYTHING,
1351 };
1352 
1353 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
1354 				    struct bpf_prog_aux *aux, unsigned int flags,
1355 				    enum bpf_async_type type)
1356 {
1357 	struct bpf_prog *prev, *prog = aux->prog;
1358 	struct bpf_async_cb *cb;
1359 	int ret = 0;
1360 
1361 	if (in_nmi())
1362 		return -EOPNOTSUPP;
1363 	__bpf_spin_lock_irqsave(&async->lock);
1364 	cb = async->cb;
1365 	if (!cb) {
1366 		ret = -EINVAL;
1367 		goto out;
1368 	}
1369 	if (!atomic64_read(&cb->map->usercnt)) {
1370 		/* maps with timers must be either held by user space
1371 		 * or pinned in bpffs. Otherwise timer might still be
1372 		 * running even when bpf prog is detached and user space
1373 		 * is gone, since map_release_uref won't ever be called.
1374 		 */
1375 		ret = -EPERM;
1376 		goto out;
1377 	}
1378 	prev = cb->prog;
1379 	if (prev != prog) {
1380 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
1381 		 * can pick different callback_fn-s within the same prog.
1382 		 */
1383 		prog = bpf_prog_inc_not_zero(prog);
1384 		if (IS_ERR(prog)) {
1385 			ret = PTR_ERR(prog);
1386 			goto out;
1387 		}
1388 		if (prev)
1389 			/* Drop prev prog refcnt when swapping with new prog */
1390 			bpf_prog_put(prev);
1391 		cb->prog = prog;
1392 	}
1393 	rcu_assign_pointer(cb->callback_fn, callback_fn);
1394 out:
1395 	__bpf_spin_unlock_irqrestore(&async->lock);
1396 	return ret;
1397 }
1398 
1399 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
1400 	   struct bpf_prog_aux *, aux)
1401 {
1402 	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
1403 }
1404 
1405 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1406 	.func		= bpf_timer_set_callback,
1407 	.gpl_only	= true,
1408 	.ret_type	= RET_INTEGER,
1409 	.arg1_type	= ARG_PTR_TO_TIMER,
1410 	.arg2_type	= ARG_PTR_TO_FUNC,
1411 };
1412 
1413 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
1414 {
1415 	struct bpf_hrtimer *t;
1416 	int ret = 0;
1417 	enum hrtimer_mode mode;
1418 
1419 	if (in_nmi())
1420 		return -EOPNOTSUPP;
1421 	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1422 		return -EINVAL;
1423 	__bpf_spin_lock_irqsave(&timer->lock);
1424 	t = timer->timer;
1425 	if (!t || !t->cb.prog) {
1426 		ret = -EINVAL;
1427 		goto out;
1428 	}
1429 
1430 	if (flags & BPF_F_TIMER_ABS)
1431 		mode = HRTIMER_MODE_ABS_SOFT;
1432 	else
1433 		mode = HRTIMER_MODE_REL_SOFT;
1434 
1435 	if (flags & BPF_F_TIMER_CPU_PIN)
1436 		mode |= HRTIMER_MODE_PINNED;
1437 
1438 	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1439 out:
1440 	__bpf_spin_unlock_irqrestore(&timer->lock);
1441 	return ret;
1442 }
1443 
1444 static const struct bpf_func_proto bpf_timer_start_proto = {
1445 	.func		= bpf_timer_start,
1446 	.gpl_only	= true,
1447 	.ret_type	= RET_INTEGER,
1448 	.arg1_type	= ARG_PTR_TO_TIMER,
1449 	.arg2_type	= ARG_ANYTHING,
1450 	.arg3_type	= ARG_ANYTHING,
1451 };
1452 
1453 static void drop_prog_refcnt(struct bpf_async_cb *async)
1454 {
1455 	struct bpf_prog *prog = async->prog;
1456 
1457 	if (prog) {
1458 		bpf_prog_put(prog);
1459 		async->prog = NULL;
1460 		rcu_assign_pointer(async->callback_fn, NULL);
1461 	}
1462 }
1463 
1464 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
1465 {
1466 	struct bpf_hrtimer *t, *cur_t;
1467 	bool inc = false;
1468 	int ret = 0;
1469 
1470 	if (in_nmi())
1471 		return -EOPNOTSUPP;
1472 	rcu_read_lock();
1473 	__bpf_spin_lock_irqsave(&timer->lock);
1474 	t = timer->timer;
1475 	if (!t) {
1476 		ret = -EINVAL;
1477 		goto out;
1478 	}
1479 
1480 	cur_t = this_cpu_read(hrtimer_running);
1481 	if (cur_t == t) {
1482 		/* If bpf callback_fn is trying to bpf_timer_cancel()
1483 		 * its own timer the hrtimer_cancel() will deadlock
1484 		 * since it waits for callback_fn to finish.
1485 		 */
1486 		ret = -EDEADLK;
1487 		goto out;
1488 	}
1489 
1490 	/* Only account in-flight cancellations when invoked from a timer
1491 	 * callback, since we want to avoid waiting only if other _callbacks_
1492 	 * are waiting on us, to avoid introducing lockups. Non-callback paths
1493 	 * are ok, since nobody would synchronously wait for their completion.
1494 	 */
1495 	if (!cur_t)
1496 		goto drop;
1497 	atomic_inc(&t->cancelling);
1498 	/* Need full barrier after relaxed atomic_inc */
1499 	smp_mb__after_atomic();
1500 	inc = true;
1501 	if (atomic_read(&cur_t->cancelling)) {
1502 		/* We're cancelling timer t, while some other timer callback is
1503 		 * attempting to cancel us. In such a case, it might be possible
1504 		 * that timer t belongs to the other callback, or some other
1505 		 * callback waiting upon it (creating transitive dependencies
1506 		 * upon us), and we will enter a deadlock if we continue
1507 		 * cancelling and waiting for it synchronously, since it might
1508 		 * do the same. Bail!
1509 		 */
1510 		ret = -EDEADLK;
1511 		goto out;
1512 	}
1513 drop:
1514 	drop_prog_refcnt(&t->cb);
1515 out:
1516 	__bpf_spin_unlock_irqrestore(&timer->lock);
1517 	/* Cancel the timer and wait for associated callback to finish
1518 	 * if it was running.
1519 	 */
1520 	ret = ret ?: hrtimer_cancel(&t->timer);
1521 	if (inc)
1522 		atomic_dec(&t->cancelling);
1523 	rcu_read_unlock();
1524 	return ret;
1525 }
1526 
1527 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1528 	.func		= bpf_timer_cancel,
1529 	.gpl_only	= true,
1530 	.ret_type	= RET_INTEGER,
1531 	.arg1_type	= ARG_PTR_TO_TIMER,
1532 };
1533 
1534 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
1535 {
1536 	struct bpf_async_cb *cb;
1537 
1538 	/* Performance optimization: read async->cb without lock first. */
1539 	if (!READ_ONCE(async->cb))
1540 		return NULL;
1541 
1542 	__bpf_spin_lock_irqsave(&async->lock);
1543 	/* re-read it under lock */
1544 	cb = async->cb;
1545 	if (!cb)
1546 		goto out;
1547 	drop_prog_refcnt(cb);
1548 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1549 	 * this timer, since it won't be initialized.
1550 	 */
1551 	WRITE_ONCE(async->cb, NULL);
1552 out:
1553 	__bpf_spin_unlock_irqrestore(&async->lock);
1554 	return cb;
1555 }
1556 
1557 /* This function is called by map_delete/update_elem for individual element and
1558  * by ops->map_release_uref when the user space reference to a map reaches zero.
1559  */
1560 void bpf_timer_cancel_and_free(void *val)
1561 {
1562 	struct bpf_hrtimer *t;
1563 
1564 	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);
1565 
1566 	if (!t)
1567 		return;
1568 	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such a case we don't call hrtimer_cancel() (since it
1570 	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
1571 	 * just return -1). Though callback_fn is still running on this cpu it's
1572 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1573 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1574 	 * since async->cb = NULL was already done. The timer will be
1575 	 * effectively cancelled because bpf_timer_cb() will return
1576 	 * HRTIMER_NORESTART.
1577 	 *
1578 	 * However, it is possible the timer callback_fn calling us armed the
1579 	 * timer _before_ calling us, such that failing to cancel it here will
1580 	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
1581 	 * Therefore, we _need_ to cancel any outstanding timers before we do
1582 	 * kfree_rcu, even though no more timers can be armed.
1583 	 *
1584 	 * Moreover, we need to schedule work even if timer does not belong to
1585 	 * the calling callback_fn, as on two different CPUs, we can end up in a
1586 	 * situation where both sides run in parallel, try to cancel one
1587 	 * another, and we end up waiting on both sides in hrtimer_cancel
	 * without making forward progress, since timer1 depends on the timer2
1589 	 * callback to finish, and vice versa.
1590 	 *
1591 	 *  CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
1592 	 *  bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
1593 	 *
1594 	 * To avoid these issues, punt to workqueue context when we are in a
1595 	 * timer callback.
1596 	 */
1597 	if (this_cpu_read(hrtimer_running)) {
1598 		queue_work(system_unbound_wq, &t->cb.delete_work);
1599 		return;
1600 	}
1601 
1602 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* If the timer is running on another CPU, also use a kworker to
1604 		 * wait for the completion of the timer instead of trying to
1605 		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
1606 		 * completion.
1607 		 */
1608 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
1609 			kfree_rcu(t, cb.rcu);
1610 		else
1611 			queue_work(system_unbound_wq, &t->cb.delete_work);
1612 	} else {
1613 		bpf_timer_delete_work(&t->cb.delete_work);
1614 	}
1615 }
1616 
1617 /* This function is called by map_delete/update_elem for individual element and
1618  * by ops->map_release_uref when the user space reference to a map reaches zero.
1619  */
1620 void bpf_wq_cancel_and_free(void *val)
1621 {
1622 	struct bpf_work *work;
1623 
1624 	BTF_TYPE_EMIT(struct bpf_wq);
1625 
1626 	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
1627 	if (!work)
1628 		return;
1629 	/* Trigger cancel of the sleepable work, but *do not* wait for
1630 	 * it to finish if it was running as we might not be in a
1631 	 * sleepable context.
1632 	 * kfree will be called once the work has finished.
1633 	 */
1634 	schedule_work(&work->delete_work);
1635 }
1636 
1637 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
1638 {
1639 	unsigned long *kptr = dst;
1640 
1641 	/* This helper may be inlined by verifier. */
1642 	return xchg(kptr, (unsigned long)ptr);
1643 }
1644 
/* Unlike other PTR_TO_BTF_ID helpers, the btf_id used by the bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote a type that the verifier will determine.
1648  */
1649 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1650 	.func         = bpf_kptr_xchg,
1651 	.gpl_only     = false,
1652 	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
1653 	.ret_btf_id   = BPF_PTR_POISON,
1654 	.arg1_type    = ARG_KPTR_XCHG_DEST,
1655 	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1656 	.arg2_btf_id  = BPF_PTR_POISON,
1657 };
1658 
/* Since the upper 8 bits of dynptr->size are reserved, the
1660  * maximum supported size is 2^24 - 1.
1661  */
1662 #define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
1663 #define DYNPTR_TYPE_SHIFT	28
1664 #define DYNPTR_SIZE_MASK	0xFFFFFF
1665 #define DYNPTR_RDONLY_BIT	BIT(31)
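
/* For example (illustrative arithmetic only): a read-only skb dynptr covering
 * 100 bytes would carry
 *	size = 100 | (BPF_DYNPTR_TYPE_SKB << DYNPTR_TYPE_SHIFT) | DYNPTR_RDONLY_BIT
 * with bits 24-27 left unused between the 24-bit size and the type field.
 */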
1666 
1667 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1668 {
1669 	return ptr->size & DYNPTR_RDONLY_BIT;
1670 }
1671 
1672 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1673 {
1674 	ptr->size |= DYNPTR_RDONLY_BIT;
1675 }
1676 
1677 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1678 {
1679 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
1680 }
1681 
1682 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1683 {
1684 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1685 }
1686 
1687 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1688 {
1689 	return ptr->size & DYNPTR_SIZE_MASK;
1690 }
1691 
1692 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1693 {
1694 	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1695 
1696 	ptr->size = new_size | metadata;
1697 }
1698 
1699 int bpf_dynptr_check_size(u32 size)
1700 {
1701 	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1702 }
1703 
1704 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1705 		     enum bpf_dynptr_type type, u32 offset, u32 size)
1706 {
1707 	ptr->data = data;
1708 	ptr->offset = offset;
1709 	ptr->size = size;
1710 	bpf_dynptr_set_type(ptr, type);
1711 }
1712 
1713 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1714 {
1715 	memset(ptr, 0, sizeof(*ptr));
1716 }
1717 
1718 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1719 {
1720 	int err;
1721 
1722 	BTF_TYPE_EMIT(struct bpf_dynptr);
1723 
1724 	err = bpf_dynptr_check_size(size);
1725 	if (err)
1726 		goto error;
1727 
1728 	/* flags is currently unsupported */
1729 	if (flags) {
1730 		err = -EINVAL;
1731 		goto error;
1732 	}
1733 
1734 	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1735 
1736 	return 0;
1737 
1738 error:
1739 	bpf_dynptr_set_null(ptr);
1740 	return err;
1741 }
1742 
1743 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1744 	.func		= bpf_dynptr_from_mem,
1745 	.gpl_only	= false,
1746 	.ret_type	= RET_INTEGER,
1747 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1748 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1749 	.arg3_type	= ARG_ANYTHING,
1750 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
1751 };
1752 
1753 static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
1754 			     u32 offset, u64 flags)
1755 {
1756 	enum bpf_dynptr_type type;
1757 	int err;
1758 
1759 	if (!src->data || flags)
1760 		return -EINVAL;
1761 
1762 	err = bpf_dynptr_check_off_len(src, offset, len);
1763 	if (err)
1764 		return err;
1765 
1766 	type = bpf_dynptr_get_type(src);
1767 
1768 	switch (type) {
1769 	case BPF_DYNPTR_TYPE_LOCAL:
1770 	case BPF_DYNPTR_TYPE_RINGBUF:
1771 		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1773 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1774 		 */
1775 		memmove(dst, src->data + src->offset + offset, len);
1776 		return 0;
1777 	case BPF_DYNPTR_TYPE_SKB:
1778 		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1779 	case BPF_DYNPTR_TYPE_XDP:
1780 		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1781 	default:
1782 		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1783 		return -EFAULT;
1784 	}
1785 }
1786 
1787 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1788 	   u32, offset, u64, flags)
1789 {
1790 	return __bpf_dynptr_read(dst, len, src, offset, flags);
1791 }
1792 
1793 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1794 	.func		= bpf_dynptr_read,
1795 	.gpl_only	= false,
1796 	.ret_type	= RET_INTEGER,
1797 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1798 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1799 	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1800 	.arg4_type	= ARG_ANYTHING,
1801 	.arg5_type	= ARG_ANYTHING,
1802 };
1803 
1804 int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
1805 		       u32 len, u64 flags)
1806 {
1807 	enum bpf_dynptr_type type;
1808 	int err;
1809 
1810 	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1811 		return -EINVAL;
1812 
1813 	err = bpf_dynptr_check_off_len(dst, offset, len);
1814 	if (err)
1815 		return err;
1816 
1817 	type = bpf_dynptr_get_type(dst);
1818 
1819 	switch (type) {
1820 	case BPF_DYNPTR_TYPE_LOCAL:
1821 	case BPF_DYNPTR_TYPE_RINGBUF:
1822 		if (flags)
1823 			return -EINVAL;
1824 		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1826 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1827 		 */
1828 		memmove(dst->data + dst->offset + offset, src, len);
1829 		return 0;
1830 	case BPF_DYNPTR_TYPE_SKB:
1831 		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1832 					     flags);
1833 	case BPF_DYNPTR_TYPE_XDP:
1834 		if (flags)
1835 			return -EINVAL;
1836 		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1837 	default:
1838 		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1839 		return -EFAULT;
1840 	}
1841 }
1842 
1843 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1844 	   u32, len, u64, flags)
1845 {
1846 	return __bpf_dynptr_write(dst, offset, src, len, flags);
1847 }
1848 
1849 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1850 	.func		= bpf_dynptr_write,
1851 	.gpl_only	= false,
1852 	.ret_type	= RET_INTEGER,
1853 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1854 	.arg2_type	= ARG_ANYTHING,
1855 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1856 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
1857 	.arg5_type	= ARG_ANYTHING,
1858 };
1859 
1860 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1861 {
1862 	enum bpf_dynptr_type type;
1863 	int err;
1864 
1865 	if (!ptr->data)
1866 		return 0;
1867 
1868 	err = bpf_dynptr_check_off_len(ptr, offset, len);
1869 	if (err)
1870 		return 0;
1871 
1872 	if (__bpf_dynptr_is_rdonly(ptr))
1873 		return 0;
1874 
1875 	type = bpf_dynptr_get_type(ptr);
1876 
1877 	switch (type) {
1878 	case BPF_DYNPTR_TYPE_LOCAL:
1879 	case BPF_DYNPTR_TYPE_RINGBUF:
1880 		return (unsigned long)(ptr->data + ptr->offset + offset);
1881 	case BPF_DYNPTR_TYPE_SKB:
1882 	case BPF_DYNPTR_TYPE_XDP:
1883 		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1884 		return 0;
1885 	default:
1886 		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1887 		return 0;
1888 	}
1889 }
1890 
1891 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1892 	.func		= bpf_dynptr_data,
1893 	.gpl_only	= false,
1894 	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1895 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1896 	.arg2_type	= ARG_ANYTHING,
1897 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
1898 };
1899 
1900 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1901 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1902 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1903 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1904 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1905 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1906 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1907 const struct bpf_func_proto bpf_perf_event_read_proto __weak;
1908 const struct bpf_func_proto bpf_send_signal_proto __weak;
1909 const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
1910 const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
1911 const struct bpf_func_proto bpf_get_task_stack_proto __weak;
1912 const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
1913 
1914 const struct bpf_func_proto *
1915 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1916 {
1917 	switch (func_id) {
1918 	case BPF_FUNC_map_lookup_elem:
1919 		return &bpf_map_lookup_elem_proto;
1920 	case BPF_FUNC_map_update_elem:
1921 		return &bpf_map_update_elem_proto;
1922 	case BPF_FUNC_map_delete_elem:
1923 		return &bpf_map_delete_elem_proto;
1924 	case BPF_FUNC_map_push_elem:
1925 		return &bpf_map_push_elem_proto;
1926 	case BPF_FUNC_map_pop_elem:
1927 		return &bpf_map_pop_elem_proto;
1928 	case BPF_FUNC_map_peek_elem:
1929 		return &bpf_map_peek_elem_proto;
1930 	case BPF_FUNC_map_lookup_percpu_elem:
1931 		return &bpf_map_lookup_percpu_elem_proto;
1932 	case BPF_FUNC_get_prandom_u32:
1933 		return &bpf_get_prandom_u32_proto;
1934 	case BPF_FUNC_get_smp_processor_id:
1935 		return &bpf_get_raw_smp_processor_id_proto;
1936 	case BPF_FUNC_get_numa_node_id:
1937 		return &bpf_get_numa_node_id_proto;
1938 	case BPF_FUNC_tail_call:
1939 		return &bpf_tail_call_proto;
1940 	case BPF_FUNC_ktime_get_ns:
1941 		return &bpf_ktime_get_ns_proto;
1942 	case BPF_FUNC_ktime_get_boot_ns:
1943 		return &bpf_ktime_get_boot_ns_proto;
1944 	case BPF_FUNC_ktime_get_tai_ns:
1945 		return &bpf_ktime_get_tai_ns_proto;
1946 	case BPF_FUNC_ringbuf_output:
1947 		return &bpf_ringbuf_output_proto;
1948 	case BPF_FUNC_ringbuf_reserve:
1949 		return &bpf_ringbuf_reserve_proto;
1950 	case BPF_FUNC_ringbuf_submit:
1951 		return &bpf_ringbuf_submit_proto;
1952 	case BPF_FUNC_ringbuf_discard:
1953 		return &bpf_ringbuf_discard_proto;
1954 	case BPF_FUNC_ringbuf_query:
1955 		return &bpf_ringbuf_query_proto;
1956 	case BPF_FUNC_strncmp:
1957 		return &bpf_strncmp_proto;
1958 	case BPF_FUNC_strtol:
1959 		return &bpf_strtol_proto;
1960 	case BPF_FUNC_strtoul:
1961 		return &bpf_strtoul_proto;
1962 	case BPF_FUNC_get_current_pid_tgid:
1963 		return &bpf_get_current_pid_tgid_proto;
1964 	case BPF_FUNC_get_ns_current_pid_tgid:
1965 		return &bpf_get_ns_current_pid_tgid_proto;
1966 	case BPF_FUNC_get_current_uid_gid:
1967 		return &bpf_get_current_uid_gid_proto;
1968 	default:
1969 		break;
1970 	}
1971 
1972 	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1973 		return NULL;
1974 
1975 	switch (func_id) {
1976 	case BPF_FUNC_spin_lock:
1977 		return &bpf_spin_lock_proto;
1978 	case BPF_FUNC_spin_unlock:
1979 		return &bpf_spin_unlock_proto;
1980 	case BPF_FUNC_jiffies64:
1981 		return &bpf_jiffies64_proto;
1982 	case BPF_FUNC_per_cpu_ptr:
1983 		return &bpf_per_cpu_ptr_proto;
1984 	case BPF_FUNC_this_cpu_ptr:
1985 		return &bpf_this_cpu_ptr_proto;
1986 	case BPF_FUNC_timer_init:
1987 		return &bpf_timer_init_proto;
1988 	case BPF_FUNC_timer_set_callback:
1989 		return &bpf_timer_set_callback_proto;
1990 	case BPF_FUNC_timer_start:
1991 		return &bpf_timer_start_proto;
1992 	case BPF_FUNC_timer_cancel:
1993 		return &bpf_timer_cancel_proto;
1994 	case BPF_FUNC_kptr_xchg:
1995 		return &bpf_kptr_xchg_proto;
1996 	case BPF_FUNC_for_each_map_elem:
1997 		return &bpf_for_each_map_elem_proto;
1998 	case BPF_FUNC_loop:
1999 		return &bpf_loop_proto;
2000 	case BPF_FUNC_user_ringbuf_drain:
2001 		return &bpf_user_ringbuf_drain_proto;
2002 	case BPF_FUNC_ringbuf_reserve_dynptr:
2003 		return &bpf_ringbuf_reserve_dynptr_proto;
2004 	case BPF_FUNC_ringbuf_submit_dynptr:
2005 		return &bpf_ringbuf_submit_dynptr_proto;
2006 	case BPF_FUNC_ringbuf_discard_dynptr:
2007 		return &bpf_ringbuf_discard_dynptr_proto;
2008 	case BPF_FUNC_dynptr_from_mem:
2009 		return &bpf_dynptr_from_mem_proto;
2010 	case BPF_FUNC_dynptr_read:
2011 		return &bpf_dynptr_read_proto;
2012 	case BPF_FUNC_dynptr_write:
2013 		return &bpf_dynptr_write_proto;
2014 	case BPF_FUNC_dynptr_data:
2015 		return &bpf_dynptr_data_proto;
2016 #ifdef CONFIG_CGROUPS
2017 	case BPF_FUNC_cgrp_storage_get:
2018 		return &bpf_cgrp_storage_get_proto;
2019 	case BPF_FUNC_cgrp_storage_delete:
2020 		return &bpf_cgrp_storage_delete_proto;
2021 	case BPF_FUNC_get_current_cgroup_id:
2022 		return &bpf_get_current_cgroup_id_proto;
2023 	case BPF_FUNC_get_current_ancestor_cgroup_id:
2024 		return &bpf_get_current_ancestor_cgroup_id_proto;
2025 	case BPF_FUNC_current_task_under_cgroup:
2026 		return &bpf_current_task_under_cgroup_proto;
2027 #endif
2028 #ifdef CONFIG_CGROUP_NET_CLASSID
2029 	case BPF_FUNC_get_cgroup_classid:
2030 		return &bpf_get_cgroup_classid_curr_proto;
2031 #endif
2032 	case BPF_FUNC_task_storage_get:
2033 		if (bpf_prog_check_recur(prog))
2034 			return &bpf_task_storage_get_recur_proto;
2035 		return &bpf_task_storage_get_proto;
2036 	case BPF_FUNC_task_storage_delete:
2037 		if (bpf_prog_check_recur(prog))
2038 			return &bpf_task_storage_delete_recur_proto;
2039 		return &bpf_task_storage_delete_proto;
2040 	default:
2041 		break;
2042 	}
2043 
2044 	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2045 		return NULL;
2046 
2047 	switch (func_id) {
2048 	case BPF_FUNC_trace_printk:
2049 		return bpf_get_trace_printk_proto();
2050 	case BPF_FUNC_get_current_task:
2051 		return &bpf_get_current_task_proto;
2052 	case BPF_FUNC_get_current_task_btf:
2053 		return &bpf_get_current_task_btf_proto;
2054 	case BPF_FUNC_get_current_comm:
2055 		return &bpf_get_current_comm_proto;
2056 	case BPF_FUNC_probe_read_user:
2057 		return &bpf_probe_read_user_proto;
2058 	case BPF_FUNC_probe_read_kernel:
2059 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2060 		       NULL : &bpf_probe_read_kernel_proto;
2061 	case BPF_FUNC_probe_read_user_str:
2062 		return &bpf_probe_read_user_str_proto;
2063 	case BPF_FUNC_probe_read_kernel_str:
2064 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2065 		       NULL : &bpf_probe_read_kernel_str_proto;
2066 	case BPF_FUNC_copy_from_user:
2067 		return &bpf_copy_from_user_proto;
2068 	case BPF_FUNC_copy_from_user_task:
2069 		return &bpf_copy_from_user_task_proto;
2070 	case BPF_FUNC_snprintf_btf:
2071 		return &bpf_snprintf_btf_proto;
2072 	case BPF_FUNC_snprintf:
2073 		return &bpf_snprintf_proto;
2074 	case BPF_FUNC_task_pt_regs:
2075 		return &bpf_task_pt_regs_proto;
2076 	case BPF_FUNC_trace_vprintk:
2077 		return bpf_get_trace_vprintk_proto();
2078 	case BPF_FUNC_perf_event_read_value:
2079 		return bpf_get_perf_event_read_value_proto();
2080 	case BPF_FUNC_perf_event_read:
2081 		return &bpf_perf_event_read_proto;
2082 	case BPF_FUNC_send_signal:
2083 		return &bpf_send_signal_proto;
2084 	case BPF_FUNC_send_signal_thread:
2085 		return &bpf_send_signal_thread_proto;
2086 	case BPF_FUNC_get_task_stack:
2087 		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
2088 				       : &bpf_get_task_stack_proto;
2089 	case BPF_FUNC_get_branch_snapshot:
2090 		return &bpf_get_branch_snapshot_proto;
2091 	case BPF_FUNC_find_vma:
2092 		return &bpf_find_vma_proto;
2093 	default:
2094 		return NULL;
2095 	}
2096 }
2097 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
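/* Usage sketch (illustrative only, not part of this file): a subsystem's
 * verifier_ops->get_func_proto() callback usually handles its own helpers and
 * falls back to bpf_base_func_proto() for the common ones. "foo_func_proto",
 * BPF_FUNC_foo_helper and "bpf_foo_helper_proto" are hypothetical names.
 *
 *	static const struct bpf_func_proto *
 *	foo_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_foo_helper:
 *			return &bpf_foo_helper_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */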
2098 
2099 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2100 			struct bpf_spin_lock *spin_lock)
2101 {
2102 	struct list_head *head = list_head, *orig_head = list_head;
2103 
2104 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2105 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2106 
2107 	/* Do the actual list draining outside the lock to not hold the lock for
2108 	 * too long, and also prevent deadlocks if tracing programs end up
2109 	 * executing on entry/exit of functions called inside the critical
2110 	 * section, and end up doing map ops that call bpf_list_head_free for
2111 	 * the same map value again.
2112 	 */
2113 	__bpf_spin_lock_irqsave(spin_lock);
2114 	if (!head->next || list_empty(head))
2115 		goto unlock;
2116 	head = head->next;
2117 unlock:
2118 	INIT_LIST_HEAD(orig_head);
2119 	__bpf_spin_unlock_irqrestore(spin_lock);
2120 
2121 	while (head != orig_head) {
2122 		void *obj = head;
2123 
2124 		obj -= field->graph_root.node_offset;
2125 		head = head->next;
2126 		/* The contained type can also have resources, including a
2127 		 * bpf_list_head which needs to be freed.
2128 		 */
2129 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2130 	}
2131 }
2132 
/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
 * 'rb_node *', so the field name of the rb_node within the containing struct
 * is not needed.
 *
 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
 * graph_root.node_offset, it's not necessary to know the field name or the
 * type of the node struct.
 */
2141 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2142 	for (pos = rb_first_postorder(root); \
2143 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
2144 	    pos = n)
2145 
2146 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2147 		      struct bpf_spin_lock *spin_lock)
2148 {
2149 	struct rb_root_cached orig_root, *root = rb_root;
2150 	struct rb_node *pos, *n;
2151 	void *obj;
2152 
2153 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2154 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2155 
2156 	__bpf_spin_lock_irqsave(spin_lock);
2157 	orig_root = *root;
2158 	*root = RB_ROOT_CACHED;
2159 	__bpf_spin_unlock_irqrestore(spin_lock);
2160 
2161 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2162 		obj = pos;
2163 		obj -= field->graph_root.node_offset;
2164 
2166 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2167 	}
2168 }
2169 
2170 __bpf_kfunc_start_defs();
2171 
2172 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2173 {
2174 	struct btf_struct_meta *meta = meta__ign;
2175 	u64 size = local_type_id__k;
2176 	void *p;
2177 
2178 	p = bpf_mem_alloc(&bpf_global_ma, size);
2179 	if (!p)
2180 		return NULL;
2181 	if (meta)
2182 		bpf_obj_init(meta->record, p);
2183 	return p;
2184 }
2185 
2186 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2187 {
2188 	u64 size = local_type_id__k;
2189 
2190 	/* The verifier has ensured that meta__ign must be NULL */
2191 	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2192 }
2193 
2194 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2195 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2196 {
2197 	struct bpf_mem_alloc *ma;
2198 
2199 	if (rec && rec->refcount_off >= 0 &&
2200 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2201 		/* Object is refcounted and refcount_dec didn't result in 0
2202 		 * refcount. Return without freeing the object
2203 		 */
2204 		return;
2205 	}
2206 
2207 	if (rec)
2208 		bpf_obj_free_fields(rec, p);
2209 
2210 	if (percpu)
2211 		ma = &bpf_global_percpu_ma;
2212 	else
2213 		ma = &bpf_global_ma;
2214 	bpf_mem_free_rcu(ma, p);
2215 }
2216 
2217 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2218 {
2219 	struct btf_struct_meta *meta = meta__ign;
2220 	void *p = p__alloc;
2221 
2222 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2223 }
2224 
2225 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2226 {
2227 	/* The verifier has ensured that meta__ign must be NULL */
2228 	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2229 }
2230 
2231 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2232 {
2233 	struct btf_struct_meta *meta = meta__ign;
2234 	struct bpf_refcount *ref;
2235 
2236 	/* Could just cast directly to refcount_t *, but need some code using
2237 	 * bpf_refcount type so that it is emitted in vmlinux BTF
2238 	 */
2239 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2240 	if (!refcount_inc_not_zero((refcount_t *)ref))
2241 		return NULL;
2242 
2243 	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2244 	 * in verifier.c
2245 	 */
2246 	return (void *)p__refcounted_kptr;
2247 }
2248 
2249 static int __bpf_list_add(struct bpf_list_node_kern *node,
2250 			  struct bpf_list_head *head,
2251 			  bool tail, struct btf_record *rec, u64 off)
2252 {
2253 	struct list_head *n = &node->list_head, *h = (void *)head;
2254 
2255 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2256 	 * called on its fields, so init here
2257 	 */
2258 	if (unlikely(!h->next))
2259 		INIT_LIST_HEAD(h);
2260 
2261 	/* node->owner != NULL implies !list_empty(n), no need to separately
2262 	 * check the latter
2263 	 */
2264 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2265 		/* Only called from BPF prog, no need to migrate_disable */
2266 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2267 		return -EINVAL;
2268 	}
2269 
2270 	tail ? list_add_tail(n, h) : list_add(n, h);
2271 	WRITE_ONCE(node->owner, head);
2272 
2273 	return 0;
2274 }
2275 
2276 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2277 					 struct bpf_list_node *node,
2278 					 void *meta__ign, u64 off)
2279 {
2280 	struct bpf_list_node_kern *n = (void *)node;
2281 	struct btf_struct_meta *meta = meta__ign;
2282 
2283 	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2284 }
2285 
2286 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2287 					struct bpf_list_node *node,
2288 					void *meta__ign, u64 off)
2289 {
2290 	struct bpf_list_node_kern *n = (void *)node;
2291 	struct btf_struct_meta *meta = meta__ign;
2292 
2293 	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2294 }
2295 
2296 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2297 {
2298 	struct list_head *n, *h = (void *)head;
2299 	struct bpf_list_node_kern *node;
2300 
2301 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2302 	 * called on its fields, so init here
2303 	 */
2304 	if (unlikely(!h->next))
2305 		INIT_LIST_HEAD(h);
2306 	if (list_empty(h))
2307 		return NULL;
2308 
2309 	n = tail ? h->prev : h->next;
2310 	node = container_of(n, struct bpf_list_node_kern, list_head);
2311 	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2312 		return NULL;
2313 
2314 	list_del_init(n);
2315 	WRITE_ONCE(node->owner, NULL);
2316 	return (struct bpf_list_node *)n;
2317 }
2318 
2319 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2320 {
2321 	return __bpf_list_del(head, false);
2322 }
2323 
2324 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2325 {
2326 	return __bpf_list_del(head, true);
2327 }
2328 
2329 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
2330 {
2331 	struct list_head *h = (struct list_head *)head;
2332 
2333 	if (list_empty(h) || unlikely(!h->next))
2334 		return NULL;
2335 
2336 	return (struct bpf_list_node *)h->next;
2337 }
2338 
2339 __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
2340 {
2341 	struct list_head *h = (struct list_head *)head;
2342 
2343 	if (list_empty(h) || unlikely(!h->next))
2344 		return NULL;
2345 
2346 	return (struct bpf_list_node *)h->prev;
2347 }
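/* Usage sketch (illustrative only, not part of this file): BPF-program-side
 * use of the list kfuncs, assuming the bpf_obj_new()/bpf_list_push_back()
 * convenience wrappers from the selftests' bpf_experimental.h. "struct elem",
 * "lock" and "head" are hypothetical; the head and its bpf_spin_lock must live
 * in the same allocation (e.g. the same map value) and the lock must be held
 * around list operations.
 *
 *	struct elem *e = bpf_obj_new(typeof(*e));
 *
 *	if (!e)
 *		return 0;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_back(&head, &e->node);
 *	bpf_spin_unlock(&lock);
 */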
2348 
2349 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2350 						  struct bpf_rb_node *node)
2351 {
2352 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2353 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2354 	struct rb_node *n = &node_internal->rb_node;
2355 
2356 	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2357 	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2358 	 */
2359 	if (READ_ONCE(node_internal->owner) != root)
2360 		return NULL;
2361 
2362 	rb_erase_cached(n, r);
2363 	RB_CLEAR_NODE(n);
2364 	WRITE_ONCE(node_internal->owner, NULL);
2365 	return (struct bpf_rb_node *)n;
2366 }
2367 
2368 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2369  * program
2370  */
2371 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2372 			    struct bpf_rb_node_kern *node,
2373 			    void *less, struct btf_record *rec, u64 off)
2374 {
2375 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2376 	struct rb_node *parent = NULL, *n = &node->rb_node;
2377 	bpf_callback_t cb = (bpf_callback_t)less;
2378 	bool leftmost = true;
2379 
2380 	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2381 	 * check the latter
2382 	 */
2383 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2384 		/* Only called from BPF prog, no need to migrate_disable */
2385 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2386 		return -EINVAL;
2387 	}
2388 
2389 	while (*link) {
2390 		parent = *link;
2391 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2392 			link = &parent->rb_left;
2393 		} else {
2394 			link = &parent->rb_right;
2395 			leftmost = false;
2396 		}
2397 	}
2398 
2399 	rb_link_node(n, parent, link);
2400 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2401 	WRITE_ONCE(node->owner, root);
2402 	return 0;
2403 }
2404 
2405 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2406 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2407 				    void *meta__ign, u64 off)
2408 {
2409 	struct btf_struct_meta *meta = meta__ign;
2410 	struct bpf_rb_node_kern *n = (void *)node;
2411 
2412 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2413 }
2414 
2415 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2416 {
2417 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2418 
2419 	return (struct bpf_rb_node *)rb_first_cached(r);
2420 }
2421 
2422 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
2423 {
2424 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2425 
2426 	return (struct bpf_rb_node *)r->rb_root.rb_node;
2427 }
2428 
2429 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
2430 {
2431 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2432 
2433 	if (READ_ONCE(node_internal->owner) != root)
2434 		return NULL;
2435 
2436 	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
2437 }
2438 
2439 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
2440 {
2441 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2442 
2443 	if (READ_ONCE(node_internal->owner) != root)
2444 		return NULL;
2445 
2446 	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
2447 }
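/* Usage sketch (illustrative only, not part of this file): BPF-program-side
 * use of the rbtree kfuncs, assuming the wrappers from the selftests'
 * bpf_experimental.h. "struct node_data", "less", "lock" and "groot" are
 * hypothetical; the bpf_spin_lock protecting the root must be held around
 * add/remove/first.
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	bpf_spin_lock(&lock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	bpf_spin_unlock(&lock);
 */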
2448 
2449 /**
 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
 * kfunc that is not stored in a map as a kptr must be released by calling
 * bpf_task_release().
2453  * @p: The task on which a reference is being acquired.
2454  */
2455 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2456 {
2457 	if (refcount_inc_not_zero(&p->rcu_users))
2458 		return p;
2459 	return NULL;
2460 }
2461 
2462 /**
2463  * bpf_task_release - Release the reference acquired on a task.
2464  * @p: The task on which a reference is being released.
2465  */
2466 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2467 {
2468 	put_task_struct_rcu_user(p);
2469 }
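/* Usage sketch (illustrative only, not part of this file): a BPF program
 * pairing the two kfuncs above; "task" is a hypothetical trusted task pointer
 * (e.g. a tracing program argument).
 *
 *	struct task_struct *acquired;
 *
 *	acquired = bpf_task_acquire(task);
 *	if (!acquired)
 *		return 0;
 *	bpf_printk("pid %d", acquired->pid);
 *	bpf_task_release(acquired);
 */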
2470 
2471 __bpf_kfunc void bpf_task_release_dtor(void *p)
2472 {
2473 	put_task_struct_rcu_user(p);
2474 }
2475 CFI_NOSEAL(bpf_task_release_dtor);
2476 
2477 #ifdef CONFIG_CGROUPS
2478 /**
 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
 * this kfunc that is not stored in a map as a kptr must be released by
 * calling bpf_cgroup_release().
2482  * @cgrp: The cgroup on which a reference is being acquired.
2483  */
2484 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2485 {
2486 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2487 }
2488 
2489 /**
2490  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2491  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2492  * not be freed until the current grace period has ended, even if its refcount
2493  * drops to 0.
2494  * @cgrp: The cgroup on which a reference is being released.
2495  */
2496 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2497 {
2498 	cgroup_put(cgrp);
2499 }
2500 
2501 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2502 {
2503 	cgroup_put(cgrp);
2504 }
2505 CFI_NOSEAL(bpf_cgroup_release_dtor);
2506 
2507 /**
 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
 * array. A cgroup returned by this kfunc that is not subsequently stored in a
 * map must be released by calling bpf_cgroup_release().
2511  * @cgrp: The cgroup for which we're performing a lookup.
2512  * @level: The level of ancestor to look up.
2513  */
2514 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2515 {
2516 	struct cgroup *ancestor;
2517 
2518 	if (level > cgrp->level || level < 0)
2519 		return NULL;
2520 
2521 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2522 	ancestor = cgrp->ancestors[level];
2523 	if (!cgroup_tryget(ancestor))
2524 		return NULL;
2525 	return ancestor;
2526 }
2527 
2528 /**
 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
 * kfunc that is not subsequently stored in a map must be released by calling
 * bpf_cgroup_release().
2532  * @cgid: cgroup id.
2533  */
2534 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2535 {
2536 	struct cgroup *cgrp;
2537 
2538 	cgrp = cgroup_get_from_id(cgid);
2539 	if (IS_ERR(cgrp))
2540 		return NULL;
2541 	return cgrp;
2542 }
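/* Usage sketch (illustrative only, not part of this file): looking up a cgroup
 * by ID and dropping the reference; "cgid" is a hypothetical cgroup ID known
 * to the program.
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (!cgrp)
 *		return 0;
 *	bpf_printk("cgroup level %d", cgrp->level);
 *	bpf_cgroup_release(cgrp);
 */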
2543 
2544 /**
 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc and
 * test a task's membership in a cgroup's ancestry.
2547  * @task: the task to be tested
2548  * @ancestor: possible ancestor of @task's cgroup
2549  *
2550  * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2551  * It follows all the same rules as cgroup_is_descendant, and only applies
2552  * to the default hierarchy.
2553  */
2554 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2555 				       struct cgroup *ancestor)
2556 {
2557 	long ret;
2558 
2559 	rcu_read_lock();
2560 	ret = task_under_cgroup_hierarchy(task, ancestor);
2561 	rcu_read_unlock();
2562 	return ret;
2563 }
2564 
2565 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2566 {
2567 	struct bpf_array *array = container_of(map, struct bpf_array, map);
2568 	struct cgroup *cgrp;
2569 
2570 	if (unlikely(idx >= array->map.max_entries))
2571 		return -E2BIG;
2572 
2573 	cgrp = READ_ONCE(array->ptrs[idx]);
2574 	if (unlikely(!cgrp))
2575 		return -EAGAIN;
2576 
2577 	return task_under_cgroup_hierarchy(current, cgrp);
2578 }
2579 
2580 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2581 	.func           = bpf_current_task_under_cgroup,
2582 	.gpl_only       = false,
2583 	.ret_type       = RET_INTEGER,
2584 	.arg1_type      = ARG_CONST_MAP_PTR,
2585 	.arg2_type      = ARG_ANYTHING,
2586 };
2587 
2588 /**
2589  * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2590  * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2591  * hierarchy ID.
2592  * @task: The target task
2593  * @hierarchy_id: The ID of a cgroup1 hierarchy
2594  *
 * On success, the cgroup is returned. On failure, NULL is returned.
2596  */
2597 __bpf_kfunc struct cgroup *
2598 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2599 {
2600 	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2601 
2602 	if (IS_ERR(cgrp))
2603 		return NULL;
2604 	return cgrp;
2605 }
2606 #endif /* CONFIG_CGROUPS */
2607 
2608 /**
2609  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2610  * in the root pid namespace idr. If a task is returned, it must either be
2611  * stored in a map, or released with bpf_task_release().
2612  * @pid: The pid of the task being looked up.
2613  */
2614 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2615 {
2616 	struct task_struct *p;
2617 
2618 	rcu_read_lock();
2619 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2620 	if (p)
2621 		p = bpf_task_acquire(p);
2622 	rcu_read_unlock();
2623 
2624 	return p;
2625 }
2626 
2627 /**
2628  * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
2629  * in the pid namespace of the current task. If a task is returned, it must
2630  * either be stored in a map, or released with bpf_task_release().
2631  * @vpid: The vpid of the task being looked up.
2632  */
2633 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
2634 {
2635 	struct task_struct *p;
2636 
2637 	rcu_read_lock();
2638 	p = find_task_by_vpid(vpid);
2639 	if (p)
2640 		p = bpf_task_acquire(p);
2641 	rcu_read_unlock();
2642 
2643 	return p;
2644 }
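/* Usage sketch (illustrative only, not part of this file): both lookups above
 * return an acquired reference (or NULL) which must be released, e.g. for the
 * init task:
 *
 *	struct task_struct *p = bpf_task_from_pid(1);
 *
 *	if (p) {
 *		bpf_printk("comm %s", p->comm);
 *		bpf_task_release(p);
 *	}
 */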
2645 
2646 /**
2647  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2648  * @p: The dynptr whose data slice to retrieve
2649  * @offset: Offset into the dynptr
2650  * @buffer__opt: User-provided buffer to copy contents into.  May be NULL
2651  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2652  *               length of the requested slice. This must be a constant.
2653  *
2654  * For non-skb and non-xdp type dynptrs, there is no difference between
2655  * bpf_dynptr_slice and bpf_dynptr_data.
2656  *
 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2658  *
2659  * If the intention is to write to the data slice, please use
2660  * bpf_dynptr_slice_rdwr.
2661  *
2662  * The user must check that the returned pointer is not null before using it.
2663  *
2664  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2665  * does not change the underlying packet data pointers, so a call to
2666  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2667  * the bpf program.
2668  *
2669  * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2670  * data slice (can be either direct pointer to the data or a pointer to the user
2671  * provided buffer, with its contents containing the data, if unable to obtain
2672  * direct pointer)
2673  */
2674 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
2675 				   void *buffer__opt, u32 buffer__szk)
2676 {
2677 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2678 	enum bpf_dynptr_type type;
2679 	u32 len = buffer__szk;
2680 	int err;
2681 
2682 	if (!ptr->data)
2683 		return NULL;
2684 
2685 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2686 	if (err)
2687 		return NULL;
2688 
2689 	type = bpf_dynptr_get_type(ptr);
2690 
2691 	switch (type) {
2692 	case BPF_DYNPTR_TYPE_LOCAL:
2693 	case BPF_DYNPTR_TYPE_RINGBUF:
2694 		return ptr->data + ptr->offset + offset;
2695 	case BPF_DYNPTR_TYPE_SKB:
2696 		if (buffer__opt)
2697 			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2698 		else
2699 			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2700 	case BPF_DYNPTR_TYPE_XDP:
2701 	{
2702 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2703 		if (!IS_ERR_OR_NULL(xdp_ptr))
2704 			return xdp_ptr;
2705 
2706 		if (!buffer__opt)
2707 			return NULL;
2708 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2709 		return buffer__opt;
2710 	}
2711 	default:
2712 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2713 		return NULL;
2714 	}
2715 }
2716 
2717 /**
2718  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2719  * @p: The dynptr whose data slice to retrieve
2720  * @offset: Offset into the dynptr
2721  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2722  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2723  *               length of the requested slice. This must be a constant.
2724  *
2725  * For non-skb and non-xdp type dynptrs, there is no difference between
2726  * bpf_dynptr_slice and bpf_dynptr_data.
2727  *
2728  * If buffer__opt is NULL, the call will fail if buffer_opt was needed.
2729  *
 * The returned pointer is writable and may point either directly to the dynptr
 * data at the requested offset, or to the buffer if a direct data pointer
 * cannot be obtained (for example, when the requested slice is in the paged
 * area of an skb packet). In the case where the returned pointer is to the
 * buffer, the user is responsible for persisting writes through calling
 * bpf_dynptr_write(). This usually looks something like this pattern:
2736  *
2737  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2738  * if (!eth)
2739  *	return TC_ACT_SHOT;
2740  *
2741  * // mutate eth header //
2742  *
2743  * if (eth == buffer)
 *	bpf_dynptr_write(&dynptr, 0, buffer, sizeof(buffer), 0);
2745  *
2746  * Please note that, as in the example above, the user must check that the
2747  * returned pointer is not null before using it.
2748  *
2749  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2750  * does not change the underlying packet data pointers, so a call to
2751  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2752  * the bpf program.
2753  *
2754  * Return: NULL if the call failed (eg invalid dynptr), pointer to a
2755  * data slice (can be either direct pointer to the data or a pointer to the user
2756  * provided buffer, with its contents containing the data, if unable to obtain
2757  * direct pointer)
2758  */
2759 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
2760 					void *buffer__opt, u32 buffer__szk)
2761 {
2762 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2763 
2764 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2765 		return NULL;
2766 
2767 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2768 	 *
2769 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2770 	 * if the bpf program allows skb data writes. There are two possibilities
2771 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2772 	 *
2773 	 * 1) The requested slice is in the head of the skb. In this case, the
2774 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2775 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2776 	 * The pointer can be directly written into.
2777 	 *
2778 	 * 2) Some portion of the requested slice is in the paged buffer area.
2779 	 * In this case, the requested data will be copied out into the buffer
2780 	 * and the returned pointer will be a pointer to the buffer. The skb
2781 	 * will not be pulled. To persist the write, the user will need to call
2782 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2783 	 *
2784 	 * Similarly for xdp programs, if the requested slice is not across xdp
2785 	 * fragments, then a direct pointer will be returned, otherwise the data
2786 	 * will be copied out into the buffer and the user will need to call
2787 	 * bpf_dynptr_write() to commit changes.
2788 	 */
2789 	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2790 }
2791 
2792 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
2793 {
2794 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2795 	u32 size;
2796 
2797 	if (!ptr->data || start > end)
2798 		return -EINVAL;
2799 
2800 	size = __bpf_dynptr_size(ptr);
2801 
2802 	if (start > size || end > size)
2803 		return -ERANGE;
2804 
2805 	ptr->offset += start;
2806 	bpf_dynptr_set_size(ptr, end - start);
2807 
2808 	return 0;
2809 }
2810 
2811 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2812 {
2813 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2814 
2815 	return !ptr->data;
2816 }
2817 
2818 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2819 {
2820 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2821 
2822 	if (!ptr->data)
2823 		return false;
2824 
2825 	return __bpf_dynptr_is_rdonly(ptr);
2826 }
2827 
2828 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
2829 {
2830 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2831 
2832 	if (!ptr->data)
2833 		return -EINVAL;
2834 
2835 	return __bpf_dynptr_size(ptr);
2836 }
2837 
2838 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2839 				 struct bpf_dynptr *clone__uninit)
2840 {
2841 	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2842 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2843 
2844 	if (!ptr->data) {
2845 		bpf_dynptr_set_null(clone);
2846 		return -EINVAL;
2847 	}
2848 
2849 	*clone = *ptr;
2850 
2851 	return 0;
2852 }
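/* Usage sketch (illustrative only, not part of this file): bpf_dynptr_clone()
 * followed by bpf_dynptr_adjust() gives a narrowed view without touching the
 * original; "dptr" is a hypothetical, already-initialized dynptr.
 *
 *	struct bpf_dynptr view;
 *
 *	if (bpf_dynptr_clone(&dptr, &view))
 *		return 0;
 *	// restrict the clone to bytes [4, 12) of the original view
 *	if (bpf_dynptr_adjust(&view, 4, 12))
 *		return 0;
 */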
2853 
2854 /**
2855  * bpf_dynptr_copy() - Copy data from one dynptr to another.
2856  * @dst_ptr: Destination dynptr - where data should be copied to
2857  * @dst_off: Offset into the destination dynptr
2858  * @src_ptr: Source dynptr - where data should be copied from
2859  * @src_off: Offset into the source dynptr
2860  * @size: Length of the data to copy from source to destination
2861  *
 * Copies data from the source dynptr to the destination dynptr.
 *
 * Return: 0 on success; a negative error code otherwise.
2864  */
2865 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
2866 				struct bpf_dynptr *src_ptr, u32 src_off, u32 size)
2867 {
2868 	struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
2869 	struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
2870 	void *src_slice, *dst_slice;
2871 	char buf[256];
2872 	u32 off;
2873 
2874 	src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
2875 	dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);
2876 
2877 	if (src_slice && dst_slice) {
2878 		memmove(dst_slice, src_slice, size);
2879 		return 0;
2880 	}
2881 
2882 	if (src_slice)
2883 		return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);
2884 
2885 	if (dst_slice)
2886 		return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);
2887 
2888 	if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
2889 	    bpf_dynptr_check_off_len(src, src_off, size))
2890 		return -E2BIG;
2891 
2892 	off = 0;
2893 	while (off < size) {
2894 		u32 chunk_sz = min_t(u32, sizeof(buf), size - off);
2895 		int err;
2896 
2897 		err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
2898 		if (err)
2899 			return err;
2900 		err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
2901 		if (err)
2902 			return err;
2903 
2904 		off += chunk_sz;
2905 	}
2906 	return 0;
2907 }
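/* Usage sketch (illustrative only, not part of this file): copying between two
 * dynptrs, e.g. from an skb dynptr into a ringbuf dynptr; "skb_ptr" and
 * "rb_ptr" are hypothetical, already-initialized dynptrs.
 *
 *	if (bpf_dynptr_copy(&rb_ptr, 0, &skb_ptr, 0, 64))
 *		return 0;
 */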
2908 
2909 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2910 {
2911 	return obj;
2912 }
2913 
2914 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
2915 {
2916 	return (void *)obj__ign;
2917 }
2918 
2919 __bpf_kfunc void bpf_rcu_read_lock(void)
2920 {
2921 	rcu_read_lock();
2922 }
2923 
2924 __bpf_kfunc void bpf_rcu_read_unlock(void)
2925 {
2926 	rcu_read_unlock();
2927 }
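/* Usage sketch (illustrative only, not part of this file): the pair above lets
 * a program walk RCU-protected pointers, e.g. task->real_parent, inside an
 * explicit RCU read-side critical section; "task" is a hypothetical trusted
 * task pointer.
 *
 *	struct task_struct *parent;
 *
 *	bpf_rcu_read_lock();
 *	parent = task->real_parent;
 *	if (parent)
 *		bpf_printk("ppid %d", parent->pid);
 *	bpf_rcu_read_unlock();
 */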
2928 
2929 struct bpf_throw_ctx {
2930 	struct bpf_prog_aux *aux;
2931 	u64 sp;
2932 	u64 bp;
2933 	int cnt;
2934 };
2935 
2936 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
2937 {
2938 	struct bpf_throw_ctx *ctx = cookie;
2939 	struct bpf_prog *prog;
2940 
2941 	if (!is_bpf_text_address(ip))
2942 		return !ctx->cnt;
2943 	prog = bpf_prog_ksym_find(ip);
2944 	ctx->cnt++;
2945 	if (bpf_is_subprog(prog))
2946 		return true;
2947 	ctx->aux = prog->aux;
2948 	ctx->sp = sp;
2949 	ctx->bp = bp;
2950 	return false;
2951 }
2952 
2953 __bpf_kfunc void bpf_throw(u64 cookie)
2954 {
2955 	struct bpf_throw_ctx ctx = {};
2956 
2957 	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
2958 	WARN_ON_ONCE(!ctx.aux);
2959 	if (ctx.aux)
2960 		WARN_ON_ONCE(!ctx.aux->exception_boundary);
2961 	WARN_ON_ONCE(!ctx.bp);
2962 	WARN_ON_ONCE(!ctx.cnt);
2963 	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
2964 	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
2965 	 * which skips compiler generated instrumentation to do the same.
2966 	 */
2967 	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
2968 	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
2969 	WARN(1, "A call to BPF exception callback should never return\n");
2970 }
2971 
2972 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
2973 {
2974 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2975 	struct bpf_map *map = p__map;
2976 
2977 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
2978 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
2979 
2980 	if (flags)
2981 		return -EINVAL;
2982 
2983 	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
2984 }
2985 
2986 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
2987 {
2988 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2989 	struct bpf_work *w;
2990 
2991 	if (in_nmi())
2992 		return -EOPNOTSUPP;
2993 	if (flags)
2994 		return -EINVAL;
2995 	w = READ_ONCE(async->work);
2996 	if (!w || !READ_ONCE(w->cb.prog))
2997 		return -EINVAL;
2998 
2999 	schedule_work(&w->work);
3000 	return 0;
3001 }
3002 
3003 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
3004 					 int (callback_fn)(void *map, int *key, void *value),
3005 					 unsigned int flags,
3006 					 void *aux__prog)
3007 {
3008 	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
3009 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
3010 
3011 	if (flags)
3012 		return -EINVAL;
3013 
3014 	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
3015 }
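/* Usage sketch (illustrative only, not part of this file): typical
 * BPF-program-side sequence for the three wq kfuncs above, assuming the
 * bpf_wq_set_callback() wrapper from the selftests' bpf_experimental.h;
 * "elem" is a hypothetical map value embedding a struct bpf_wq, "the_map" the
 * map it lives in, and "wq_cb" a program-local callback matching the
 * prototype expected by bpf_wq_set_callback_impl().
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		return 0;
 *	}
 *
 *	if (bpf_wq_init(&elem->wq, &the_map, 0) ||
 *	    bpf_wq_set_callback(&elem->wq, wq_cb, 0) ||
 *	    bpf_wq_start(&elem->wq, 0))
 *		return 0;
 */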
3016 
3017 __bpf_kfunc void bpf_preempt_disable(void)
3018 {
3019 	preempt_disable();
3020 }
3021 
3022 __bpf_kfunc void bpf_preempt_enable(void)
3023 {
3024 	preempt_enable();
3025 }
3026 
3027 struct bpf_iter_bits {
3028 	__u64 __opaque[2];
3029 } __aligned(8);
3030 
3031 #define BITS_ITER_NR_WORDS_MAX 511
3032 
3033 struct bpf_iter_bits_kern {
3034 	union {
3035 		__u64 *bits;
3036 		__u64 bits_copy;
3037 	};
3038 	int nr_bits;
3039 	int bit;
3040 } __aligned(8);
3041 
3042 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
3043  * a u64 pointer and an unsigned long pointer to find_next_bit() will
3044  * return the same result, as both point to the same 8-byte area.
3045  *
3046  * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
3047  * pointer also makes no difference. This is because the first iterated
3048  * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
3049  * long is composed of bits 32-63 of the u64.
3050  *
3051  * However, for 32-bit big-endian hosts, this is not the case. The first
3052  * iterated unsigned long will be bits 32-63 of the u64, so swap these two
3053  * ulong values within the u64.
3054  */
3055 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
3056 {
3057 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
3058 	unsigned int i;
3059 
3060 	for (i = 0; i < nr; i++)
3061 		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
3062 #endif
3063 }
3064 
3065 /**
3066  * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
3067  * @it: The new bpf_iter_bits to be created
3068  * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
3069  * @nr_words: The size of the specified memory area, measured in 8-byte units.
3070  * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
3071  * further reduced by the BPF memory allocator implementation.
3072  *
3073  * This function initializes a new bpf_iter_bits structure for iterating over
3074  * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
3075  * copies the data of the memory area to the newly created bpf_iter_bits @it for
3076  * subsequent iteration operations.
3077  *
 * On success, 0 is returned. On failure, a negative error code is returned.
3079  */
3080 __bpf_kfunc int
3081 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
3082 {
3083 	struct bpf_iter_bits_kern *kit = (void *)it;
3084 	u32 nr_bytes = nr_words * sizeof(u64);
3085 	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
3086 	int err;
3087 
3088 	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
3089 	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
3090 		     __alignof__(struct bpf_iter_bits));
3091 
3092 	kit->nr_bits = 0;
3093 	kit->bits_copy = 0;
3094 	kit->bit = -1;
3095 
3096 	if (!unsafe_ptr__ign || !nr_words)
3097 		return -EINVAL;
3098 	if (nr_words > BITS_ITER_NR_WORDS_MAX)
3099 		return -E2BIG;
3100 
3101 	/* Optimization for u64 mask */
3102 	if (nr_bits == 64) {
3103 		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
3104 		if (err)
3105 			return -EFAULT;
3106 
3107 		swap_ulong_in_u64(&kit->bits_copy, nr_words);
3108 
3109 		kit->nr_bits = nr_bits;
3110 		return 0;
3111 	}
3112 
3113 	if (bpf_mem_alloc_check_size(false, nr_bytes))
3114 		return -E2BIG;
3115 
3116 	/* Fallback to memalloc */
3117 	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
3118 	if (!kit->bits)
3119 		return -ENOMEM;
3120 
3121 	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
3122 	if (err) {
3123 		bpf_mem_free(&bpf_global_ma, kit->bits);
3124 		return err;
3125 	}
3126 
3127 	swap_ulong_in_u64(kit->bits, nr_words);
3128 
3129 	kit->nr_bits = nr_bits;
3130 	return 0;
3131 }
3132 
3133 /**
3134  * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3135  * @it: The bpf_iter_bits to be checked
3136  *
 * This function returns a pointer to an int holding the index of the next set
 * bit in the iterated bit mask.
 *
 * If there are no further set bits available, it returns NULL.
3141  */
3142 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
3143 {
3144 	struct bpf_iter_bits_kern *kit = (void *)it;
3145 	int bit = kit->bit, nr_bits = kit->nr_bits;
3146 	const void *bits;
3147 
3148 	if (!nr_bits || bit >= nr_bits)
3149 		return NULL;
3150 
3151 	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3152 	bit = find_next_bit(bits, nr_bits, bit + 1);
3153 	if (bit >= nr_bits) {
3154 		kit->bit = bit;
3155 		return NULL;
3156 	}
3157 
3158 	kit->bit = bit;
3159 	return &kit->bit;
3160 }
3161 
3162 /**
3163  * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3164  * @it: The bpf_iter_bits to be destroyed
3165  *
3166  * Destroy the resource associated with the bpf_iter_bits.
3167  */
3168 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3169 {
3170 	struct bpf_iter_bits_kern *kit = (void *)it;
3171 
3172 	if (kit->nr_bits <= 64)
3173 		return;
3174 	bpf_mem_free(&bpf_global_ma, kit->bits);
3175 }
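/* Usage sketch (illustrative only, not part of this file): open-coded use of
 * the bits iterator from a BPF program; "mask" and "nr_words" are hypothetical
 * (e.g. a cpumask read from the kernel and its size in u64 words). If
 * bpf_iter_bits_new() fails, bpf_iter_bits_next() simply returns NULL.
 *
 *	struct bpf_iter_bits it;
 *	int *bit;
 *
 *	bpf_iter_bits_new(&it, mask, nr_words);
 *	while ((bit = bpf_iter_bits_next(&it)))
 *		bpf_printk("bit %d is set", *bit);
 *	bpf_iter_bits_destroy(&it);
 */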
3176 
3177 /**
3178  * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3179  * @dst:             Destination address, in kernel space.  This buffer must be
3180  *                   at least @dst__sz bytes long.
3181  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3182  * @unsafe_ptr__ign: Source address, in user space.
3183  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3184  *
 * Copies a NUL-terminated string from userspace to BPF space. If the user
 * string is too long, this will still ensure NUL termination in the @dst
 * buffer unless the buffer size is 0.
 *
 * If the BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
 * and memset all of @dst on failure.
 *
 * Return: The number of copied bytes on success including the NUL terminator.
 * A negative error code on failure.
3191  */
3192 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3193 {
3194 	int ret;
3195 
3196 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3197 		return -EINVAL;
3198 
3199 	if (unlikely(!dst__sz))
3200 		return 0;
3201 
3202 	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3203 	if (ret < 0) {
3204 		if (flags & BPF_F_PAD_ZEROS)
3205 			memset((char *)dst, 0, dst__sz);
3206 
3207 		return ret;
3208 	}
3209 
3210 	if (flags & BPF_F_PAD_ZEROS)
3211 		memset((char *)dst + ret, 0, dst__sz - ret);
3212 	else
3213 		((char *)dst)[ret] = '\0';
3214 
3215 	return ret + 1;
3216 }
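/* Usage sketch (illustrative only, not part of this file): copying a user
 * string from a sleepable BPF program; "user_ptr" is a hypothetical __user
 * pointer.
 *
 *	char comm[16];
 *	int len;
 *
 *	len = bpf_copy_from_user_str(comm, sizeof(comm), user_ptr, BPF_F_PAD_ZEROS);
 *	if (len < 0)
 *		return 0;
 */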
3217 
3218 /**
 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
3220  * @dst:             Destination address, in kernel space.  This buffer must be
3221  *                   at least @dst__sz bytes long.
3222  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3223  * @unsafe_ptr__ign: Source address in the task's address space.
3224  * @tsk:             The task whose address space will be used
3225  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3226  *
 * Copies a NUL-terminated string from a task's address space to the @dst
 * buffer. If the string is too long, this will still ensure NUL termination
 * in the @dst buffer unless the buffer size is 0.
 *
 * If the BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
 * and memset all of @dst on failure.
3233  *
3234  * Return: The number of copied bytes on success including the NUL terminator.
3235  * A negative error code on failure.
3236  */
3237 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
3238 					    const void __user *unsafe_ptr__ign,
3239 					    struct task_struct *tsk, u64 flags)
3240 {
3241 	int ret;
3242 
3243 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3244 		return -EINVAL;
3245 
3246 	if (unlikely(dst__sz == 0))
3247 		return 0;
3248 
3249 	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
3250 	if (ret < 0) {
3251 		if (flags & BPF_F_PAD_ZEROS)
3252 			memset(dst, 0, dst__sz);
3253 		return ret;
3254 	}
3255 
3256 	if (flags & BPF_F_PAD_ZEROS)
3257 		memset(dst + ret, 0, dst__sz - ret);
3258 
3259 	return ret + 1;
3260 }
3261 
/* Keep unsigned long in the prototype so that the kfunc is usable when emitted
 * to vmlinux.h in BPF programs directly, but note that while in a BPF prog the
 * unsigned long always points to an 8-byte region on the stack, the kernel may
 * only read and write 4 of those bytes on 32-bit.
 */
3267 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
3268 {
3269 	local_irq_save(*flags__irq_flag);
3270 }
3271 
3272 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
3273 {
3274 	local_irq_restore(*flags__irq_flag);
3275 }
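/* Usage sketch (illustrative only, not part of this file): the save/restore
 * pair above is used around a short critical section; the program must pass
 * the same stack slot to both calls.
 *
 *	unsigned long flags;
 *
 *	bpf_local_irq_save(&flags);
 *	// ... code that must not be interrupted ...
 *	bpf_local_irq_restore(&flags);
 */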
3276 
3277 __bpf_kfunc void __bpf_trap(void)
3278 {
3279 }
3280 
3281 __bpf_kfunc_end_defs();
3282 
3283 BTF_KFUNCS_START(generic_btf_ids)
3284 #ifdef CONFIG_CRASH_DUMP
3285 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
3286 #endif
3287 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
3288 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
3289 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
3290 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
3291 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
3292 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
3293 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
3294 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
3295 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
3296 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
3297 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
3298 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3299 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
3300 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
3301 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
3302 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
3303 BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
3304 BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
3305 BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
3306 
3307 #ifdef CONFIG_CGROUPS
3308 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3309 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
3310 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3311 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
3312 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
3313 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3314 #endif
3315 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
3316 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
3317 BTF_ID_FLAGS(func, bpf_throw)
3318 #ifdef CONFIG_BPF_EVENTS
3319 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
3320 #endif
3321 BTF_KFUNCS_END(generic_btf_ids)
3322 
3323 static const struct btf_kfunc_id_set generic_kfunc_set = {
3324 	.owner = THIS_MODULE,
3325 	.set   = &generic_btf_ids,
3326 };
3327 
3329 BTF_ID_LIST(generic_dtor_ids)
3330 BTF_ID(struct, task_struct)
3331 BTF_ID(func, bpf_task_release_dtor)
3332 #ifdef CONFIG_CGROUPS
3333 BTF_ID(struct, cgroup)
3334 BTF_ID(func, bpf_cgroup_release_dtor)
3335 #endif
3336 
3337 BTF_KFUNCS_START(common_btf_ids)
3338 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
3339 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
3340 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
3341 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
3342 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
3343 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
3344 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
3345 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
3346 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
3347 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
3348 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
3349 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
3350 #ifdef CONFIG_CGROUPS
3351 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
3352 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
3353 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
3354 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3355 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
3356 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
3357 #endif
3358 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3359 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
3360 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
3361 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
3362 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
3363 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
3364 BTF_ID_FLAGS(func, bpf_dynptr_size)
3365 BTF_ID_FLAGS(func, bpf_dynptr_clone)
3366 BTF_ID_FLAGS(func, bpf_dynptr_copy)
3367 #ifdef CONFIG_NET
3368 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
3369 #endif
3370 BTF_ID_FLAGS(func, bpf_wq_init)
3371 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
3372 BTF_ID_FLAGS(func, bpf_wq_start)
3373 BTF_ID_FLAGS(func, bpf_preempt_disable)
3374 BTF_ID_FLAGS(func, bpf_preempt_enable)
3375 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
3376 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
3377 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
3378 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
3379 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
3380 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
3381 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
3382 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
3383 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
3384 BTF_ID_FLAGS(func, bpf_local_irq_save)
3385 BTF_ID_FLAGS(func, bpf_local_irq_restore)
3386 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
3387 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
3388 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
3389 BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
3390 BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
3391 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
3392 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
3393 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
3394 #ifdef CONFIG_DMA_SHARED_BUFFER
3395 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
3396 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
3397 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
3398 #endif
3399 BTF_ID_FLAGS(func, __bpf_trap)
3400 BTF_KFUNCS_END(common_btf_ids)
3401 
3402 static const struct btf_kfunc_id_set common_kfunc_set = {
3403 	.owner = THIS_MODULE,
3404 	.set   = &common_btf_ids,
3405 };
3406 
3407 static int __init kfunc_init(void)
3408 {
3409 	int ret;
3410 	const struct btf_id_dtor_kfunc generic_dtors[] = {
3411 		{
3412 			.btf_id       = generic_dtor_ids[0],
3413 			.kfunc_btf_id = generic_dtor_ids[1]
3414 		},
3415 #ifdef CONFIG_CGROUPS
3416 		{
3417 			.btf_id       = generic_dtor_ids[2],
3418 			.kfunc_btf_id = generic_dtor_ids[3]
3419 		},
3420 #endif
3421 	};
3422 
3423 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
3424 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
3425 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
3426 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
3427 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
3428 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
3429 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
3430 						  ARRAY_SIZE(generic_dtors),
3431 						  THIS_MODULE);
3432 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
3433 }
3434 
3435 late_initcall(kfunc_init);
3436 
3437 /* Get a pointer to dynptr data up to len bytes for read only access. If
3438  * the dynptr doesn't have continuous data up to len bytes, return NULL.
3439  */
3440 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
3441 {
3442 	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
3443 
3444 	return bpf_dynptr_slice(p, 0, NULL, len);
3445 }
3446 
3447 /* Get a pointer to dynptr data up to len bytes for read write access. If
3448  * the dynptr doesn't have continuous data up to len bytes, or the dynptr
3449  * is read only, return NULL.
3450  */
3451 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
3452 {
3453 	if (__bpf_dynptr_is_rdonly(ptr))
3454 		return NULL;
3455 	return (void *)__bpf_dynptr_data(ptr, len);
3456 }
3457