1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
26
27 #include "../../lib/kstrtox.h"
28
29 /* If a kernel subsystem allows eBPF programs to call this function, then
30  * inside its own verifier_ops->get_func_proto() callback it should return
31  * bpf_map_lookup_elem_proto, so that the verifier can properly check the arguments.
32  *
33  * Different map implementations rely on rcu in their map methods
34  * lookup/update/delete, therefore eBPF programs must run under an rcu lock
35  * if the program is allowed to access maps, so check rcu_read_lock_held() or
36  * rcu_read_lock_trace_held() in all three functions.
37 */
38 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
39 {
40 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
41 !rcu_read_lock_bh_held());
42 return (unsigned long) map->ops->map_lookup_elem(map, key);
43 }
44
45 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
46 .func = bpf_map_lookup_elem,
47 .gpl_only = false,
48 .pkt_access = true,
49 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
50 .arg1_type = ARG_CONST_MAP_PTR,
51 .arg2_type = ARG_PTR_TO_MAP_KEY,
52 };
53
54 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
55 void *, value, u64, flags)
56 {
57 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
58 !rcu_read_lock_bh_held());
59 return map->ops->map_update_elem(map, key, value, flags);
60 }
61
62 const struct bpf_func_proto bpf_map_update_elem_proto = {
63 .func = bpf_map_update_elem,
64 .gpl_only = false,
65 .pkt_access = true,
66 .ret_type = RET_INTEGER,
67 .arg1_type = ARG_CONST_MAP_PTR,
68 .arg2_type = ARG_PTR_TO_MAP_KEY,
69 .arg3_type = ARG_PTR_TO_MAP_VALUE,
70 .arg4_type = ARG_ANYTHING,
71 };
72
73 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
74 {
75 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
76 !rcu_read_lock_bh_held());
77 return map->ops->map_delete_elem(map, key);
78 }
79
80 const struct bpf_func_proto bpf_map_delete_elem_proto = {
81 .func = bpf_map_delete_elem,
82 .gpl_only = false,
83 .pkt_access = true,
84 .ret_type = RET_INTEGER,
85 .arg1_type = ARG_CONST_MAP_PTR,
86 .arg2_type = ARG_PTR_TO_MAP_KEY,
87 };
88
89 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
90 {
91 return map->ops->map_push_elem(map, value, flags);
92 }
93
94 const struct bpf_func_proto bpf_map_push_elem_proto = {
95 .func = bpf_map_push_elem,
96 .gpl_only = false,
97 .pkt_access = true,
98 .ret_type = RET_INTEGER,
99 .arg1_type = ARG_CONST_MAP_PTR,
100 .arg2_type = ARG_PTR_TO_MAP_VALUE,
101 .arg3_type = ARG_ANYTHING,
102 };
103
104 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
105 {
106 return map->ops->map_pop_elem(map, value);
107 }
108
109 const struct bpf_func_proto bpf_map_pop_elem_proto = {
110 .func = bpf_map_pop_elem,
111 .gpl_only = false,
112 .ret_type = RET_INTEGER,
113 .arg1_type = ARG_CONST_MAP_PTR,
114 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
115 };
116
117 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
118 {
119 return map->ops->map_peek_elem(map, value);
120 }
121
122 const struct bpf_func_proto bpf_map_peek_elem_proto = {
123 .func = bpf_map_peek_elem,
124 .gpl_only = false,
125 .ret_type = RET_INTEGER,
126 .arg1_type = ARG_CONST_MAP_PTR,
127 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
128 };
129
130 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
131 {
132 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
133 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
134 }
135
136 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
137 .func = bpf_map_lookup_percpu_elem,
138 .gpl_only = false,
139 .pkt_access = true,
140 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
141 .arg1_type = ARG_CONST_MAP_PTR,
142 .arg2_type = ARG_PTR_TO_MAP_KEY,
143 .arg3_type = ARG_ANYTHING,
144 };
145
146 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
147 .func = bpf_user_rnd_u32,
148 .gpl_only = false,
149 .ret_type = RET_INTEGER,
150 };
151
152 BPF_CALL_0(bpf_get_smp_processor_id)
153 {
154 return smp_processor_id();
155 }
156
157 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
158 .func = bpf_get_smp_processor_id,
159 .gpl_only = false,
160 .ret_type = RET_INTEGER,
161 .allow_fastcall = true,
162 };
163
164 BPF_CALL_0(bpf_get_numa_node_id)
165 {
166 return numa_node_id();
167 }
168
169 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
170 .func = bpf_get_numa_node_id,
171 .gpl_only = false,
172 .ret_type = RET_INTEGER,
173 };
174
175 BPF_CALL_0(bpf_ktime_get_ns)
176 {
177 /* NMI safe access to clock monotonic */
178 return ktime_get_mono_fast_ns();
179 }
180
181 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
182 .func = bpf_ktime_get_ns,
183 .gpl_only = false,
184 .ret_type = RET_INTEGER,
185 };
186
187 BPF_CALL_0(bpf_ktime_get_boot_ns)
188 {
189 /* NMI safe access to clock boottime */
190 return ktime_get_boot_fast_ns();
191 }
192
193 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
194 .func = bpf_ktime_get_boot_ns,
195 .gpl_only = false,
196 .ret_type = RET_INTEGER,
197 };
198
199 BPF_CALL_0(bpf_ktime_get_coarse_ns)
200 {
201 return ktime_get_coarse_ns();
202 }
203
204 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
205 .func = bpf_ktime_get_coarse_ns,
206 .gpl_only = false,
207 .ret_type = RET_INTEGER,
208 };
209
210 BPF_CALL_0(bpf_ktime_get_tai_ns)
211 {
212 /* NMI safe access to clock tai */
213 return ktime_get_tai_fast_ns();
214 }
215
216 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
217 .func = bpf_ktime_get_tai_ns,
218 .gpl_only = false,
219 .ret_type = RET_INTEGER,
220 };
221
222 BPF_CALL_0(bpf_get_current_pid_tgid)
223 {
224 struct task_struct *task = current;
225
226 if (unlikely(!task))
227 return -EINVAL;
228
229 return (u64) task->tgid << 32 | task->pid;
230 }
231
232 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
233 .func = bpf_get_current_pid_tgid,
234 .gpl_only = false,
235 .ret_type = RET_INTEGER,
236 };
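/* Illustrative BPF-program-side use (a sketch, not part of this file): the
 * tgid lands in the upper 32 bits and the pid (thread id) in the lower 32
 * bits, so a program would typically split the result as:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 pid = (u32)id;
 */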
237
238 BPF_CALL_0(bpf_get_current_uid_gid)
239 {
240 struct task_struct *task = current;
241 kuid_t uid;
242 kgid_t gid;
243
244 if (unlikely(!task))
245 return -EINVAL;
246
247 current_uid_gid(&uid, &gid);
248 return (u64) from_kgid(&init_user_ns, gid) << 32 |
249 from_kuid(&init_user_ns, uid);
250 }
251
252 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
253 .func = bpf_get_current_uid_gid,
254 .gpl_only = false,
255 .ret_type = RET_INTEGER,
256 };
257
258 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
259 {
260 struct task_struct *task = current;
261
262 if (unlikely(!task))
263 goto err_clear;
264
265 /* Verifier guarantees that size > 0 */
266 strscpy_pad(buf, task->comm, size);
267 return 0;
268 err_clear:
269 memset(buf, 0, size);
270 return -EINVAL;
271 }
272
273 const struct bpf_func_proto bpf_get_current_comm_proto = {
274 .func = bpf_get_current_comm,
275 .gpl_only = false,
276 .ret_type = RET_INTEGER,
277 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
278 .arg2_type = ARG_CONST_SIZE,
279 };
280
281 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
282
283 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
284 {
285 arch_spinlock_t *l = (void *)lock;
286 union {
287 __u32 val;
288 arch_spinlock_t lock;
289 } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
290
291 compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
292 BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
293 BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
294 preempt_disable();
295 arch_spin_lock(l);
296 }
297
298 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
299 {
300 arch_spinlock_t *l = (void *)lock;
301
302 arch_spin_unlock(l);
303 preempt_enable();
304 }
305
306 #else
307
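/* Fallback for configs without queued or arch spinlocks usable here: a minimal
 * test-and-test-and-set lock built from atomics. The locker spins with
 * atomic_cond_read_relaxed() until the word looks free, then tries to claim it
 * with atomic_xchg(); unlock is a plain release store of 0.
 */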
308 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
309 {
310 atomic_t *l = (void *)lock;
311
312 BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
313 do {
314 atomic_cond_read_relaxed(l, !VAL);
315 } while (atomic_xchg(l, 1));
316 }
317
318 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
319 {
320 atomic_t *l = (void *)lock;
321
322 atomic_set_release(l, 0);
323 }
324
325 #endif
326
327 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
328
329 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
330 {
331 unsigned long flags;
332
333 local_irq_save(flags);
334 __bpf_spin_lock(lock);
335 __this_cpu_write(irqsave_flags, flags);
336 }
337
338 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
339 {
340 __bpf_spin_lock_irqsave(lock);
341 return 0;
342 }
343
344 const struct bpf_func_proto bpf_spin_lock_proto = {
345 .func = bpf_spin_lock,
346 .gpl_only = false,
347 .ret_type = RET_VOID,
348 .arg1_type = ARG_PTR_TO_SPIN_LOCK,
349 .arg1_btf_id = BPF_PTR_POISON,
350 };
351
352 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
353 {
354 unsigned long flags;
355
356 flags = __this_cpu_read(irqsave_flags);
357 __bpf_spin_unlock(lock);
358 local_irq_restore(flags);
359 }
360
361 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
362 {
363 __bpf_spin_unlock_irqrestore(lock);
364 return 0;
365 }
366
367 const struct bpf_func_proto bpf_spin_unlock_proto = {
368 .func = bpf_spin_unlock,
369 .gpl_only = false,
370 .ret_type = RET_VOID,
371 .arg1_type = ARG_PTR_TO_SPIN_LOCK,
372 .arg1_btf_id = BPF_PTR_POISON,
373 };
374
375 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
376 bool lock_src)
377 {
378 struct bpf_spin_lock *lock;
379
380 if (lock_src)
381 lock = src + map->record->spin_lock_off;
382 else
383 lock = dst + map->record->spin_lock_off;
384 preempt_disable();
385 __bpf_spin_lock_irqsave(lock);
386 copy_map_value(map, dst, src);
387 __bpf_spin_unlock_irqrestore(lock);
388 preempt_enable();
389 }
390
391 BPF_CALL_0(bpf_jiffies64)
392 {
393 return get_jiffies_64();
394 }
395
396 const struct bpf_func_proto bpf_jiffies64_proto = {
397 .func = bpf_jiffies64,
398 .gpl_only = false,
399 .ret_type = RET_INTEGER,
400 };
401
402 #ifdef CONFIG_CGROUPS
403 BPF_CALL_0(bpf_get_current_cgroup_id)
404 {
405 struct cgroup *cgrp;
406 u64 cgrp_id;
407
408 rcu_read_lock();
409 cgrp = task_dfl_cgroup(current);
410 cgrp_id = cgroup_id(cgrp);
411 rcu_read_unlock();
412
413 return cgrp_id;
414 }
415
416 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
417 .func = bpf_get_current_cgroup_id,
418 .gpl_only = false,
419 .ret_type = RET_INTEGER,
420 };
421
422 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
423 {
424 struct cgroup *cgrp;
425 struct cgroup *ancestor;
426 u64 cgrp_id;
427
428 rcu_read_lock();
429 cgrp = task_dfl_cgroup(current);
430 ancestor = cgroup_ancestor(cgrp, ancestor_level);
431 cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
432 rcu_read_unlock();
433
434 return cgrp_id;
435 }
436
437 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
438 .func = bpf_get_current_ancestor_cgroup_id,
439 .gpl_only = false,
440 .ret_type = RET_INTEGER,
441 .arg1_type = ARG_ANYTHING,
442 };
443 #endif /* CONFIG_CGROUPS */
444
445 #define BPF_STRTOX_BASE_MASK 0x1F
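/* The strtox helpers below take the numeric base in the low 5 bits of 'flags'
 * (only 0, 8, 10 and 16 are accepted; 0 selects auto-detection from a leading
 * "0x"/"0" prefix). All remaining flag bits must be zero.
 */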
446
447 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
448 unsigned long long *res, bool *is_negative)
449 {
450 unsigned int base = flags & BPF_STRTOX_BASE_MASK;
451 const char *cur_buf = buf;
452 size_t cur_len = buf_len;
453 unsigned int consumed;
454 size_t val_len;
455 char str[64];
456
457 if (!buf || !buf_len || !res || !is_negative)
458 return -EINVAL;
459
460 if (base != 0 && base != 8 && base != 10 && base != 16)
461 return -EINVAL;
462
463 if (flags & ~BPF_STRTOX_BASE_MASK)
464 return -EINVAL;
465
466 while (cur_buf < buf + buf_len && isspace(*cur_buf))
467 ++cur_buf;
468
469 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
470 if (*is_negative)
471 ++cur_buf;
472
473 consumed = cur_buf - buf;
474 cur_len -= consumed;
475 if (!cur_len)
476 return -EINVAL;
477
478 cur_len = min(cur_len, sizeof(str) - 1);
479 memcpy(str, cur_buf, cur_len);
480 str[cur_len] = '\0';
481 cur_buf = str;
482
483 cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
484 val_len = _parse_integer(cur_buf, base, res);
485
486 if (val_len & KSTRTOX_OVERFLOW)
487 return -ERANGE;
488
489 if (val_len == 0)
490 return -EINVAL;
491
492 cur_buf += val_len;
493 consumed += cur_buf - str;
494
495 return consumed;
496 }
497
498 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
499 long long *res)
500 {
501 unsigned long long _res;
502 bool is_negative;
503 int err;
504
505 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
506 if (err < 0)
507 return err;
508 if (is_negative) {
509 if ((long long)-_res > 0)
510 return -ERANGE;
511 *res = -_res;
512 } else {
513 if ((long long)_res < 0)
514 return -ERANGE;
515 *res = _res;
516 }
517 return err;
518 }
519
520 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
521 s64 *, res)
522 {
523 long long _res;
524 int err;
525
526 *res = 0;
527 err = __bpf_strtoll(buf, buf_len, flags, &_res);
528 if (err < 0)
529 return err;
530 *res = _res;
531 return err;
532 }
533
534 const struct bpf_func_proto bpf_strtol_proto = {
535 .func = bpf_strtol,
536 .gpl_only = false,
537 .ret_type = RET_INTEGER,
538 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
539 .arg2_type = ARG_CONST_SIZE,
540 .arg3_type = ARG_ANYTHING,
541 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
542 .arg4_size = sizeof(s64),
543 };
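/* Example call from a BPF program (a sketch; "buf" and "len" are hypothetical
 * names for a readable buffer and its length):
 *
 *	long val;
 *
 *	if (bpf_strtol(buf, len, 10, &val) < 0)
 *		... handle parse error ...
 *
 * On success the helper returns the number of characters consumed, otherwise
 * a negative errno.
 */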
544
545 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
546 u64 *, res)
547 {
548 unsigned long long _res;
549 bool is_negative;
550 int err;
551
552 *res = 0;
553 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
554 if (err < 0)
555 return err;
556 if (is_negative)
557 return -EINVAL;
558 *res = _res;
559 return err;
560 }
561
562 const struct bpf_func_proto bpf_strtoul_proto = {
563 .func = bpf_strtoul,
564 .gpl_only = false,
565 .ret_type = RET_INTEGER,
566 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
567 .arg2_type = ARG_CONST_SIZE,
568 .arg3_type = ARG_ANYTHING,
569 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
570 .arg4_size = sizeof(u64),
571 };
572
573 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
574 {
575 return strncmp(s1, s2, s1_sz);
576 }
577
578 static const struct bpf_func_proto bpf_strncmp_proto = {
579 .func = bpf_strncmp,
580 .gpl_only = false,
581 .ret_type = RET_INTEGER,
582 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
583 .arg2_type = ARG_CONST_SIZE,
584 .arg3_type = ARG_PTR_TO_CONST_STR,
585 };
586
587 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
588 struct bpf_pidns_info *, nsdata, u32, size)
589 {
590 struct task_struct *task = current;
591 struct pid_namespace *pidns;
592 int err = -EINVAL;
593
594 if (unlikely(size != sizeof(struct bpf_pidns_info)))
595 goto clear;
596
597 if (unlikely((u64)(dev_t)dev != dev))
598 goto clear;
599
600 if (unlikely(!task))
601 goto clear;
602
603 pidns = task_active_pid_ns(task);
604 if (unlikely(!pidns)) {
605 err = -ENOENT;
606 goto clear;
607 }
608
609 if (!ns_match(&pidns->ns, (dev_t)dev, ino))
610 goto clear;
611
612 nsdata->pid = task_pid_nr_ns(task, pidns);
613 nsdata->tgid = task_tgid_nr_ns(task, pidns);
614 return 0;
615 clear:
616 memset((void *)nsdata, 0, (size_t) size);
617 return err;
618 }
619
620 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
621 .func = bpf_get_ns_current_pid_tgid,
622 .gpl_only = false,
623 .ret_type = RET_INTEGER,
624 .arg1_type = ARG_ANYTHING,
625 .arg2_type = ARG_ANYTHING,
626 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
627 .arg4_type = ARG_CONST_SIZE,
628 };
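/* Usage note (a sketch of the expected flow): user space typically obtains
 * 'dev' and 'ino' by stat()-ing its pid namespace file (e.g. /proc/self/ns/pid)
 * and hands them to the program, so the helper can confirm via ns_match()
 * above that the current task is in that namespace before filling nsdata.
 */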
629
630 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
631 .func = bpf_get_raw_cpu_id,
632 .gpl_only = false,
633 .ret_type = RET_INTEGER,
634 };
635
636 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
637 u64, flags, void *, data, u64, size)
638 {
639 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
640 return -EINVAL;
641
642 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
643 }
644
645 const struct bpf_func_proto bpf_event_output_data_proto = {
646 .func = bpf_event_output_data,
647 .gpl_only = true,
648 .ret_type = RET_INTEGER,
649 .arg1_type = ARG_PTR_TO_CTX,
650 .arg2_type = ARG_CONST_MAP_PTR,
651 .arg3_type = ARG_ANYTHING,
652 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
653 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
654 };
655
656 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
657 const void __user *, user_ptr)
658 {
659 int ret = copy_from_user(dst, user_ptr, size);
660
661 if (unlikely(ret)) {
662 memset(dst, 0, size);
663 ret = -EFAULT;
664 }
665
666 return ret;
667 }
668
669 const struct bpf_func_proto bpf_copy_from_user_proto = {
670 .func = bpf_copy_from_user,
671 .gpl_only = false,
672 .might_sleep = true,
673 .ret_type = RET_INTEGER,
674 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
675 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
676 .arg3_type = ARG_ANYTHING,
677 };
678
679 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
680 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
681 {
682 int ret;
683
684 /* flags is not used yet */
685 if (unlikely(flags))
686 return -EINVAL;
687
688 if (unlikely(!size))
689 return 0;
690
691 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
692 if (ret == size)
693 return 0;
694
695 memset(dst, 0, size);
696 /* Return -EFAULT for partial read */
697 return ret < 0 ? ret : -EFAULT;
698 }
699
700 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
701 .func = bpf_copy_from_user_task,
702 .gpl_only = true,
703 .might_sleep = true,
704 .ret_type = RET_INTEGER,
705 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
706 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
707 .arg3_type = ARG_ANYTHING,
708 .arg4_type = ARG_PTR_TO_BTF_ID,
709 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
710 .arg5_type = ARG_ANYTHING
711 };
712
713 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
714 {
715 if (cpu >= nr_cpu_ids)
716 return (unsigned long)NULL;
717
718 return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
719 }
720
721 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
722 .func = bpf_per_cpu_ptr,
723 .gpl_only = false,
724 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
725 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
726 .arg2_type = ARG_ANYTHING,
727 };
728
729 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
730 {
731 return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
732 }
733
734 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
735 .func = bpf_this_cpu_ptr,
736 .gpl_only = false,
737 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
738 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
739 };
740
741 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
742 size_t bufsz)
743 {
744 void __user *user_ptr = (__force void __user *)unsafe_ptr;
745
746 buf[0] = 0;
747
748 switch (fmt_ptype) {
749 case 's':
750 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
751 if ((unsigned long)unsafe_ptr < TASK_SIZE)
752 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
753 fallthrough;
754 #endif
755 case 'k':
756 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
757 case 'u':
758 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
759 }
760
761 return -EINVAL;
762 }
763
764 /* Per-cpu temp buffers used by printf-like helpers to store the binary
765  * representation of their bprintf arguments.
766 */
767 #define MAX_BPRINTF_BIN_ARGS 512
768
769 /* Support executing three nested bprintf helper calls on a given CPU */
770 #define MAX_BPRINTF_NEST_LEVEL 3
771 struct bpf_bprintf_buffers {
772 char bin_args[MAX_BPRINTF_BIN_ARGS];
773 char buf[MAX_BPRINTF_BUF];
774 };
775
776 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
777 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
778
779 static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
780 {
781 int nest_level;
782
783 preempt_disable();
784 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
785 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
786 this_cpu_dec(bpf_bprintf_nest_level);
787 preempt_enable();
788 return -EBUSY;
789 }
790 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
791
792 return 0;
793 }
794
795 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
796 {
797 if (!data->bin_args && !data->buf)
798 return;
799 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
800 return;
801 this_cpu_dec(bpf_bprintf_nest_level);
802 preempt_enable();
803 }
804
805 /*
806 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
807 *
808 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
809 *
810 * This can be used in two ways:
811 * - Format string verification only: when data->get_bin_args is false
812 * - Arguments preparation: in addition to the above verification, it writes in
813 * data->bin_args a binary representation of arguments usable by bstr_printf
814 * where pointers from BPF have been sanitized.
815 *
816 * In argument preparation mode, if 0 is returned, safe temporary buffers are
817 * allocated and bpf_bprintf_cleanup should be called to free them after use.
818 */
819 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
820 u32 num_args, struct bpf_bprintf_data *data)
821 {
822 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
823 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
824 struct bpf_bprintf_buffers *buffers = NULL;
825 size_t sizeof_cur_arg, sizeof_cur_ip;
826 int err, i, num_spec = 0;
827 u64 cur_arg;
828 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
829
830 fmt_end = strnchr(fmt, fmt_size, 0);
831 if (!fmt_end)
832 return -EINVAL;
833 fmt_size = fmt_end - fmt;
834
835 if (get_buffers && try_get_buffers(&buffers))
836 return -EBUSY;
837
838 if (data->get_bin_args) {
839 if (num_args)
840 tmp_buf = buffers->bin_args;
841 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
842 data->bin_args = (u32 *)tmp_buf;
843 }
844
845 if (data->get_buf)
846 data->buf = buffers->buf;
847
848 for (i = 0; i < fmt_size; i++) {
849 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
850 err = -EINVAL;
851 goto out;
852 }
853
854 if (fmt[i] != '%')
855 continue;
856
857 if (fmt[i + 1] == '%') {
858 i++;
859 continue;
860 }
861
862 if (num_spec >= num_args) {
863 err = -EINVAL;
864 goto out;
865 }
866
867 /* The string is zero-terminated so if fmt[i] != 0, we can
868 * always access fmt[i + 1]; in the worst case it will be a 0
869 */
870 i++;
871
872 /* skip optional "[0 +-][num]" width formatting field */
873 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
874 fmt[i] == ' ')
875 i++;
876 if (fmt[i] >= '1' && fmt[i] <= '9') {
877 i++;
878 while (fmt[i] >= '0' && fmt[i] <= '9')
879 i++;
880 }
881
882 if (fmt[i] == 'p') {
883 sizeof_cur_arg = sizeof(long);
884
885 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
886 fmt[i + 2] == 's') {
887 fmt_ptype = fmt[i + 1];
888 i += 2;
889 goto fmt_str;
890 }
891
892 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
893 ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
894 fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
895 fmt[i + 1] == 'S') {
896 /* just kernel pointers */
897 if (tmp_buf)
898 cur_arg = raw_args[num_spec];
899 i++;
900 goto nocopy_fmt;
901 }
902
903 if (fmt[i + 1] == 'B') {
904 if (tmp_buf) {
905 err = snprintf(tmp_buf,
906 (tmp_buf_end - tmp_buf),
907 "%pB",
908 (void *)(long)raw_args[num_spec]);
909 tmp_buf += (err + 1);
910 }
911
912 i++;
913 num_spec++;
914 continue;
915 }
916
917 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
918 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
919 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
920 err = -EINVAL;
921 goto out;
922 }
923
924 i += 2;
925 if (!tmp_buf)
926 goto nocopy_fmt;
927
928 sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
929 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
930 err = -ENOSPC;
931 goto out;
932 }
933
934 unsafe_ptr = (char *)(long)raw_args[num_spec];
935 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
936 sizeof_cur_ip);
937 if (err < 0)
938 memset(cur_ip, 0, sizeof_cur_ip);
939
940 /* hack: bstr_printf expects IP addresses to be
941 * pre-formatted as strings; ironically, the easiest way
942 * to do that is to call snprintf.
943 */
944 ip_spec[2] = fmt[i - 1];
945 ip_spec[3] = fmt[i];
946 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
947 ip_spec, &cur_ip);
948
949 tmp_buf += err + 1;
950 num_spec++;
951
952 continue;
953 } else if (fmt[i] == 's') {
954 fmt_ptype = fmt[i];
955 fmt_str:
956 if (fmt[i + 1] != 0 &&
957 !isspace(fmt[i + 1]) &&
958 !ispunct(fmt[i + 1])) {
959 err = -EINVAL;
960 goto out;
961 }
962
963 if (!tmp_buf)
964 goto nocopy_fmt;
965
966 if (tmp_buf_end == tmp_buf) {
967 err = -ENOSPC;
968 goto out;
969 }
970
971 unsafe_ptr = (char *)(long)raw_args[num_spec];
972 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
973 fmt_ptype,
974 tmp_buf_end - tmp_buf);
975 if (err < 0) {
976 tmp_buf[0] = '\0';
977 err = 1;
978 }
979
980 tmp_buf += err;
981 num_spec++;
982
983 continue;
984 } else if (fmt[i] == 'c') {
985 if (!tmp_buf)
986 goto nocopy_fmt;
987
988 if (tmp_buf_end == tmp_buf) {
989 err = -ENOSPC;
990 goto out;
991 }
992
993 *tmp_buf = raw_args[num_spec];
994 tmp_buf++;
995 num_spec++;
996
997 continue;
998 }
999
1000 sizeof_cur_arg = sizeof(int);
1001
1002 if (fmt[i] == 'l') {
1003 sizeof_cur_arg = sizeof(long);
1004 i++;
1005 }
1006 if (fmt[i] == 'l') {
1007 sizeof_cur_arg = sizeof(long long);
1008 i++;
1009 }
1010
1011 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1012 fmt[i] != 'x' && fmt[i] != 'X') {
1013 err = -EINVAL;
1014 goto out;
1015 }
1016
1017 if (tmp_buf)
1018 cur_arg = raw_args[num_spec];
1019 nocopy_fmt:
1020 if (tmp_buf) {
1021 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1022 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1023 err = -ENOSPC;
1024 goto out;
1025 }
1026
1027 if (sizeof_cur_arg == 8) {
1028 *(u32 *)tmp_buf = *(u32 *)&cur_arg;
1029 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1030 } else {
1031 *(u32 *)tmp_buf = (u32)(long)cur_arg;
1032 }
1033 tmp_buf += sizeof_cur_arg;
1034 }
1035 num_spec++;
1036 }
1037
1038 err = 0;
1039 out:
1040 if (err)
1041 bpf_bprintf_cleanup(data);
1042 return err;
1043 }
1044
1045 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1046 const void *, args, u32, data_len)
1047 {
1048 struct bpf_bprintf_data data = {
1049 .get_bin_args = true,
1050 };
1051 int err, num_args;
1052
1053 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1054 (data_len && !args))
1055 return -EINVAL;
1056 num_args = data_len / 8;
1057
1058 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1059 * can safely give an unbounded size.
1060 */
1061 err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1062 if (err < 0)
1063 return err;
1064
1065 err = bstr_printf(str, str_size, fmt, data.bin_args);
1066
1067 bpf_bprintf_cleanup(&data);
1068
1069 return err + 1;
1070 }
1071
1072 const struct bpf_func_proto bpf_snprintf_proto = {
1073 .func = bpf_snprintf,
1074 .gpl_only = true,
1075 .ret_type = RET_INTEGER,
1076 .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
1077 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1078 .arg3_type = ARG_PTR_TO_CONST_STR,
1079 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1080 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1081 };
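/* Usage sketch from a BPF program (illustrative only; "pid", "name_ptr" and
 * "out" are hypothetical names): the variadic values are passed as an array
 * of u64, one 8-byte slot per conversion specifier, with data_len giving the
 * array size in bytes:
 *
 *	u64 args[] = { pid, (u64)(long)name_ptr };
 *	bpf_snprintf(out, sizeof(out), "%d: %s", args, sizeof(args));
 *
 * The return value is the number of bytes the full output would need,
 * including the terminating NUL (hence the "err + 1" above).
 */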
1082
1083 struct bpf_async_cb {
1084 struct bpf_map *map;
1085 struct bpf_prog *prog;
1086 void __rcu *callback_fn;
1087 void *value;
1088 union {
1089 struct rcu_head rcu;
1090 struct work_struct delete_work;
1091 };
1092 u64 flags;
1093 };
1094
1095 /* BPF map elements can contain 'struct bpf_timer'.
1096  * Such a map owns all of its BPF timers.
1097  * 'struct bpf_timer' is allocated as part of the map element allocation
1098  * and it is zero initialized.
1099  * That space is used to keep 'struct bpf_async_kern'.
1100  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
1101  * remembers the 'struct bpf_map *' pointer it is part of.
1102  * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf callback_fn.
1103  * bpf_timer_start() arms the timer.
1104  * If the user space reference to a map goes to zero at this point, the
1105  * ops->map_release_uref callback is responsible for cancelling the timers,
1106  * freeing their memory, and decrementing the progs' refcnts.
1107  * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
1108  * Inner maps can contain bpf timers as well; ops->map_release_uref frees
1109  * the timers when an inner map is replaced or deleted by user space.
1110 */
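/* Typical BPF-program-side lifecycle (a sketch for orientation only;
 * "map_value", "my_map" and "my_timer_cb" are hypothetical names):
 *
 *	struct bpf_timer *t = &map_value->timer;
 *
 *	bpf_timer_init(t, &my_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(t, my_timer_cb);
 *	bpf_timer_start(t, NSEC_PER_SEC, 0);	(fires in ~1s, relative mode)
 */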
1111 struct bpf_hrtimer {
1112 struct bpf_async_cb cb;
1113 struct hrtimer timer;
1114 atomic_t cancelling;
1115 };
1116
1117 struct bpf_work {
1118 struct bpf_async_cb cb;
1119 struct work_struct work;
1120 struct work_struct delete_work;
1121 };
1122
1123 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
1124 struct bpf_async_kern {
1125 union {
1126 struct bpf_async_cb *cb;
1127 struct bpf_hrtimer *timer;
1128 struct bpf_work *work;
1129 };
1130 /* bpf_spin_lock is used here instead of spinlock_t to make
1131 * sure that it always fits into space reserved by struct bpf_timer
1132 * regardless of LOCKDEP and spinlock debug flags.
1133 */
1134 struct bpf_spin_lock lock;
1135 } __attribute__((aligned(8)));
1136
1137 enum bpf_async_type {
1138 BPF_ASYNC_TYPE_TIMER = 0,
1139 BPF_ASYNC_TYPE_WQ,
1140 };
1141
1142 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1143
1144 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1145 {
1146 struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1147 struct bpf_map *map = t->cb.map;
1148 void *value = t->cb.value;
1149 bpf_callback_t callback_fn;
1150 void *key;
1151 u32 idx;
1152
1153 BTF_TYPE_EMIT(struct bpf_timer);
1154 callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
1155 if (!callback_fn)
1156 goto out;
1157
1158 /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1159 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1160 * Remember the timer this callback is servicing to prevent
1161 * deadlock if callback_fn() calls bpf_timer_cancel() or
1162 * bpf_map_delete_elem() on the same timer.
1163 */
1164 this_cpu_write(hrtimer_running, t);
1165 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1166 struct bpf_array *array = container_of(map, struct bpf_array, map);
1167
1168 /* compute the key */
1169 idx = ((char *)value - array->value) / array->elem_size;
1170 key = &idx;
1171 } else { /* hash or lru */
1172 key = value - round_up(map->key_size, 8);
1173 }
1174
1175 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1176 /* The verifier checked that return value is zero. */
1177
1178 this_cpu_write(hrtimer_running, NULL);
1179 out:
1180 return HRTIMER_NORESTART;
1181 }
1182
1183 static void bpf_wq_work(struct work_struct *work)
1184 {
1185 struct bpf_work *w = container_of(work, struct bpf_work, work);
1186 struct bpf_async_cb *cb = &w->cb;
1187 struct bpf_map *map = cb->map;
1188 bpf_callback_t callback_fn;
1189 void *value = cb->value;
1190 void *key;
1191 u32 idx;
1192
1193 BTF_TYPE_EMIT(struct bpf_wq);
1194
1195 callback_fn = READ_ONCE(cb->callback_fn);
1196 if (!callback_fn)
1197 return;
1198
1199 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1200 struct bpf_array *array = container_of(map, struct bpf_array, map);
1201
1202 /* compute the key */
1203 idx = ((char *)value - array->value) / array->elem_size;
1204 key = &idx;
1205 } else { /* hash or lru */
1206 key = value - round_up(map->key_size, 8);
1207 }
1208
1209 rcu_read_lock_trace();
1210 migrate_disable();
1211
1212 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1213
1214 migrate_enable();
1215 rcu_read_unlock_trace();
1216 }
1217
1218 static void bpf_wq_delete_work(struct work_struct *work)
1219 {
1220 struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
1221
1222 cancel_work_sync(&w->work);
1223
1224 kfree_rcu(w, cb.rcu);
1225 }
1226
1227 static void bpf_timer_delete_work(struct work_struct *work)
1228 {
1229 struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
1230
1231 /* Cancel the timer and wait for callback to complete if it was running.
1232 * If hrtimer_cancel() can be safely called it's safe to call
1233 * kfree_rcu(t) right after for both preallocated and non-preallocated
1234 * maps. The async->cb = NULL was already done and no code path can see
1235  * address 't' anymore. Any timer that was armed on an existing bpf_hrtimer
1236  * before bpf_timer_cancel_and_free() will have been cancelled.
1237 */
1238 hrtimer_cancel(&t->timer);
1239 kfree_rcu(t, cb.rcu);
1240 }
1241
1242 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1243 enum bpf_async_type type)
1244 {
1245 struct bpf_async_cb *cb;
1246 struct bpf_hrtimer *t;
1247 struct bpf_work *w;
1248 clockid_t clockid;
1249 size_t size;
1250 int ret = 0;
1251
1252 if (in_nmi())
1253 return -EOPNOTSUPP;
1254
1255 switch (type) {
1256 case BPF_ASYNC_TYPE_TIMER:
1257 size = sizeof(struct bpf_hrtimer);
1258 break;
1259 case BPF_ASYNC_TYPE_WQ:
1260 size = sizeof(struct bpf_work);
1261 break;
1262 default:
1263 return -EINVAL;
1264 }
1265
1266 __bpf_spin_lock_irqsave(&async->lock);
1267 t = async->timer;
1268 if (t) {
1269 ret = -EBUSY;
1270 goto out;
1271 }
1272
1273 /* allocate hrtimer via map_kmalloc to use memcg accounting */
1274 cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
1275 if (!cb) {
1276 ret = -ENOMEM;
1277 goto out;
1278 }
1279
1280 switch (type) {
1281 case BPF_ASYNC_TYPE_TIMER:
1282 clockid = flags & (MAX_CLOCKS - 1);
1283 t = (struct bpf_hrtimer *)cb;
1284
1285 atomic_set(&t->cancelling, 0);
1286 INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
1287 hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
1288 cb->value = (void *)async - map->record->timer_off;
1289 break;
1290 case BPF_ASYNC_TYPE_WQ:
1291 w = (struct bpf_work *)cb;
1292
1293 INIT_WORK(&w->work, bpf_wq_work);
1294 INIT_WORK(&w->delete_work, bpf_wq_delete_work);
1295 cb->value = (void *)async - map->record->wq_off;
1296 break;
1297 }
1298 cb->map = map;
1299 cb->prog = NULL;
1300 cb->flags = flags;
1301 rcu_assign_pointer(cb->callback_fn, NULL);
1302
1303 WRITE_ONCE(async->cb, cb);
1304 /* Guarantee the order between async->cb and map->usercnt. So
1305 * when there are concurrent uref release and bpf timer init, either
1306 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
1307 * timer or atomic64_read() below returns a zero usercnt.
1308 */
1309 smp_mb();
1310 if (!atomic64_read(&map->usercnt)) {
1311 /* maps with timers must be either held by user space
1312 * or pinned in bpffs.
1313 */
1314 WRITE_ONCE(async->cb, NULL);
1315 kfree(cb);
1316 ret = -EPERM;
1317 }
1318 out:
1319 __bpf_spin_unlock_irqrestore(&async->lock);
1320 return ret;
1321 }
1322
1323 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1324 u64, flags)
1325 {
1326 clockid_t clockid = flags & (MAX_CLOCKS - 1);
1327
1328 BUILD_BUG_ON(MAX_CLOCKS != 16);
1329 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1330 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1331
1332 if (flags >= MAX_CLOCKS ||
1333 /* similar to timerfd except _ALARM variants are not supported */
1334 (clockid != CLOCK_MONOTONIC &&
1335 clockid != CLOCK_REALTIME &&
1336 clockid != CLOCK_BOOTTIME))
1337 return -EINVAL;
1338
1339 return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1340 }
1341
1342 static const struct bpf_func_proto bpf_timer_init_proto = {
1343 .func = bpf_timer_init,
1344 .gpl_only = true,
1345 .ret_type = RET_INTEGER,
1346 .arg1_type = ARG_PTR_TO_TIMER,
1347 .arg2_type = ARG_CONST_MAP_PTR,
1348 .arg3_type = ARG_ANYTHING,
1349 };
1350
1351 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
1352 struct bpf_prog_aux *aux, unsigned int flags,
1353 enum bpf_async_type type)
1354 {
1355 struct bpf_prog *prev, *prog = aux->prog;
1356 struct bpf_async_cb *cb;
1357 int ret = 0;
1358
1359 if (in_nmi())
1360 return -EOPNOTSUPP;
1361 __bpf_spin_lock_irqsave(&async->lock);
1362 cb = async->cb;
1363 if (!cb) {
1364 ret = -EINVAL;
1365 goto out;
1366 }
1367 if (!atomic64_read(&cb->map->usercnt)) {
1368 /* maps with timers must be either held by user space
1369 * or pinned in bpffs. Otherwise timer might still be
1370 * running even when bpf prog is detached and user space
1371 * is gone, since map_release_uref won't ever be called.
1372 */
1373 ret = -EPERM;
1374 goto out;
1375 }
1376 prev = cb->prog;
1377 if (prev != prog) {
1378 /* Bump prog refcnt once. Every bpf_timer_set_callback()
1379 * can pick different callback_fn-s within the same prog.
1380 */
1381 prog = bpf_prog_inc_not_zero(prog);
1382 if (IS_ERR(prog)) {
1383 ret = PTR_ERR(prog);
1384 goto out;
1385 }
1386 if (prev)
1387 /* Drop prev prog refcnt when swapping with new prog */
1388 bpf_prog_put(prev);
1389 cb->prog = prog;
1390 }
1391 rcu_assign_pointer(cb->callback_fn, callback_fn);
1392 out:
1393 __bpf_spin_unlock_irqrestore(&async->lock);
1394 return ret;
1395 }
1396
1397 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
1398 struct bpf_prog_aux *, aux)
1399 {
1400 return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
1401 }
1402
1403 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1404 .func = bpf_timer_set_callback,
1405 .gpl_only = true,
1406 .ret_type = RET_INTEGER,
1407 .arg1_type = ARG_PTR_TO_TIMER,
1408 .arg2_type = ARG_PTR_TO_FUNC,
1409 };
1410
1411 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
1412 {
1413 struct bpf_hrtimer *t;
1414 int ret = 0;
1415 enum hrtimer_mode mode;
1416
1417 if (in_nmi())
1418 return -EOPNOTSUPP;
1419 if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1420 return -EINVAL;
1421 __bpf_spin_lock_irqsave(&timer->lock);
1422 t = timer->timer;
1423 if (!t || !t->cb.prog) {
1424 ret = -EINVAL;
1425 goto out;
1426 }
1427
1428 if (flags & BPF_F_TIMER_ABS)
1429 mode = HRTIMER_MODE_ABS_SOFT;
1430 else
1431 mode = HRTIMER_MODE_REL_SOFT;
1432
1433 if (flags & BPF_F_TIMER_CPU_PIN)
1434 mode |= HRTIMER_MODE_PINNED;
1435
1436 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1437 out:
1438 __bpf_spin_unlock_irqrestore(&timer->lock);
1439 return ret;
1440 }
1441
1442 static const struct bpf_func_proto bpf_timer_start_proto = {
1443 .func = bpf_timer_start,
1444 .gpl_only = true,
1445 .ret_type = RET_INTEGER,
1446 .arg1_type = ARG_PTR_TO_TIMER,
1447 .arg2_type = ARG_ANYTHING,
1448 .arg3_type = ARG_ANYTHING,
1449 };
1450
1451 static void drop_prog_refcnt(struct bpf_async_cb *async)
1452 {
1453 struct bpf_prog *prog = async->prog;
1454
1455 if (prog) {
1456 bpf_prog_put(prog);
1457 async->prog = NULL;
1458 rcu_assign_pointer(async->callback_fn, NULL);
1459 }
1460 }
1461
1462 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
1463 {
1464 struct bpf_hrtimer *t, *cur_t;
1465 bool inc = false;
1466 int ret = 0;
1467
1468 if (in_nmi())
1469 return -EOPNOTSUPP;
1470 rcu_read_lock();
1471 __bpf_spin_lock_irqsave(&timer->lock);
1472 t = timer->timer;
1473 if (!t) {
1474 ret = -EINVAL;
1475 goto out;
1476 }
1477
1478 cur_t = this_cpu_read(hrtimer_running);
1479 if (cur_t == t) {
1480 /* If bpf callback_fn is trying to bpf_timer_cancel()
1481 * its own timer the hrtimer_cancel() will deadlock
1482 * since it waits for callback_fn to finish.
1483 */
1484 ret = -EDEADLK;
1485 goto out;
1486 }
1487
1488 /* Only account in-flight cancellations when invoked from a timer
1489 * callback, since we want to avoid waiting only if other _callbacks_
1490 * are waiting on us, to avoid introducing lockups. Non-callback paths
1491 * are ok, since nobody would synchronously wait for their completion.
1492 */
1493 if (!cur_t)
1494 goto drop;
1495 atomic_inc(&t->cancelling);
1496 /* Need full barrier after relaxed atomic_inc */
1497 smp_mb__after_atomic();
1498 inc = true;
1499 if (atomic_read(&cur_t->cancelling)) {
1500 /* We're cancelling timer t, while some other timer callback is
1501 * attempting to cancel us. In such a case, it might be possible
1502 * that timer t belongs to the other callback, or some other
1503 * callback waiting upon it (creating transitive dependencies
1504 * upon us), and we will enter a deadlock if we continue
1505 * cancelling and waiting for it synchronously, since it might
1506 * do the same. Bail!
1507 */
1508 ret = -EDEADLK;
1509 goto out;
1510 }
1511 drop:
1512 drop_prog_refcnt(&t->cb);
1513 out:
1514 __bpf_spin_unlock_irqrestore(&timer->lock);
1515 /* Cancel the timer and wait for associated callback to finish
1516 * if it was running.
1517 */
1518 ret = ret ?: hrtimer_cancel(&t->timer);
1519 if (inc)
1520 atomic_dec(&t->cancelling);
1521 rcu_read_unlock();
1522 return ret;
1523 }
1524
1525 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1526 .func = bpf_timer_cancel,
1527 .gpl_only = true,
1528 .ret_type = RET_INTEGER,
1529 .arg1_type = ARG_PTR_TO_TIMER,
1530 };
1531
1532 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
1533 {
1534 struct bpf_async_cb *cb;
1535
1536 /* Performance optimization: read async->cb without lock first. */
1537 if (!READ_ONCE(async->cb))
1538 return NULL;
1539
1540 __bpf_spin_lock_irqsave(&async->lock);
1541 /* re-read it under lock */
1542 cb = async->cb;
1543 if (!cb)
1544 goto out;
1545 drop_prog_refcnt(cb);
1546 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1547 * this timer, since it won't be initialized.
1548 */
1549 WRITE_ONCE(async->cb, NULL);
1550 out:
1551 __bpf_spin_unlock_irqrestore(&async->lock);
1552 return cb;
1553 }
1554
1555 /* This function is called by map_delete/update_elem for individual element and
1556 * by ops->map_release_uref when the user space reference to a map reaches zero.
1557 */
1558 void bpf_timer_cancel_and_free(void *val)
1559 {
1560 struct bpf_hrtimer *t;
1561
1562 t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);
1563
1564 if (!t)
1565 return;
1566 /* We check that bpf_map_delete/update_elem() was called from timer
1567 * callback_fn. In such case we don't call hrtimer_cancel() (since it
1568 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
1569 * just return -1). Though callback_fn is still running on this cpu it's
1570 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1571 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1572 * since async->cb = NULL was already done. The timer will be
1573 * effectively cancelled because bpf_timer_cb() will return
1574 * HRTIMER_NORESTART.
1575 *
1576 * However, it is possible the timer callback_fn calling us armed the
1577 * timer _before_ calling us, such that failing to cancel it here will
1578 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
1579 * Therefore, we _need_ to cancel any outstanding timers before we do
1580 * kfree_rcu, even though no more timers can be armed.
1581 *
1582 * Moreover, we need to schedule work even if timer does not belong to
1583 * the calling callback_fn, as on two different CPUs, we can end up in a
1584 * situation where both sides run in parallel, try to cancel one
1585 * another, and we end up waiting on both sides in hrtimer_cancel
1586 * without making forward progress, since timer1 depends on timer2
1587 * callback to finish, and vice versa.
1588 *
1589 * CPU 1 (timer1_cb) CPU 2 (timer2_cb)
1590 * bpf_timer_cancel_and_free(timer2) bpf_timer_cancel_and_free(timer1)
1591 *
1592 * To avoid these issues, punt to workqueue context when we are in a
1593 * timer callback.
1594 */
1595 if (this_cpu_read(hrtimer_running)) {
1596 queue_work(system_unbound_wq, &t->cb.delete_work);
1597 return;
1598 }
1599
1600 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1601 /* If the timer is running on other CPU, also use a kworker to
1602 * wait for the completion of the timer instead of trying to
1603 * acquire a sleepable lock in hrtimer_cancel() to wait for its
1604 * completion.
1605 */
1606 if (hrtimer_try_to_cancel(&t->timer) >= 0)
1607 kfree_rcu(t, cb.rcu);
1608 else
1609 queue_work(system_unbound_wq, &t->cb.delete_work);
1610 } else {
1611 bpf_timer_delete_work(&t->cb.delete_work);
1612 }
1613 }
1614
1615 /* This function is called by map_delete/update_elem for individual element and
1616 * by ops->map_release_uref when the user space reference to a map reaches zero.
1617 */
1618 void bpf_wq_cancel_and_free(void *val)
1619 {
1620 struct bpf_work *work;
1621
1622 BTF_TYPE_EMIT(struct bpf_wq);
1623
1624 work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
1625 if (!work)
1626 return;
1627 /* Trigger cancel of the sleepable work, but *do not* wait for
1628 * it to finish if it was running as we might not be in a
1629 * sleepable context.
1630 * kfree will be called once the work has finished.
1631 */
1632 schedule_work(&work->delete_work);
1633 }
1634
1635 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
1636 {
1637 unsigned long *kptr = dst;
1638
1639 /* This helper may be inlined by verifier. */
1640 return xchg(kptr, (unsigned long)ptr);
1641 }
1642
1643 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
1644 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1645 * denote type that verifier will determine.
1646 */
1647 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1648 .func = bpf_kptr_xchg,
1649 .gpl_only = false,
1650 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1651 .ret_btf_id = BPF_PTR_POISON,
1652 .arg1_type = ARG_KPTR_XCHG_DEST,
1653 .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1654 .arg2_btf_id = BPF_PTR_POISON,
1655 };
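/* Sketch of use from a BPF program: atomically swap a referenced kptr stored
 * in a map value with a new pointer (or NULL), and release whatever came out
 * ("map_value->node" and "new_node" are hypothetical names):
 *
 *	old = bpf_kptr_xchg(&map_value->node, new_node);
 *	if (old)
 *		... release 'old' with the matching release kfunc ...
 */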
1656
1657 /* Since the upper 8 bits of dynptr->size are reserved, the
1658 * maximum supported size is 2^24 - 1.
1659 */
1660 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
1661 #define DYNPTR_TYPE_SHIFT 28
1662 #define DYNPTR_SIZE_MASK 0xFFFFFF
1663 #define DYNPTR_RDONLY_BIT BIT(31)
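/* Layout of the bpf_dynptr_kern 'size' word implied by the masks above:
 * bits 0-23 hold the size, bits 28-30 hold the enum bpf_dynptr_type and
 * bit 31 marks the dynptr as read-only.
 */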
1664
1665 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1666 {
1667 return ptr->size & DYNPTR_RDONLY_BIT;
1668 }
1669
1670 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1671 {
1672 ptr->size |= DYNPTR_RDONLY_BIT;
1673 }
1674
1675 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1676 {
1677 ptr->size |= type << DYNPTR_TYPE_SHIFT;
1678 }
1679
1680 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1681 {
1682 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1683 }
1684
1685 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1686 {
1687 return ptr->size & DYNPTR_SIZE_MASK;
1688 }
1689
1690 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1691 {
1692 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1693
1694 ptr->size = new_size | metadata;
1695 }
1696
1697 int bpf_dynptr_check_size(u32 size)
1698 {
1699 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1700 }
1701
1702 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1703 enum bpf_dynptr_type type, u32 offset, u32 size)
1704 {
1705 ptr->data = data;
1706 ptr->offset = offset;
1707 ptr->size = size;
1708 bpf_dynptr_set_type(ptr, type);
1709 }
1710
1711 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1712 {
1713 memset(ptr, 0, sizeof(*ptr));
1714 }
1715
1716 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1717 {
1718 u32 size = __bpf_dynptr_size(ptr);
1719
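/* Both checks together reject any (offset, len) window that does not fit
 * inside the dynptr; checking len first keeps "size - len" from wrapping.
 */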
1720 if (len > size || offset > size - len)
1721 return -E2BIG;
1722
1723 return 0;
1724 }
1725
1726 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1727 {
1728 int err;
1729
1730 BTF_TYPE_EMIT(struct bpf_dynptr);
1731
1732 err = bpf_dynptr_check_size(size);
1733 if (err)
1734 goto error;
1735
1736 /* flags is currently unsupported */
1737 if (flags) {
1738 err = -EINVAL;
1739 goto error;
1740 }
1741
1742 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1743
1744 return 0;
1745
1746 error:
1747 bpf_dynptr_set_null(ptr);
1748 return err;
1749 }
1750
1751 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1752 .func = bpf_dynptr_from_mem,
1753 .gpl_only = false,
1754 .ret_type = RET_INTEGER,
1755 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1756 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1757 .arg3_type = ARG_ANYTHING,
1758 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
1759 };
1760
1761 static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
1762 u32 offset, u64 flags)
1763 {
1764 enum bpf_dynptr_type type;
1765 int err;
1766
1767 if (!src->data || flags)
1768 return -EINVAL;
1769
1770 err = bpf_dynptr_check_off_len(src, offset, len);
1771 if (err)
1772 return err;
1773
1774 type = bpf_dynptr_get_type(src);
1775
1776 switch (type) {
1777 case BPF_DYNPTR_TYPE_LOCAL:
1778 case BPF_DYNPTR_TYPE_RINGBUF:
1779 /* Source and destination may possibly overlap, hence use memmove to
1780 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1781 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1782 */
1783 memmove(dst, src->data + src->offset + offset, len);
1784 return 0;
1785 case BPF_DYNPTR_TYPE_SKB:
1786 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1787 case BPF_DYNPTR_TYPE_XDP:
1788 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1789 default:
1790 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1791 return -EFAULT;
1792 }
1793 }
1794
1795 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1796 u32, offset, u64, flags)
1797 {
1798 return __bpf_dynptr_read(dst, len, src, offset, flags);
1799 }
1800
1801 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1802 .func = bpf_dynptr_read,
1803 .gpl_only = false,
1804 .ret_type = RET_INTEGER,
1805 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1806 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1807 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1808 .arg4_type = ARG_ANYTHING,
1809 .arg5_type = ARG_ANYTHING,
1810 };
1811
1812 static int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
1813 u32 len, u64 flags)
1814 {
1815 enum bpf_dynptr_type type;
1816 int err;
1817
1818 if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1819 return -EINVAL;
1820
1821 err = bpf_dynptr_check_off_len(dst, offset, len);
1822 if (err)
1823 return err;
1824
1825 type = bpf_dynptr_get_type(dst);
1826
1827 switch (type) {
1828 case BPF_DYNPTR_TYPE_LOCAL:
1829 case BPF_DYNPTR_TYPE_RINGBUF:
1830 if (flags)
1831 return -EINVAL;
1832 /* Source and destination may possibly overlap, hence use memmove to
1833 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1834 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1835 */
1836 memmove(dst->data + dst->offset + offset, src, len);
1837 return 0;
1838 case BPF_DYNPTR_TYPE_SKB:
1839 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1840 flags);
1841 case BPF_DYNPTR_TYPE_XDP:
1842 if (flags)
1843 return -EINVAL;
1844 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1845 default:
1846 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1847 return -EFAULT;
1848 }
1849 }
1850
1851 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1852 u32, len, u64, flags)
1853 {
1854 return __bpf_dynptr_write(dst, offset, src, len, flags);
1855 }
1856
1857 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1858 .func = bpf_dynptr_write,
1859 .gpl_only = false,
1860 .ret_type = RET_INTEGER,
1861 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1862 .arg2_type = ARG_ANYTHING,
1863 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1864 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1865 .arg5_type = ARG_ANYTHING,
1866 };
1867
1868 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1869 {
1870 enum bpf_dynptr_type type;
1871 int err;
1872
1873 if (!ptr->data)
1874 return 0;
1875
1876 err = bpf_dynptr_check_off_len(ptr, offset, len);
1877 if (err)
1878 return 0;
1879
1880 if (__bpf_dynptr_is_rdonly(ptr))
1881 return 0;
1882
1883 type = bpf_dynptr_get_type(ptr);
1884
1885 switch (type) {
1886 case BPF_DYNPTR_TYPE_LOCAL:
1887 case BPF_DYNPTR_TYPE_RINGBUF:
1888 return (unsigned long)(ptr->data + ptr->offset + offset);
1889 case BPF_DYNPTR_TYPE_SKB:
1890 case BPF_DYNPTR_TYPE_XDP:
1891 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1892 return 0;
1893 default:
1894 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1895 return 0;
1896 }
1897 }
1898
1899 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1900 .func = bpf_dynptr_data,
1901 .gpl_only = false,
1902 .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1903 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1904 .arg2_type = ARG_ANYTHING,
1905 .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
1906 };
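
/* Illustrative BPF-side use of bpf_dynptr_data() with a ringbuf dynptr: a
 * minimal sketch under an assumed map and section name. Note that for skb and
 * xdp dynptrs this helper returns NULL; bpf_dynptr_slice() must be used there.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_RINGBUF);
 *		__uint(max_entries, 4096);
 *	} rb SEC(".maps");
 *
 *	SEC("tp_btf/sched_switch")
 *	int emit_event(void *ctx)
 *	{
 *		struct bpf_dynptr dptr;
 *		__u32 *slot;
 *
 *		// Even on failure the dynptr must be discarded or submitted.
 *		if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(*slot), 0, &dptr)) {
 *			bpf_ringbuf_discard_dynptr(&dptr, 0);
 *			return 0;
 *		}
 *		// Direct, verifier-checked pointer into the reserved sample.
 *		slot = bpf_dynptr_data(&dptr, 0, sizeof(*slot));
 *		if (slot)
 *			*slot = 42;
 *		bpf_ringbuf_submit_dynptr(&dptr, 0);
 *		return 0;
 *	}
 */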
1907
1908 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1909 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1910 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1911 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1912 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1913 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1914 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1915
1916 const struct bpf_func_proto *
1917 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1918 {
1919 switch (func_id) {
1920 case BPF_FUNC_map_lookup_elem:
1921 return &bpf_map_lookup_elem_proto;
1922 case BPF_FUNC_map_update_elem:
1923 return &bpf_map_update_elem_proto;
1924 case BPF_FUNC_map_delete_elem:
1925 return &bpf_map_delete_elem_proto;
1926 case BPF_FUNC_map_push_elem:
1927 return &bpf_map_push_elem_proto;
1928 case BPF_FUNC_map_pop_elem:
1929 return &bpf_map_pop_elem_proto;
1930 case BPF_FUNC_map_peek_elem:
1931 return &bpf_map_peek_elem_proto;
1932 case BPF_FUNC_map_lookup_percpu_elem:
1933 return &bpf_map_lookup_percpu_elem_proto;
1934 case BPF_FUNC_get_prandom_u32:
1935 return &bpf_get_prandom_u32_proto;
1936 case BPF_FUNC_get_smp_processor_id:
1937 return &bpf_get_raw_smp_processor_id_proto;
1938 case BPF_FUNC_get_numa_node_id:
1939 return &bpf_get_numa_node_id_proto;
1940 case BPF_FUNC_tail_call:
1941 return &bpf_tail_call_proto;
1942 case BPF_FUNC_ktime_get_ns:
1943 return &bpf_ktime_get_ns_proto;
1944 case BPF_FUNC_ktime_get_boot_ns:
1945 return &bpf_ktime_get_boot_ns_proto;
1946 case BPF_FUNC_ktime_get_tai_ns:
1947 return &bpf_ktime_get_tai_ns_proto;
1948 case BPF_FUNC_ringbuf_output:
1949 return &bpf_ringbuf_output_proto;
1950 case BPF_FUNC_ringbuf_reserve:
1951 return &bpf_ringbuf_reserve_proto;
1952 case BPF_FUNC_ringbuf_submit:
1953 return &bpf_ringbuf_submit_proto;
1954 case BPF_FUNC_ringbuf_discard:
1955 return &bpf_ringbuf_discard_proto;
1956 case BPF_FUNC_ringbuf_query:
1957 return &bpf_ringbuf_query_proto;
1958 case BPF_FUNC_strncmp:
1959 return &bpf_strncmp_proto;
1960 case BPF_FUNC_strtol:
1961 return &bpf_strtol_proto;
1962 case BPF_FUNC_strtoul:
1963 return &bpf_strtoul_proto;
1964 case BPF_FUNC_get_current_pid_tgid:
1965 return &bpf_get_current_pid_tgid_proto;
1966 case BPF_FUNC_get_ns_current_pid_tgid:
1967 return &bpf_get_ns_current_pid_tgid_proto;
1968 default:
1969 break;
1970 }
1971
1972 if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1973 return NULL;
1974
1975 switch (func_id) {
1976 case BPF_FUNC_spin_lock:
1977 return &bpf_spin_lock_proto;
1978 case BPF_FUNC_spin_unlock:
1979 return &bpf_spin_unlock_proto;
1980 case BPF_FUNC_jiffies64:
1981 return &bpf_jiffies64_proto;
1982 case BPF_FUNC_per_cpu_ptr:
1983 return &bpf_per_cpu_ptr_proto;
1984 case BPF_FUNC_this_cpu_ptr:
1985 return &bpf_this_cpu_ptr_proto;
1986 case BPF_FUNC_timer_init:
1987 return &bpf_timer_init_proto;
1988 case BPF_FUNC_timer_set_callback:
1989 return &bpf_timer_set_callback_proto;
1990 case BPF_FUNC_timer_start:
1991 return &bpf_timer_start_proto;
1992 case BPF_FUNC_timer_cancel:
1993 return &bpf_timer_cancel_proto;
1994 case BPF_FUNC_kptr_xchg:
1995 return &bpf_kptr_xchg_proto;
1996 case BPF_FUNC_for_each_map_elem:
1997 return &bpf_for_each_map_elem_proto;
1998 case BPF_FUNC_loop:
1999 return &bpf_loop_proto;
2000 case BPF_FUNC_user_ringbuf_drain:
2001 return &bpf_user_ringbuf_drain_proto;
2002 case BPF_FUNC_ringbuf_reserve_dynptr:
2003 return &bpf_ringbuf_reserve_dynptr_proto;
2004 case BPF_FUNC_ringbuf_submit_dynptr:
2005 return &bpf_ringbuf_submit_dynptr_proto;
2006 case BPF_FUNC_ringbuf_discard_dynptr:
2007 return &bpf_ringbuf_discard_dynptr_proto;
2008 case BPF_FUNC_dynptr_from_mem:
2009 return &bpf_dynptr_from_mem_proto;
2010 case BPF_FUNC_dynptr_read:
2011 return &bpf_dynptr_read_proto;
2012 case BPF_FUNC_dynptr_write:
2013 return &bpf_dynptr_write_proto;
2014 case BPF_FUNC_dynptr_data:
2015 return &bpf_dynptr_data_proto;
2016 #ifdef CONFIG_CGROUPS
2017 case BPF_FUNC_cgrp_storage_get:
2018 return &bpf_cgrp_storage_get_proto;
2019 case BPF_FUNC_cgrp_storage_delete:
2020 return &bpf_cgrp_storage_delete_proto;
2021 case BPF_FUNC_get_current_cgroup_id:
2022 return &bpf_get_current_cgroup_id_proto;
2023 case BPF_FUNC_get_current_ancestor_cgroup_id:
2024 return &bpf_get_current_ancestor_cgroup_id_proto;
2025 #endif
2026 default:
2027 break;
2028 }
2029
2030 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2031 return NULL;
2032
2033 switch (func_id) {
2034 case BPF_FUNC_trace_printk:
2035 return bpf_get_trace_printk_proto();
2036 case BPF_FUNC_get_current_task:
2037 return &bpf_get_current_task_proto;
2038 case BPF_FUNC_get_current_task_btf:
2039 return &bpf_get_current_task_btf_proto;
2040 case BPF_FUNC_probe_read_user:
2041 return &bpf_probe_read_user_proto;
2042 case BPF_FUNC_probe_read_kernel:
2043 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2044 NULL : &bpf_probe_read_kernel_proto;
2045 case BPF_FUNC_probe_read_user_str:
2046 return &bpf_probe_read_user_str_proto;
2047 case BPF_FUNC_probe_read_kernel_str:
2048 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2049 NULL : &bpf_probe_read_kernel_str_proto;
2050 case BPF_FUNC_snprintf_btf:
2051 return &bpf_snprintf_btf_proto;
2052 case BPF_FUNC_snprintf:
2053 return &bpf_snprintf_proto;
2054 case BPF_FUNC_task_pt_regs:
2055 return &bpf_task_pt_regs_proto;
2056 case BPF_FUNC_trace_vprintk:
2057 return bpf_get_trace_vprintk_proto();
2058 case BPF_FUNC_perf_event_read_value:
2059 return bpf_get_perf_event_read_value_proto();
2060 default:
2061 return NULL;
2062 }
2063 }
2064 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
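
/* A minimal sketch of how a subsystem typically plugs bpf_base_func_proto()
 * into its get_func_proto() callback: subsystem-specific helpers are handled
 * first and everything else falls back to the base set above. The helper name
 * and proto below are placeholders for the example.
 *
 *	static const struct bpf_func_proto *
 *	example_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_example_helper:		// placeholder helper
 *			return &example_helper_proto;	// placeholder proto
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */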
2065
2066 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2067 struct bpf_spin_lock *spin_lock)
2068 {
2069 struct list_head *head = list_head, *orig_head = list_head;
2070
2071 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2072 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2073
2074 /* Do the actual list draining outside the lock to not hold the lock for
2075 * too long, and also prevent deadlocks if tracing programs end up
2076 * executing on entry/exit of functions called inside the critical
2077 * section, and end up doing map ops that call bpf_list_head_free for
2078 * the same map value again.
2079 */
2080 __bpf_spin_lock_irqsave(spin_lock);
2081 if (!head->next || list_empty(head))
2082 goto unlock;
2083 head = head->next;
2084 unlock:
2085 INIT_LIST_HEAD(orig_head);
2086 __bpf_spin_unlock_irqrestore(spin_lock);
2087
2088 while (head != orig_head) {
2089 void *obj = head;
2090
2091 obj -= field->graph_root.node_offset;
2092 head = head->next;
2093 /* The contained type can also have resources, including a
2094 * bpf_list_head which needs to be freed.
2095 */
2096 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2097 }
2098 }
2099
2100 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2101 * 'rb_node *', so field name of rb_node within containing struct is not
2102 * needed.
2103 *
2104 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2105 * graph_root.node_offset, it's not necessary to know field name
2106 * or type of node struct
2107 */
2108 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2109 for (pos = rb_first_postorder(root); \
2110 pos && ({ n = rb_next_postorder(pos); 1; }); \
2111 pos = n)
2112
2113 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2114 struct bpf_spin_lock *spin_lock)
2115 {
2116 struct rb_root_cached orig_root, *root = rb_root;
2117 struct rb_node *pos, *n;
2118 void *obj;
2119
2120 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2121 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2122
2123 __bpf_spin_lock_irqsave(spin_lock);
2124 orig_root = *root;
2125 *root = RB_ROOT_CACHED;
2126 __bpf_spin_unlock_irqrestore(spin_lock);
2127
2128 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2129 obj = pos;
2130 obj -= field->graph_root.node_offset;
2131
2132
2133 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2134 }
2135 }
2136
2137 __bpf_kfunc_start_defs();
2138
2139 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2140 {
2141 struct btf_struct_meta *meta = meta__ign;
2142 u64 size = local_type_id__k;
2143 void *p;
2144
2145 p = bpf_mem_alloc(&bpf_global_ma, size);
2146 if (!p)
2147 return NULL;
2148 if (meta)
2149 bpf_obj_init(meta->record, p);
2150 return p;
2151 }
2152
2153 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2154 {
2155 u64 size = local_type_id__k;
2156
2157 /* The verifier has ensured that meta__ign must be NULL */
2158 return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2159 }
2160
2161 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2162 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2163 {
2164 struct bpf_mem_alloc *ma;
2165
2166 if (rec && rec->refcount_off >= 0 &&
2167 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2168 /* Object is refcounted and refcount_dec didn't result in 0
2169 * refcount. Return without freeing the object
2170 */
2171 return;
2172 }
2173
2174 if (rec)
2175 bpf_obj_free_fields(rec, p);
2176
2177 if (percpu)
2178 ma = &bpf_global_percpu_ma;
2179 else
2180 ma = &bpf_global_ma;
2181 bpf_mem_free_rcu(ma, p);
2182 }
2183
2184 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2185 {
2186 struct btf_struct_meta *meta = meta__ign;
2187 void *p = p__alloc;
2188
2189 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2190 }
2191
2192 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2193 {
2194 /* The verifier has ensured that meta__ign must be NULL */
2195 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2196 }
2197
2198 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2199 {
2200 struct btf_struct_meta *meta = meta__ign;
2201 struct bpf_refcount *ref;
2202
2203 /* Could just cast directly to refcount_t *, but need some code using
2204 * bpf_refcount type so that it is emitted in vmlinux BTF
2205 */
2206 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2207 if (!refcount_inc_not_zero((refcount_t *)ref))
2208 return NULL;
2209
2210 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2211 * in verifier.c
2212 */
2213 return (void *)p__refcounted_kptr;
2214 }
2215
2216 static int __bpf_list_add(struct bpf_list_node_kern *node,
2217 struct bpf_list_head *head,
2218 bool tail, struct btf_record *rec, u64 off)
2219 {
2220 struct list_head *n = &node->list_head, *h = (void *)head;
2221
2222 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2223 * called on its fields, so init here
2224 */
2225 if (unlikely(!h->next))
2226 INIT_LIST_HEAD(h);
2227
2228 /* node->owner != NULL implies !list_empty(n), no need to separately
2229 * check the latter
2230 */
2231 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2232 /* Only called from BPF prog, no need to migrate_disable */
2233 __bpf_obj_drop_impl((void *)n - off, rec, false);
2234 return -EINVAL;
2235 }
2236
2237 tail ? list_add_tail(n, h) : list_add(n, h);
2238 WRITE_ONCE(node->owner, head);
2239
2240 return 0;
2241 }
2242
2243 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2244 struct bpf_list_node *node,
2245 void *meta__ign, u64 off)
2246 {
2247 struct bpf_list_node_kern *n = (void *)node;
2248 struct btf_struct_meta *meta = meta__ign;
2249
2250 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2251 }
2252
2253 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2254 struct bpf_list_node *node,
2255 void *meta__ign, u64 off)
2256 {
2257 struct bpf_list_node_kern *n = (void *)node;
2258 struct btf_struct_meta *meta = meta__ign;
2259
2260 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2261 }
2262
2263 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2264 {
2265 struct list_head *n, *h = (void *)head;
2266 struct bpf_list_node_kern *node;
2267
2268 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2269 * called on its fields, so init here
2270 */
2271 if (unlikely(!h->next))
2272 INIT_LIST_HEAD(h);
2273 if (list_empty(h))
2274 return NULL;
2275
2276 n = tail ? h->prev : h->next;
2277 node = container_of(n, struct bpf_list_node_kern, list_head);
2278 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2279 return NULL;
2280
2281 list_del_init(n);
2282 WRITE_ONCE(node->owner, NULL);
2283 return (struct bpf_list_node *)n;
2284 }
2285
2286 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2287 {
2288 return __bpf_list_del(head, false);
2289 }
2290
2291 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2292 {
2293 return __bpf_list_del(head, true);
2294 }
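
/* Illustrative BPF-side use of the list kfuncs above: a minimal sketch that
 * assumes the bpf_obj_new()/bpf_obj_drop()/bpf_list_push_front()/
 * bpf_list_pop_front() convenience wrappers and the private()/__contains()
 * declaration macros from the selftests' bpf_experimental.h.
 *
 *	struct elem {
 *		struct bpf_list_node node;
 *		int v;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_list_head ghead __contains(elem, node);
 *
 *	SEC("tc")
 *	int list_example(void *ctx)
 *	{
 *		struct bpf_list_node *n;
 *		struct elem *e;
 *
 *		e = bpf_obj_new(typeof(*e));
 *		if (!e)
 *			return 0;
 *		e->v = 1;
 *
 *		// The head and its nodes must be manipulated under the lock.
 *		bpf_spin_lock(&glock);
 *		bpf_list_push_front(&ghead, &e->node);
 *		n = bpf_list_pop_front(&ghead);
 *		bpf_spin_unlock(&glock);
 *
 *		if (n) {
 *			e = container_of(n, struct elem, node);
 *			bpf_obj_drop(e);	// free the popped (owned) object
 *		}
 *		return 0;
 *	}
 */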
2295
2296 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2297 struct bpf_rb_node *node)
2298 {
2299 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2300 struct rb_root_cached *r = (struct rb_root_cached *)root;
2301 struct rb_node *n = &node_internal->rb_node;
2302
2303 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2304 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2305 */
2306 if (READ_ONCE(node_internal->owner) != root)
2307 return NULL;
2308
2309 rb_erase_cached(n, r);
2310 RB_CLEAR_NODE(n);
2311 WRITE_ONCE(node_internal->owner, NULL);
2312 return (struct bpf_rb_node *)n;
2313 }
2314
2315 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2316 * program
2317 */
2318 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2319 struct bpf_rb_node_kern *node,
2320 void *less, struct btf_record *rec, u64 off)
2321 {
2322 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2323 struct rb_node *parent = NULL, *n = &node->rb_node;
2324 bpf_callback_t cb = (bpf_callback_t)less;
2325 bool leftmost = true;
2326
2327 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2328 * check the latter
2329 */
2330 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2331 /* Only called from BPF prog, no need to migrate_disable */
2332 __bpf_obj_drop_impl((void *)n - off, rec, false);
2333 return -EINVAL;
2334 }
2335
2336 while (*link) {
2337 parent = *link;
2338 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2339 link = &parent->rb_left;
2340 } else {
2341 link = &parent->rb_right;
2342 leftmost = false;
2343 }
2344 }
2345
2346 rb_link_node(n, parent, link);
2347 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2348 WRITE_ONCE(node->owner, root);
2349 return 0;
2350 }
2351
2352 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2353 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2354 void *meta__ign, u64 off)
2355 {
2356 struct btf_struct_meta *meta = meta__ign;
2357 struct bpf_rb_node_kern *n = (void *)node;
2358
2359 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2360 }
2361
2362 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2363 {
2364 struct rb_root_cached *r = (struct rb_root_cached *)root;
2365
2366 return (struct bpf_rb_node *)rb_first_cached(r);
2367 }
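
/* Illustrative BPF-side use of the rbtree kfuncs: a minimal sketch assuming
 * the bpf_obj_new()/bpf_rbtree_add() wrappers and the private()/__contains()
 * declaration macros from the selftests' bpf_experimental.h.
 *
 *	struct node_data {
 *		struct bpf_rb_node node;
 *		__u32 key;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_rb_root groot __contains(node_data, node);
 *
 *	// The 'less' callback runs as a BPF subprogram, matching the
 *	// bpf_rbtree_add_impl() signature above.
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	SEC("tc")
 *	int rbtree_example(void *ctx)
 *	{
 *		struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *		if (!n)
 *			return 0;
 *		n->key = 123;
 *
 *		bpf_spin_lock(&glock);
 *		bpf_rbtree_add(&groot, &n->node, less);
 *		bpf_spin_unlock(&glock);
 *		return 0;
 *	}
 */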
2368
2369 /**
2370 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2371 * kfunc which is not stored in a map as a kptr must be released by calling
2372 * bpf_task_release().
2373 * @p: The task on which a reference is being acquired.
2374 */
2375 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2376 {
2377 if (refcount_inc_not_zero(&p->rcu_users))
2378 return p;
2379 return NULL;
2380 }
2381
2382 /**
2383 * bpf_task_release - Release the reference acquired on a task.
2384 * @p: The task on which a reference is being released.
2385 */
2386 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2387 {
2388 put_task_struct_rcu_user(p);
2389 }
2390
2391 __bpf_kfunc void bpf_task_release_dtor(void *p)
2392 {
2393 put_task_struct_rcu_user(p);
2394 }
2395 CFI_NOSEAL(bpf_task_release_dtor);
2396
2397 #ifdef CONFIG_CGROUPS
2398 /**
2399 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2400 * this kfunc which is not stored in a map as a kptr must be released by
2401 * calling bpf_cgroup_release().
2402 * @cgrp: The cgroup on which a reference is being acquired.
2403 */
2404 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2405 {
2406 return cgroup_tryget(cgrp) ? cgrp : NULL;
2407 }
2408
2409 /**
2410 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2411 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2412 * not be freed until the current grace period has ended, even if its refcount
2413 * drops to 0.
2414 * @cgrp: The cgroup on which a reference is being released.
2415 */
2416 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2417 {
2418 cgroup_put(cgrp);
2419 }
2420
2421 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2422 {
2423 cgroup_put(cgrp);
2424 }
2425 CFI_NOSEAL(bpf_cgroup_release_dtor);
2426
2427 /**
2428 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2429 * array. A cgroup returned by this kfunc which is not subsequently stored in a
2430 * map must be released by calling bpf_cgroup_release().
2431 * @cgrp: The cgroup for which we're performing a lookup.
2432 * @level: The level of ancestor to look up.
2433 */
2434 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2435 {
2436 struct cgroup *ancestor;
2437
2438 if (level > cgrp->level || level < 0)
2439 return NULL;
2440
2441 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2442 ancestor = cgrp->ancestors[level];
2443 if (!cgroup_tryget(ancestor))
2444 return NULL;
2445 return ancestor;
2446 }
2447
2448 /**
2449 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2450 * kfunc which is not subsequently stored in a map must be released by calling
2451 * bpf_cgroup_release().
2452 * @cgid: cgroup id.
2453 */
2454 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2455 {
2456 struct cgroup *cgrp;
2457
2458 cgrp = cgroup_get_from_id(cgid);
2459 if (IS_ERR(cgrp))
2460 return NULL;
2461 return cgrp;
2462 }
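
/* Illustrative BPF-side use of bpf_cgroup_from_id()/bpf_cgroup_release(): a
 * minimal sketch; the cgroup ID and section name are assumptions.
 *
 *	SEC("tp_btf/sched_switch")
 *	int check_cgroup(void *ctx)
 *	{
 *		struct cgroup *cgrp;
 *
 *		cgrp = bpf_cgroup_from_id(1);	// ID 1 used purely for illustration
 *		if (!cgrp)
 *			return 0;
 *		bpf_printk("cgroup level %d", cgrp->level);
 *		bpf_cgroup_release(cgrp);	// the acquired reference must be released
 *		return 0;
 *	}
 */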
2463
2464 /**
2465 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2466 * task's membership of cgroup ancestry.
2467 * @task: the task to be tested
2468 * @ancestor: possible ancestor of @task's cgroup
2469 *
2470 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2471 * It follows all the same rules as cgroup_is_descendant, and only applies
2472 * to the default hierarchy.
2473 */
2474 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2475 struct cgroup *ancestor)
2476 {
2477 long ret;
2478
2479 rcu_read_lock();
2480 ret = task_under_cgroup_hierarchy(task, ancestor);
2481 rcu_read_unlock();
2482 return ret;
2483 }
2484
2485 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2486 {
2487 struct bpf_array *array = container_of(map, struct bpf_array, map);
2488 struct cgroup *cgrp;
2489
2490 if (unlikely(idx >= array->map.max_entries))
2491 return -E2BIG;
2492
2493 cgrp = READ_ONCE(array->ptrs[idx]);
2494 if (unlikely(!cgrp))
2495 return -EAGAIN;
2496
2497 return task_under_cgroup_hierarchy(current, cgrp);
2498 }
2499
2500 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2501 .func = bpf_current_task_under_cgroup,
2502 .gpl_only = false,
2503 .ret_type = RET_INTEGER,
2504 .arg1_type = ARG_CONST_MAP_PTR,
2505 .arg2_type = ARG_ANYTHING,
2506 };
2507
2508 /**
2509 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2510 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2511 * hierarchy ID.
2512 * @task: The target task
2513 * @hierarchy_id: The ID of a cgroup1 hierarchy
2514 *
2515 * On success, the cgroup is returned. On failure, NULL is returned.
2516 */
2517 __bpf_kfunc struct cgroup *
2518 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2519 {
2520 struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2521
2522 if (IS_ERR(cgrp))
2523 return NULL;
2524 return cgrp;
2525 }
2526 #endif /* CONFIG_CGROUPS */
2527
2528 /**
2529 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2530 * in the root pid namespace idr. If a task is returned, it must either be
2531 * stored in a map, or released with bpf_task_release().
2532 * @pid: The pid of the task being looked up.
2533 */
2534 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2535 {
2536 struct task_struct *p;
2537
2538 rcu_read_lock();
2539 p = find_task_by_pid_ns(pid, &init_pid_ns);
2540 if (p)
2541 p = bpf_task_acquire(p);
2542 rcu_read_unlock();
2543
2544 return p;
2545 }
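
/* Illustrative BPF-side use of bpf_task_from_pid()/bpf_task_release(): a
 * minimal sketch; the pid value and section name are assumptions.
 *
 *	SEC("tp_btf/sched_switch")
 *	int lookup_pid_one(void *ctx)
 *	{
 *		struct task_struct *p = bpf_task_from_pid(1);
 *
 *		if (!p)
 *			return 0;
 *		bpf_printk("comm: %s", p->comm);
 *		bpf_task_release(p);	// the acquired reference must be released
 *		return 0;
 *	}
 */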
2546
2547 /**
2548 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
2549 * in the pid namespace of the current task. If a task is returned, it must
2550 * either be stored in a map, or released with bpf_task_release().
2551 * @vpid: The vpid of the task being looked up.
2552 */
2553 __bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
2554 {
2555 struct task_struct *p;
2556
2557 rcu_read_lock();
2558 p = find_task_by_vpid(vpid);
2559 if (p)
2560 p = bpf_task_acquire(p);
2561 rcu_read_unlock();
2562
2563 return p;
2564 }
2565
2566 /**
2567 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2568 * @p: The dynptr whose data slice to retrieve
2569 * @offset: Offset into the dynptr
2570 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2571 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2572 * length of the requested slice. This must be a constant.
2573 *
2574 * For non-skb and non-xdp type dynptrs, there is no difference between
2575 * bpf_dynptr_slice and bpf_dynptr_data.
2576 *
2577 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2578 *
2579 * If the intention is to write to the data slice, please use
2580 * bpf_dynptr_slice_rdwr.
2581 *
2582 * The user must check that the returned pointer is not null before using it.
2583 *
2584 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2585 * does not change the underlying packet data pointers, so a call to
2586 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2587 * the bpf program.
2588 *
2589 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2590 * data slice (can be either direct pointer to the data or a pointer to the user
2591 * provided buffer, with its contents containing the data, if unable to obtain
2592 * direct pointer)
2593 */
2594 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
2595 void *buffer__opt, u32 buffer__szk)
2596 {
2597 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2598 enum bpf_dynptr_type type;
2599 u32 len = buffer__szk;
2600 int err;
2601
2602 if (!ptr->data)
2603 return NULL;
2604
2605 err = bpf_dynptr_check_off_len(ptr, offset, len);
2606 if (err)
2607 return NULL;
2608
2609 type = bpf_dynptr_get_type(ptr);
2610
2611 switch (type) {
2612 case BPF_DYNPTR_TYPE_LOCAL:
2613 case BPF_DYNPTR_TYPE_RINGBUF:
2614 return ptr->data + ptr->offset + offset;
2615 case BPF_DYNPTR_TYPE_SKB:
2616 if (buffer__opt)
2617 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2618 else
2619 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2620 case BPF_DYNPTR_TYPE_XDP:
2621 {
2622 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2623 if (!IS_ERR_OR_NULL(xdp_ptr))
2624 return xdp_ptr;
2625
2626 if (!buffer__opt)
2627 return NULL;
2628 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2629 return buffer__opt;
2630 }
2631 default:
2632 WARN_ONCE(true, "unknown dynptr type %d\n", type);
2633 return NULL;
2634 }
2635 }
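
/* Illustrative BPF-side use of bpf_dynptr_slice() on an skb dynptr: a minimal
 * sketch assuming bpf_dynptr_from_skb(), a networking kfunc defined elsewhere,
 * is available to the program type.
 *
 *	SEC("tc")
 *	int parse_eth(struct __sk_buff *skb)
 *	{
 *		struct bpf_dynptr dptr;
 *		struct ethhdr buf, *eth;
 *
 *		if (bpf_dynptr_from_skb(skb, 0, &dptr))
 *			return TC_ACT_OK;
 *
 *		// Returns a direct pointer when the bytes are linear, otherwise
 *		// copies them into 'buf' and returns 'buf'; NULL on error.
 *		eth = bpf_dynptr_slice(&dptr, 0, &buf, sizeof(buf));
 *		if (!eth)
 *			return TC_ACT_OK;
 *		if (eth->h_proto == bpf_htons(ETH_P_IP))
 *			bpf_printk("IPv4 frame");
 *		return TC_ACT_OK;
 *	}
 */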
2636
2637 /**
2638 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2639 * @p: The dynptr whose data slice to retrieve
2640 * @offset: Offset into the dynptr
2641 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2642 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2643 * length of the requested slice. This must be a constant.
2644 *
2645 * For non-skb and non-xdp type dynptrs, there is no difference between
2646 * bpf_dynptr_slice and bpf_dynptr_data.
2647 *
2648 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2649 *
2650 * The returned pointer is writable and may point to either directly the dynptr
2651 * data at the requested offset or to the buffer if unable to obtain a direct
2652 * data pointer (example: the requested slice is to the paged area of an skb
2653 * packet). In the case where the returned pointer is to the buffer, the user
2654 * is responsible for persisting writes through calling bpf_dynptr_write(). This
2655 * usually looks something like this pattern:
2656 *
2657 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2658 * if (!eth)
2659 * return TC_ACT_SHOT;
2660 *
2661 * // mutate eth header //
2662 *
2663 * if (eth == buffer)
2664 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2665 *
2666 * Please note that, as in the example above, the user must check that the
2667 * returned pointer is not null before using it.
2668 *
2669 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2670 * does not change the underlying packet data pointers, so a call to
2671 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2672 * the bpf program.
2673 *
2674 * Return: NULL if the call failed (eg invalid dynptr), pointer to a
2675 * data slice (can be either direct pointer to the data or a pointer to the user
2676 * provided buffer, with its contents containing the data, if unable to obtain
2677 * direct pointer)
2678 */
2679 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
2680 void *buffer__opt, u32 buffer__szk)
2681 {
2682 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2683
2684 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2685 return NULL;
2686
2687 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2688 *
2689 * For skb-type dynptrs, it is safe to write into the returned pointer
2690 * if the bpf program allows skb data writes. There are two possibilities
2691 * that may occur when calling bpf_dynptr_slice_rdwr:
2692 *
2693 * 1) The requested slice is in the head of the skb. In this case, the
2694 * returned pointer is directly to skb data, and if the skb is cloned, the
2695 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2696 * The pointer can be directly written into.
2697 *
2698 * 2) Some portion of the requested slice is in the paged buffer area.
2699 * In this case, the requested data will be copied out into the buffer
2700 * and the returned pointer will be a pointer to the buffer. The skb
2701 * will not be pulled. To persist the write, the user will need to call
2702 * bpf_dynptr_write(), which will pull the skb and commit the write.
2703 *
2704 * Similarly for xdp programs, if the requested slice is not across xdp
2705 * fragments, then a direct pointer will be returned, otherwise the data
2706 * will be copied out into the buffer and the user will need to call
2707 * bpf_dynptr_write() to commit changes.
2708 */
2709 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2710 }
2711
2712 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
2713 {
2714 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2715 u32 size;
2716
2717 if (!ptr->data || start > end)
2718 return -EINVAL;
2719
2720 size = __bpf_dynptr_size(ptr);
2721
2722 if (start > size || end > size)
2723 return -ERANGE;
2724
2725 ptr->offset += start;
2726 bpf_dynptr_set_size(ptr, end - start);
2727
2728 return 0;
2729 }
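
/* bpf_dynptr_adjust() narrows an existing dynptr's view in place. A minimal
 * BPF-side sketch, assuming 'dptr' already wraps at least 12 bytes of data:
 *
 *	// Restrict the view to bytes [4, 12) of the current view; subsequent
 *	// reads and writes are relative to the new start and bounded by the
 *	// new 8-byte size.
 *	if (bpf_dynptr_adjust(&dptr, 4, 12))
 *		return 0;
 */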
2730
2731 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2732 {
2733 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2734
2735 return !ptr->data;
2736 }
2737
2738 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2739 {
2740 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2741
2742 if (!ptr->data)
2743 return false;
2744
2745 return __bpf_dynptr_is_rdonly(ptr);
2746 }
2747
2748 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
2749 {
2750 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2751
2752 if (!ptr->data)
2753 return -EINVAL;
2754
2755 return __bpf_dynptr_size(ptr);
2756 }
2757
2758 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2759 struct bpf_dynptr *clone__uninit)
2760 {
2761 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2762 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2763
2764 if (!ptr->data) {
2765 bpf_dynptr_set_null(clone);
2766 return -EINVAL;
2767 }
2768
2769 *clone = *ptr;
2770
2771 return 0;
2772 }
2773
2774 /**
2775 * bpf_dynptr_copy() - Copy data from one dynptr to another.
2776 * @dst_ptr: Destination dynptr - where data should be copied to
2777 * @dst_off: Offset into the destination dynptr
2778 * @src_ptr: Source dynptr - where data should be copied from
2779 * @src_off: Offset into the source dynptr
2780 * @size: Length of the data to copy from source to destination
2781 *
2782 * Copies data from source dynptr to destination dynptr.
2783 * Returns 0 on success; negative error, otherwise.
2784 */
2785 __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
2786 struct bpf_dynptr *src_ptr, u32 src_off, u32 size)
2787 {
2788 struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
2789 struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
2790 void *src_slice, *dst_slice;
2791 char buf[256];
2792 u32 off;
2793
2794 src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
2795 dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);
2796
2797 if (src_slice && dst_slice) {
2798 memmove(dst_slice, src_slice, size);
2799 return 0;
2800 }
2801
2802 if (src_slice)
2803 return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);
2804
2805 if (dst_slice)
2806 return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);
2807
2808 if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
2809 bpf_dynptr_check_off_len(src, src_off, size))
2810 return -E2BIG;
2811
2812 off = 0;
2813 while (off < size) {
2814 u32 chunk_sz = min_t(u32, sizeof(buf), size - off);
2815 int err;
2816
2817 err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
2818 if (err)
2819 return err;
2820 err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
2821 if (err)
2822 return err;
2823
2824 off += chunk_sz;
2825 }
2826 return 0;
2827 }
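
/* Illustrative BPF-side use of bpf_dynptr_copy(): a minimal sketch copying
 * between two already-initialized dynptrs (for instance an skb dynptr and a
 * ringbuf dynptr); the offsets and size are assumptions.
 *
 *	// Copy 16 bytes starting at offset 0 of 'src' into 'dst' at offset 8.
 *	if (bpf_dynptr_copy(&dst, 8, &src, 0, 16))
 *		return 0;	// negative errno on failure, e.g. -E2BIG
 */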
2828
2829 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2830 {
2831 return obj;
2832 }
2833
2834 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
2835 {
2836 return (void *)obj__ign;
2837 }
2838
2839 __bpf_kfunc void bpf_rcu_read_lock(void)
2840 {
2841 rcu_read_lock();
2842 }
2843
2844 __bpf_kfunc void bpf_rcu_read_unlock(void)
2845 {
2846 rcu_read_unlock();
2847 }
2848
2849 struct bpf_throw_ctx {
2850 struct bpf_prog_aux *aux;
2851 u64 sp;
2852 u64 bp;
2853 int cnt;
2854 };
2855
2856 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
2857 {
2858 struct bpf_throw_ctx *ctx = cookie;
2859 struct bpf_prog *prog;
2860
2861 if (!is_bpf_text_address(ip))
2862 return !ctx->cnt;
2863 prog = bpf_prog_ksym_find(ip);
2864 ctx->cnt++;
2865 if (bpf_is_subprog(prog))
2866 return true;
2867 ctx->aux = prog->aux;
2868 ctx->sp = sp;
2869 ctx->bp = bp;
2870 return false;
2871 }
2872
2873 __bpf_kfunc void bpf_throw(u64 cookie)
2874 {
2875 struct bpf_throw_ctx ctx = {};
2876
2877 arch_bpf_stack_walk(bpf_stack_walker, &ctx);
2878 WARN_ON_ONCE(!ctx.aux);
2879 if (ctx.aux)
2880 WARN_ON_ONCE(!ctx.aux->exception_boundary);
2881 WARN_ON_ONCE(!ctx.bp);
2882 WARN_ON_ONCE(!ctx.cnt);
2883 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
2884 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
2885 * which skips compiler generated instrumentation to do the same.
2886 */
2887 kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
2888 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
2889 WARN(1, "A call to BPF exception callback should never return\n");
2890 }
2891
2892 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
2893 {
2894 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2895 struct bpf_map *map = p__map;
2896
2897 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
2898 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
2899
2900 if (flags)
2901 return -EINVAL;
2902
2903 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
2904 }
2905
2906 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
2907 {
2908 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2909 struct bpf_work *w;
2910
2911 if (in_nmi())
2912 return -EOPNOTSUPP;
2913 if (flags)
2914 return -EINVAL;
2915 w = READ_ONCE(async->work);
2916 if (!w || !READ_ONCE(w->cb.prog))
2917 return -EINVAL;
2918
2919 schedule_work(&w->work);
2920 return 0;
2921 }
2922
2923 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
2924 int (callback_fn)(void *map, int *key, void *value),
2925 unsigned int flags,
2926 void *aux__ign)
2927 {
2928 struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign;
2929 struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2930
2931 if (flags)
2932 return -EINVAL;
2933
2934 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
2935 }
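
/* Illustrative BPF-side use of the bpf_wq kfuncs: a minimal sketch. The map
 * layout and section name are assumptions, and bpf_wq_set_callback() is the
 * convenience wrapper from the selftests' bpf_experimental.h around
 * bpf_wq_set_callback_impl().
 *
 *	struct elem {
 *		struct bpf_wq wq;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct elem);
 *	} wq_map SEC(".maps");
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		return 0;	// runs later from workqueue (sleepable) context
 *	}
 *
 *	SEC("tc")
 *	int schedule_work_example(void *ctx)
 *	{
 *		int key = 0;
 *		struct elem *v = bpf_map_lookup_elem(&wq_map, &key);
 *
 *		if (!v)
 *			return 0;
 *		if (bpf_wq_init(&v->wq, &wq_map, 0))
 *			return 0;
 *		if (bpf_wq_set_callback(&v->wq, wq_cb, 0))
 *			return 0;
 *		bpf_wq_start(&v->wq, 0);
 *		return 0;
 *	}
 */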
2936
2937 __bpf_kfunc void bpf_preempt_disable(void)
2938 {
2939 preempt_disable();
2940 }
2941
2942 __bpf_kfunc void bpf_preempt_enable(void)
2943 {
2944 preempt_enable();
2945 }
2946
2947 struct bpf_iter_bits {
2948 __u64 __opaque[2];
2949 } __aligned(8);
2950
2951 #define BITS_ITER_NR_WORDS_MAX 511
2952
2953 struct bpf_iter_bits_kern {
2954 union {
2955 __u64 *bits;
2956 __u64 bits_copy;
2957 };
2958 int nr_bits;
2959 int bit;
2960 } __aligned(8);
2961
2962 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
2963 * a u64 pointer and an unsigned long pointer to find_next_bit() will
2964 * return the same result, as both point to the same 8-byte area.
2965 *
2966 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
2967 * pointer also makes no difference. This is because the first iterated
2968 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
2969 * long is composed of bits 32-63 of the u64.
2970 *
2971 * However, for 32-bit big-endian hosts, this is not the case. The first
2972 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
2973 * ulong values within the u64.
2974 */
2975 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
2976 {
2977 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
2978 unsigned int i;
2979
2980 for (i = 0; i < nr; i++)
2981 bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
2982 #endif
2983 }
2984
2985 /**
2986 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
2987 * @it: The new bpf_iter_bits to be created
2988 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
2989 * @nr_words: The size of the specified memory area, measured in 8-byte units.
2990 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
2991 * further reduced by the BPF memory allocator implementation.
2992 *
2993 * This function initializes a new bpf_iter_bits structure for iterating over
2994 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
2995 * copies the data of the memory area to the newly created bpf_iter_bits @it for
2996 * subsequent iteration operations.
2997 *
2998 * On success, 0 is returned. On failure, a negative errno is returned.
2999 */
3000 __bpf_kfunc int
3001 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
3002 {
3003 struct bpf_iter_bits_kern *kit = (void *)it;
3004 u32 nr_bytes = nr_words * sizeof(u64);
3005 u32 nr_bits = BYTES_TO_BITS(nr_bytes);
3006 int err;
3007
3008 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
3009 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
3010 __alignof__(struct bpf_iter_bits));
3011
3012 kit->nr_bits = 0;
3013 kit->bits_copy = 0;
3014 kit->bit = -1;
3015
3016 if (!unsafe_ptr__ign || !nr_words)
3017 return -EINVAL;
3018 if (nr_words > BITS_ITER_NR_WORDS_MAX)
3019 return -E2BIG;
3020
3021 /* Optimization for u64 mask */
3022 if (nr_bits == 64) {
3023 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
3024 if (err)
3025 return -EFAULT;
3026
3027 swap_ulong_in_u64(&kit->bits_copy, nr_words);
3028
3029 kit->nr_bits = nr_bits;
3030 return 0;
3031 }
3032
3033 if (bpf_mem_alloc_check_size(false, nr_bytes))
3034 return -E2BIG;
3035
3036 /* Fallback to memalloc */
3037 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
3038 if (!kit->bits)
3039 return -ENOMEM;
3040
3041 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
3042 if (err) {
3043 bpf_mem_free(&bpf_global_ma, kit->bits);
3044 return err;
3045 }
3046
3047 swap_ulong_in_u64(kit->bits, nr_words);
3048
3049 kit->nr_bits = nr_bits;
3050 return 0;
3051 }
3052
3053 /**
3054 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
3055 * @it: The bpf_iter_bits to be checked
3056 *
3057 * This function returns a pointer to a number holding the index of the next
3058 * set bit in the bits.
3059 *
3060 * If there are no further bits available, it returns NULL.
3061 */
3062 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
3063 {
3064 struct bpf_iter_bits_kern *kit = (void *)it;
3065 int bit = kit->bit, nr_bits = kit->nr_bits;
3066 const void *bits;
3067
3068 if (!nr_bits || bit >= nr_bits)
3069 return NULL;
3070
3071 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3072 bit = find_next_bit(bits, nr_bits, bit + 1);
3073 if (bit >= nr_bits) {
3074 kit->bit = bit;
3075 return NULL;
3076 }
3077
3078 kit->bit = bit;
3079 return &kit->bit;
3080 }
3081
3082 /**
3083 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3084 * @it: The bpf_iter_bits to be destroyed
3085 *
3086 * Destroy the resource associated with the bpf_iter_bits.
3087 */
3088 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3089 {
3090 struct bpf_iter_bits_kern *kit = (void *)it;
3091
3092 if (kit->nr_bits <= 64)
3093 return;
3094 bpf_mem_free(&bpf_global_ma, kit->bits);
3095 }
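
/* Illustrative BPF-side use of the bits iterator: a minimal sketch using the
 * bpf_for_each() convenience macro as used in the BPF selftests; the section
 * name and mask value are assumptions.
 *
 *	SEC("syscall")
 *	int count_bits(void *ctx)
 *	{
 *		__u64 mask = 0xf0f0;	// one 8-byte word, 8 bits set
 *		int bit, cnt = 0;
 *
 *		// Expands to bpf_iter_bits_new(&it, &mask, 1), repeated
 *		// bpf_iter_bits_next() calls and bpf_iter_bits_destroy().
 *		bpf_for_each(bits, bit, &mask, 1)
 *			cnt++;
 *		return cnt;
 *	}
 */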
3096
3097 /**
3098 * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3099 * @dst: Destination address, in kernel space. This buffer must be
3100 * at least @dst__sz bytes long.
3101 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
3102 * @unsafe_ptr__ign: Source address, in user space.
3103 * @flags: The only supported flag is BPF_F_PAD_ZEROS
3104 *
3105 * Copies a NUL-terminated string from userspace to BPF space. If the user
3106 * string is too long, this will still ensure zero termination in the @dst
3107 * buffer unless the buffer size is 0.
3108 *
3109 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
3110 * memset all of @dst on failure.
3111 */
3112 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3113 {
3114 int ret;
3115
3116 if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3117 return -EINVAL;
3118
3119 if (unlikely(!dst__sz))
3120 return 0;
3121
3122 ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3123 if (ret < 0) {
3124 if (flags & BPF_F_PAD_ZEROS)
3125 memset((char *)dst, 0, dst__sz);
3126
3127 return ret;
3128 }
3129
3130 if (flags & BPF_F_PAD_ZEROS)
3131 memset((char *)dst + ret, 0, dst__sz - ret);
3132 else
3133 ((char *)dst)[ret] = '\0';
3134
3135 return ret + 1;
3136 }
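
/* Illustrative BPF-side use of bpf_copy_from_user_str(): a minimal sketch from
 * a sleepable program, where 'uptr' stands for a user-space address obtained
 * from the program's context (illustrative only).
 *
 *	char buf[64];
 *	int len;
 *
 *	len = bpf_copy_from_user_str(buf, sizeof(buf), uptr, BPF_F_PAD_ZEROS);
 *	if (len < 0)			// negative errno on fault
 *		return 0;
 *	// 'len' includes the NUL terminator; with BPF_F_PAD_ZEROS the unused
 *	// tail of 'buf' is zeroed as well.
 */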
3137
3138 /**
3139 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
3140 * @dst: Destination address, in kernel space. This buffer must be
3141 * at least @dst__sz bytes long.
3142 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
3143 * @unsafe_ptr__ign: Source address in the task's address space.
3144 * @tsk: The task whose address space will be used
3145 * @flags: The only supported flag is BPF_F_PAD_ZEROS
3146 *
3147 * Copies a NUL-terminated string from a task's address space to the @dst
3148 * buffer. If the user string is too long, this will still ensure zero
3149 * termination in the @dst buffer unless the buffer size is 0.
3150 *
3151 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
3152 * and memset all of @dst on failure.
3153 *
3154 * Return: The number of copied bytes on success including the NUL terminator.
3155 * A negative error code on failure.
3156 */
3157 __bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
3158 const void __user *unsafe_ptr__ign,
3159 struct task_struct *tsk, u64 flags)
3160 {
3161 int ret;
3162
3163 if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3164 return -EINVAL;
3165
3166 if (unlikely(dst__sz == 0))
3167 return 0;
3168
3169 ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
3170 if (ret < 0) {
3171 if (flags & BPF_F_PAD_ZEROS)
3172 memset(dst, 0, dst__sz);
3173 return ret;
3174 }
3175
3176 if (flags & BPF_F_PAD_ZEROS)
3177 memset(dst + ret, 0, dst__sz - ret);
3178
3179 return ret + 1;
3180 }
3181
3182 /* Keep unsigned long in the prototype so that the kfunc is usable when emitted
3183 * to vmlinux.h in BPF programs directly, but note that while in a BPF prog the
3184 * unsigned long always points to an 8-byte region on the stack, the kernel may
3185 * only read and write 4 bytes of it on 32-bit.
3186 */
3187 __bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
3188 {
3189 local_irq_save(*flags__irq_flag);
3190 }
3191
3192 __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
3193 {
3194 local_irq_restore(*flags__irq_flag);
3195 }
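
/* bpf_local_irq_save()/bpf_local_irq_restore() must be used in pairs, and the
 * flags value must live on the BPF program's stack. A minimal BPF-side sketch:
 *
 *	unsigned long flags;
 *
 *	bpf_local_irq_save(&flags);
 *	// ... IRQ-disabled critical section ...
 *	bpf_local_irq_restore(&flags);
 */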
3196
3197 __bpf_kfunc_end_defs();
3198
3199 BTF_KFUNCS_START(generic_btf_ids)
3200 #ifdef CONFIG_CRASH_DUMP
3201 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
3202 #endif
3203 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
3204 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
3205 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
3206 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
3207 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
3208 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
3209 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
3210 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
3211 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
3212 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3213 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
3214 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
3215 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
3216 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
3217
3218 #ifdef CONFIG_CGROUPS
3219 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3220 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
3221 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3222 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
3223 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
3224 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3225 #endif
3226 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
3227 BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
3228 BTF_ID_FLAGS(func, bpf_throw)
3229 #ifdef CONFIG_BPF_EVENTS
3230 BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
3231 #endif
3232 BTF_KFUNCS_END(generic_btf_ids)
3233
3234 static const struct btf_kfunc_id_set generic_kfunc_set = {
3235 .owner = THIS_MODULE,
3236 .set = &generic_btf_ids,
3237 };
3238
3239
3240 BTF_ID_LIST(generic_dtor_ids)
3241 BTF_ID(struct, task_struct)
3242 BTF_ID(func, bpf_task_release_dtor)
3243 #ifdef CONFIG_CGROUPS
3244 BTF_ID(struct, cgroup)
3245 BTF_ID(func, bpf_cgroup_release_dtor)
3246 #endif
3247
3248 BTF_KFUNCS_START(common_btf_ids)
3249 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
3250 BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
3251 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
3252 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
3253 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
3254 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
3255 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
3256 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
3257 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
3258 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
3259 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
3260 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
3261 #ifdef CONFIG_CGROUPS
3262 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
3263 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
3264 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
3265 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3266 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
3267 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
3268 #endif
3269 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3270 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
3271 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
3272 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
3273 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
3274 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
3275 BTF_ID_FLAGS(func, bpf_dynptr_size)
3276 BTF_ID_FLAGS(func, bpf_dynptr_clone)
3277 BTF_ID_FLAGS(func, bpf_dynptr_copy)
3278 #ifdef CONFIG_NET
3279 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
3280 #endif
3281 BTF_ID_FLAGS(func, bpf_wq_init)
3282 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
3283 BTF_ID_FLAGS(func, bpf_wq_start)
3284 BTF_ID_FLAGS(func, bpf_preempt_disable)
3285 BTF_ID_FLAGS(func, bpf_preempt_enable)
3286 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
3287 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
3288 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
3289 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
3290 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
3291 BTF_ID_FLAGS(func, bpf_get_kmem_cache)
3292 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
3293 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
3294 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
3295 BTF_ID_FLAGS(func, bpf_local_irq_save)
3296 BTF_ID_FLAGS(func, bpf_local_irq_restore)
3297 BTF_KFUNCS_END(common_btf_ids)
3298
3299 static const struct btf_kfunc_id_set common_kfunc_set = {
3300 .owner = THIS_MODULE,
3301 .set = &common_btf_ids,
3302 };
3303
3304 static int __init kfunc_init(void)
3305 {
3306 int ret;
3307 const struct btf_id_dtor_kfunc generic_dtors[] = {
3308 {
3309 .btf_id = generic_dtor_ids[0],
3310 .kfunc_btf_id = generic_dtor_ids[1]
3311 },
3312 #ifdef CONFIG_CGROUPS
3313 {
3314 .btf_id = generic_dtor_ids[2],
3315 .kfunc_btf_id = generic_dtor_ids[3]
3316 },
3317 #endif
3318 };
3319
3320 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
3321 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
3322 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
3323 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
3324 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
3325 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
3326 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
3327 ARRAY_SIZE(generic_dtors),
3328 THIS_MODULE);
3329 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
3330 }
3331
3332 late_initcall(kfunc_init);
3333
3334 /* Get a pointer to dynptr data up to len bytes for read only access. If
3335 * the dynptr doesn't have continuous data up to len bytes, return NULL.
3336 */
3337 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
3338 {
3339 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
3340
3341 return bpf_dynptr_slice(p, 0, NULL, len);
3342 }
3343
3344 /* Get a pointer to dynptr data up to len bytes for read write access. If
3345 * the dynptr doesn't have continuous data up to len bytes, or the dynptr
3346 * is read only, return NULL.
3347 */
3348 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
3349 {
3350 if (__bpf_dynptr_is_rdonly(ptr))
3351 return NULL;
3352 return (void *)__bpf_dynptr_data(ptr, len);
3353 }
3354