1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/bpf-cgroup.h> 6 #include <linux/bpf_trace.h> 7 #include <linux/bpf_lirc.h> 8 #include <linux/bpf_verifier.h> 9 #include <linux/bsearch.h> 10 #include <linux/btf.h> 11 #include <linux/syscalls.h> 12 #include <linux/slab.h> 13 #include <linux/sched/signal.h> 14 #include <linux/vmalloc.h> 15 #include <linux/mmzone.h> 16 #include <linux/anon_inodes.h> 17 #include <linux/fdtable.h> 18 #include <linux/file.h> 19 #include <linux/fs.h> 20 #include <linux/license.h> 21 #include <linux/filter.h> 22 #include <linux/kernel.h> 23 #include <linux/idr.h> 24 #include <linux/cred.h> 25 #include <linux/timekeeping.h> 26 #include <linux/ctype.h> 27 #include <linux/nospec.h> 28 #include <linux/audit.h> 29 #include <uapi/linux/btf.h> 30 #include <linux/pgtable.h> 31 #include <linux/bpf_lsm.h> 32 #include <linux/poll.h> 33 #include <linux/sort.h> 34 #include <linux/bpf-netns.h> 35 #include <linux/rcupdate_trace.h> 36 #include <linux/memcontrol.h> 37 #include <linux/trace_events.h> 38 #include <linux/tracepoint.h> 39 #include <linux/overflow.h> 40 41 #include <net/netfilter/nf_bpf_link.h> 42 #include <net/netkit.h> 43 #include <net/tcx.h> 44 45 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ 46 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ 47 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 48 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) 49 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) 50 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ 51 IS_FD_HASH(map)) 52 53 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) 54 55 DEFINE_PER_CPU(int, bpf_prog_active); 56 static DEFINE_IDR(prog_idr); 57 static DEFINE_SPINLOCK(prog_idr_lock); 58 static DEFINE_IDR(map_idr); 59 static DEFINE_SPINLOCK(map_idr_lock); 60 static DEFINE_IDR(link_idr); 61 static DEFINE_SPINLOCK(link_idr_lock); 62 63 int sysctl_unprivileged_bpf_disabled __read_mostly = 64 IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0; 65 66 static const struct bpf_map_ops * const bpf_map_types[] = { 67 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 68 #define BPF_MAP_TYPE(_id, _ops) \ 69 [_id] = &_ops, 70 #define BPF_LINK_TYPE(_id, _name) 71 #include <linux/bpf_types.h> 72 #undef BPF_PROG_TYPE 73 #undef BPF_MAP_TYPE 74 #undef BPF_LINK_TYPE 75 }; 76 77 /* 78 * If we're handed a bigger struct than we know of, ensure all the unknown bits 79 * are 0 - i.e. new user-space does not rely on any kernel feature extensions 80 * we don't know about yet. 81 * 82 * There is a ToCToU between this function call and the following 83 * copy_from_user() call. However, this is not a concern since this function is 84 * meant to be a future-proofing of bits. 85 */ 86 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, 87 size_t expected_size, 88 size_t actual_size) 89 { 90 int res; 91 92 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 93 return -E2BIG; 94 95 if (actual_size <= expected_size) 96 return 0; 97 98 if (uaddr.is_kernel) 99 res = memchr_inv(uaddr.kernel + expected_size, 0, 100 actual_size - expected_size) == NULL; 101 else 102 res = check_zeroed_user(uaddr.user + expected_size, 103 actual_size - expected_size); 104 if (res < 0) 105 return res; 106 return res ? 
0 : -E2BIG; 107 } 108 109 const struct bpf_map_ops bpf_map_offload_ops = { 110 .map_meta_equal = bpf_map_meta_equal, 111 .map_alloc = bpf_map_offload_map_alloc, 112 .map_free = bpf_map_offload_map_free, 113 .map_check_btf = map_check_no_btf, 114 .map_mem_usage = bpf_map_offload_map_mem_usage, 115 }; 116 117 static void bpf_map_write_active_inc(struct bpf_map *map) 118 { 119 atomic64_inc(&map->writecnt); 120 } 121 122 static void bpf_map_write_active_dec(struct bpf_map *map) 123 { 124 atomic64_dec(&map->writecnt); 125 } 126 127 bool bpf_map_write_active(const struct bpf_map *map) 128 { 129 return atomic64_read(&map->writecnt) != 0; 130 } 131 132 static u32 bpf_map_value_size(const struct bpf_map *map) 133 { 134 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 135 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 136 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 137 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 138 return round_up(map->value_size, 8) * num_possible_cpus(); 139 else if (IS_FD_MAP(map)) 140 return sizeof(u32); 141 else 142 return map->value_size; 143 } 144 145 static void maybe_wait_bpf_programs(struct bpf_map *map) 146 { 147 /* Wait for any running non-sleepable BPF programs to complete so that 148 * userspace, when we return to it, knows that all non-sleepable 149 * programs that could be running use the new map value. For sleepable 150 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait 151 * for the completions of these programs, but considering the waiting 152 * time can be very long and userspace may think it will hang forever, 153 * so don't handle sleepable BPF programs now. 154 */ 155 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || 156 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 157 synchronize_rcu(); 158 } 159 160 static void unpin_uptr_kaddr(void *kaddr) 161 { 162 if (kaddr) 163 unpin_user_page(virt_to_page(kaddr)); 164 } 165 166 static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj) 167 { 168 const struct btf_field *field; 169 void **uptr_addr; 170 int i; 171 172 for (i = 0, field = rec->fields; i < cnt; i++, field++) { 173 if (field->type != BPF_UPTR) 174 continue; 175 176 uptr_addr = obj + field->offset; 177 unpin_uptr_kaddr(*uptr_addr); 178 } 179 } 180 181 static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj) 182 { 183 if (!btf_record_has_field(rec, BPF_UPTR)) 184 return; 185 186 __bpf_obj_unpin_uptrs(rec, rec->cnt, obj); 187 } 188 189 static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj) 190 { 191 const struct btf_field *field; 192 const struct btf_type *t; 193 unsigned long start, end; 194 struct page *page; 195 void **uptr_addr; 196 int i, err; 197 198 if (!btf_record_has_field(rec, BPF_UPTR)) 199 return 0; 200 201 for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { 202 if (field->type != BPF_UPTR) 203 continue; 204 205 uptr_addr = obj + field->offset; 206 start = *(unsigned long *)uptr_addr; 207 if (!start) 208 continue; 209 210 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); 211 /* t->size was checked for zero before */ 212 if (check_add_overflow(start, t->size - 1, &end)) { 213 err = -EFAULT; 214 goto unpin_all; 215 } 216 217 /* The uptr's struct cannot span across two pages */ 218 if ((start & PAGE_MASK) != (end & PAGE_MASK)) { 219 err = -EOPNOTSUPP; 220 goto unpin_all; 221 } 222 223 err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page); 224 if (err != 1) 225 goto unpin_all; 226 227 if (PageHighMem(page)) { 228 err = -EOPNOTSUPP; 229 
unpin_user_page(page); 230 goto unpin_all; 231 } 232 233 *uptr_addr = page_address(page) + offset_in_page(start); 234 } 235 236 return 0; 237 238 unpin_all: 239 __bpf_obj_unpin_uptrs(rec, i, obj); 240 return err; 241 } 242 243 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, 244 void *key, void *value, __u64 flags) 245 { 246 int err; 247 248 /* Need to create a kthread, thus must support schedule */ 249 if (bpf_map_is_offloaded(map)) { 250 return bpf_map_offload_update_elem(map, key, value, flags); 251 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || 252 map->map_type == BPF_MAP_TYPE_ARENA || 253 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 254 return map->ops->map_update_elem(map, key, value, flags); 255 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || 256 map->map_type == BPF_MAP_TYPE_SOCKMAP) { 257 return sock_map_update_elem_sys(map, key, value, flags); 258 } else if (IS_FD_PROG_ARRAY(map)) { 259 return bpf_fd_array_map_update_elem(map, map_file, key, value, 260 flags); 261 } 262 263 bpf_disable_instrumentation(); 264 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 265 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 266 err = bpf_percpu_hash_update(map, key, value, flags); 267 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 268 err = bpf_percpu_array_update(map, key, value, flags); 269 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 270 err = bpf_percpu_cgroup_storage_update(map, key, value, 271 flags); 272 } else if (IS_FD_ARRAY(map)) { 273 err = bpf_fd_array_map_update_elem(map, map_file, key, value, 274 flags); 275 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 276 err = bpf_fd_htab_map_update_elem(map, map_file, key, value, 277 flags); 278 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 279 /* rcu_read_lock() is not needed */ 280 err = bpf_fd_reuseport_array_update_elem(map, key, value, 281 flags); 282 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 283 map->map_type == BPF_MAP_TYPE_STACK || 284 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 285 err = map->ops->map_push_elem(map, value, flags); 286 } else { 287 err = bpf_obj_pin_uptrs(map->record, value); 288 if (!err) { 289 rcu_read_lock(); 290 err = map->ops->map_update_elem(map, key, value, flags); 291 rcu_read_unlock(); 292 if (err) 293 bpf_obj_unpin_uptrs(map->record, value); 294 } 295 } 296 bpf_enable_instrumentation(); 297 298 return err; 299 } 300 301 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, 302 __u64 flags) 303 { 304 void *ptr; 305 int err; 306 307 if (bpf_map_is_offloaded(map)) 308 return bpf_map_offload_lookup_elem(map, key, value); 309 310 bpf_disable_instrumentation(); 311 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 312 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 313 err = bpf_percpu_hash_copy(map, key, value); 314 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 315 err = bpf_percpu_array_copy(map, key, value); 316 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 317 err = bpf_percpu_cgroup_storage_copy(map, key, value); 318 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { 319 err = bpf_stackmap_copy(map, key, value); 320 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { 321 err = bpf_fd_array_map_lookup_elem(map, key, value); 322 } else if (IS_FD_HASH(map)) { 323 err = bpf_fd_htab_map_lookup_elem(map, key, value); 324 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 325 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); 326 } 
else if (map->map_type == BPF_MAP_TYPE_QUEUE || 327 map->map_type == BPF_MAP_TYPE_STACK || 328 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 329 err = map->ops->map_peek_elem(map, value); 330 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 331 /* struct_ops map requires directly updating "value" */ 332 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); 333 } else { 334 rcu_read_lock(); 335 if (map->ops->map_lookup_elem_sys_only) 336 ptr = map->ops->map_lookup_elem_sys_only(map, key); 337 else 338 ptr = map->ops->map_lookup_elem(map, key); 339 if (IS_ERR(ptr)) { 340 err = PTR_ERR(ptr); 341 } else if (!ptr) { 342 err = -ENOENT; 343 } else { 344 err = 0; 345 if (flags & BPF_F_LOCK) 346 /* lock 'ptr' and copy everything but lock */ 347 copy_map_value_locked(map, value, ptr, true); 348 else 349 copy_map_value(map, value, ptr); 350 /* mask lock and timer, since value wasn't zero inited */ 351 check_and_init_map_value(map, value); 352 } 353 rcu_read_unlock(); 354 } 355 356 bpf_enable_instrumentation(); 357 358 return err; 359 } 360 361 /* Please, do not use this function outside from the map creation path 362 * (e.g. in map update path) without taking care of setting the active 363 * memory cgroup (see at bpf_map_kmalloc_node() for example). 364 */ 365 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) 366 { 367 /* We really just want to fail instead of triggering OOM killer 368 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 369 * which is used for lower order allocation requests. 370 * 371 * It has been observed that higher order allocation requests done by 372 * vmalloc with __GFP_NORETRY being set might fail due to not trying 373 * to reclaim memory from the page cache, thus we set 374 * __GFP_RETRY_MAYFAIL to avoid such situations. 375 */ 376 377 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO); 378 unsigned int flags = 0; 379 unsigned long align = 1; 380 void *area; 381 382 if (size >= SIZE_MAX) 383 return NULL; 384 385 /* kmalloc()'ed memory can't be mmap()'ed */ 386 if (mmapable) { 387 BUG_ON(!PAGE_ALIGNED(size)); 388 align = SHMLBA; 389 flags = VM_USERMAP; 390 } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 391 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY, 392 numa_node); 393 if (area != NULL) 394 return area; 395 } 396 397 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 398 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL, 399 flags, numa_node, __builtin_return_address(0)); 400 } 401 402 void *bpf_map_area_alloc(u64 size, int numa_node) 403 { 404 return __bpf_map_area_alloc(size, numa_node, false); 405 } 406 407 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) 408 { 409 return __bpf_map_area_alloc(size, numa_node, true); 410 } 411 412 void bpf_map_area_free(void *area) 413 { 414 kvfree(area); 415 } 416 417 static u32 bpf_map_flags_retain_permanent(u32 flags) 418 { 419 /* Some map creation flags are not tied to the map object but 420 * rather to the map fd instead, so they have no meaning upon 421 * map object inspection since multiple file descriptors with 422 * different (access) properties can exist here. Thus, given 423 * this has zero meaning for the map itself, lets clear these 424 * from here. 
425 */ 426 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); 427 } 428 429 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) 430 { 431 map->map_type = attr->map_type; 432 map->key_size = attr->key_size; 433 map->value_size = attr->value_size; 434 map->max_entries = attr->max_entries; 435 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); 436 map->numa_node = bpf_map_attr_numa_node(attr); 437 map->map_extra = attr->map_extra; 438 } 439 440 static int bpf_map_alloc_id(struct bpf_map *map) 441 { 442 int id; 443 444 idr_preload(GFP_KERNEL); 445 spin_lock_bh(&map_idr_lock); 446 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); 447 if (id > 0) 448 map->id = id; 449 spin_unlock_bh(&map_idr_lock); 450 idr_preload_end(); 451 452 if (WARN_ON_ONCE(!id)) 453 return -ENOSPC; 454 455 return id > 0 ? 0 : id; 456 } 457 458 void bpf_map_free_id(struct bpf_map *map) 459 { 460 unsigned long flags; 461 462 /* Offloaded maps are removed from the IDR store when their device 463 * disappears - even if someone holds an fd to them they are unusable, 464 * the memory is gone, all ops will fail; they are simply waiting for 465 * refcnt to drop to be freed. 466 */ 467 if (!map->id) 468 return; 469 470 spin_lock_irqsave(&map_idr_lock, flags); 471 472 idr_remove(&map_idr, map->id); 473 map->id = 0; 474 475 spin_unlock_irqrestore(&map_idr_lock, flags); 476 } 477 478 #ifdef CONFIG_MEMCG 479 static void bpf_map_save_memcg(struct bpf_map *map) 480 { 481 /* Currently if a map is created by a process belonging to the root 482 * memory cgroup, get_obj_cgroup_from_current() will return NULL. 483 * So we have to check map->objcg for being NULL each time it's 484 * being used. 485 */ 486 if (memcg_bpf_enabled()) 487 map->objcg = get_obj_cgroup_from_current(); 488 } 489 490 static void bpf_map_release_memcg(struct bpf_map *map) 491 { 492 if (map->objcg) 493 obj_cgroup_put(map->objcg); 494 } 495 496 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map) 497 { 498 if (map->objcg) 499 return get_mem_cgroup_from_objcg(map->objcg); 500 501 return root_mem_cgroup; 502 } 503 504 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 505 int node) 506 { 507 struct mem_cgroup *memcg, *old_memcg; 508 void *ptr; 509 510 memcg = bpf_map_get_memcg(map); 511 old_memcg = set_active_memcg(memcg); 512 ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node); 513 set_active_memcg(old_memcg); 514 mem_cgroup_put(memcg); 515 516 return ptr; 517 } 518 519 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 520 { 521 struct mem_cgroup *memcg, *old_memcg; 522 void *ptr; 523 524 memcg = bpf_map_get_memcg(map); 525 old_memcg = set_active_memcg(memcg); 526 ptr = kzalloc(size, flags | __GFP_ACCOUNT); 527 set_active_memcg(old_memcg); 528 mem_cgroup_put(memcg); 529 530 return ptr; 531 } 532 533 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, 534 gfp_t flags) 535 { 536 struct mem_cgroup *memcg, *old_memcg; 537 void *ptr; 538 539 memcg = bpf_map_get_memcg(map); 540 old_memcg = set_active_memcg(memcg); 541 ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT); 542 set_active_memcg(old_memcg); 543 mem_cgroup_put(memcg); 544 545 return ptr; 546 } 547 548 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 549 size_t align, gfp_t flags) 550 { 551 struct mem_cgroup *memcg, *old_memcg; 552 void __percpu *ptr; 553 554 memcg = bpf_map_get_memcg(map); 555 old_memcg = set_active_memcg(memcg); 556 ptr = __alloc_percpu_gfp(size, 
align, flags | __GFP_ACCOUNT); 557 set_active_memcg(old_memcg); 558 mem_cgroup_put(memcg); 559 560 return ptr; 561 } 562 563 #else 564 static void bpf_map_save_memcg(struct bpf_map *map) 565 { 566 } 567 568 static void bpf_map_release_memcg(struct bpf_map *map) 569 { 570 } 571 #endif 572 573 static bool can_alloc_pages(void) 574 { 575 return preempt_count() == 0 && !irqs_disabled() && 576 !IS_ENABLED(CONFIG_PREEMPT_RT); 577 } 578 579 static struct page *__bpf_alloc_page(int nid) 580 { 581 if (!can_alloc_pages()) 582 return alloc_pages_nolock(nid, 0); 583 584 return alloc_pages_node(nid, 585 GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT 586 | __GFP_NOWARN, 587 0); 588 } 589 590 int bpf_map_alloc_pages(const struct bpf_map *map, int nid, 591 unsigned long nr_pages, struct page **pages) 592 { 593 unsigned long i, j; 594 struct page *pg; 595 int ret = 0; 596 #ifdef CONFIG_MEMCG 597 struct mem_cgroup *memcg, *old_memcg; 598 599 memcg = bpf_map_get_memcg(map); 600 old_memcg = set_active_memcg(memcg); 601 #endif 602 for (i = 0; i < nr_pages; i++) { 603 pg = __bpf_alloc_page(nid); 604 605 if (pg) { 606 pages[i] = pg; 607 continue; 608 } 609 for (j = 0; j < i; j++) 610 free_pages_nolock(pages[j], 0); 611 ret = -ENOMEM; 612 break; 613 } 614 615 #ifdef CONFIG_MEMCG 616 set_active_memcg(old_memcg); 617 mem_cgroup_put(memcg); 618 #endif 619 return ret; 620 } 621 622 623 static int btf_field_cmp(const void *a, const void *b) 624 { 625 const struct btf_field *f1 = a, *f2 = b; 626 627 if (f1->offset < f2->offset) 628 return -1; 629 else if (f1->offset > f2->offset) 630 return 1; 631 return 0; 632 } 633 634 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset, 635 u32 field_mask) 636 { 637 struct btf_field *field; 638 639 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask)) 640 return NULL; 641 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp); 642 if (!field || !(field->type & field_mask)) 643 return NULL; 644 return field; 645 } 646 647 void btf_record_free(struct btf_record *rec) 648 { 649 int i; 650 651 if (IS_ERR_OR_NULL(rec)) 652 return; 653 for (i = 0; i < rec->cnt; i++) { 654 switch (rec->fields[i].type) { 655 case BPF_KPTR_UNREF: 656 case BPF_KPTR_REF: 657 case BPF_KPTR_PERCPU: 658 case BPF_UPTR: 659 if (rec->fields[i].kptr.module) 660 module_put(rec->fields[i].kptr.module); 661 if (btf_is_kernel(rec->fields[i].kptr.btf)) 662 btf_put(rec->fields[i].kptr.btf); 663 break; 664 case BPF_LIST_HEAD: 665 case BPF_LIST_NODE: 666 case BPF_RB_ROOT: 667 case BPF_RB_NODE: 668 case BPF_SPIN_LOCK: 669 case BPF_RES_SPIN_LOCK: 670 case BPF_TIMER: 671 case BPF_REFCOUNT: 672 case BPF_WORKQUEUE: 673 /* Nothing to release */ 674 break; 675 default: 676 WARN_ON_ONCE(1); 677 continue; 678 } 679 } 680 kfree(rec); 681 } 682 683 void bpf_map_free_record(struct bpf_map *map) 684 { 685 btf_record_free(map->record); 686 map->record = NULL; 687 } 688 689 struct btf_record *btf_record_dup(const struct btf_record *rec) 690 { 691 const struct btf_field *fields; 692 struct btf_record *new_rec; 693 int ret, size, i; 694 695 if (IS_ERR_OR_NULL(rec)) 696 return NULL; 697 size = struct_size(rec, fields, rec->cnt); 698 new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN); 699 if (!new_rec) 700 return ERR_PTR(-ENOMEM); 701 /* Do a deep copy of the btf_record */ 702 fields = rec->fields; 703 new_rec->cnt = 0; 704 for (i = 0; i < rec->cnt; i++) { 705 switch (fields[i].type) { 706 case BPF_KPTR_UNREF: 707 case BPF_KPTR_REF: 708 case BPF_KPTR_PERCPU: 709 case BPF_UPTR: 710 if 
(btf_is_kernel(fields[i].kptr.btf)) 711 btf_get(fields[i].kptr.btf); 712 if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) { 713 ret = -ENXIO; 714 goto free; 715 } 716 break; 717 case BPF_LIST_HEAD: 718 case BPF_LIST_NODE: 719 case BPF_RB_ROOT: 720 case BPF_RB_NODE: 721 case BPF_SPIN_LOCK: 722 case BPF_RES_SPIN_LOCK: 723 case BPF_TIMER: 724 case BPF_REFCOUNT: 725 case BPF_WORKQUEUE: 726 /* Nothing to acquire */ 727 break; 728 default: 729 ret = -EFAULT; 730 WARN_ON_ONCE(1); 731 goto free; 732 } 733 new_rec->cnt++; 734 } 735 return new_rec; 736 free: 737 btf_record_free(new_rec); 738 return ERR_PTR(ret); 739 } 740 741 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b) 742 { 743 bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b); 744 int size; 745 746 if (!a_has_fields && !b_has_fields) 747 return true; 748 if (a_has_fields != b_has_fields) 749 return false; 750 if (rec_a->cnt != rec_b->cnt) 751 return false; 752 size = struct_size(rec_a, fields, rec_a->cnt); 753 /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused 754 * members are zeroed out. So memcmp is safe to do without worrying 755 * about padding/unused fields. 756 * 757 * While spin_lock, timer, and kptr have no relation to map BTF, 758 * list_head metadata is specific to map BTF, the btf and value_rec 759 * members in particular. btf is the map BTF, while value_rec points to 760 * btf_record in that map BTF. 761 * 762 * So while by default, we don't rely on the map BTF (which the records 763 * were parsed from) matching for both records, which is not backwards 764 * compatible, in case list_head is part of it, we implicitly rely on 765 * that by way of depending on memcmp succeeding for it. 766 */ 767 return !memcmp(rec_a, rec_b, size); 768 } 769 770 void bpf_obj_free_timer(const struct btf_record *rec, void *obj) 771 { 772 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER))) 773 return; 774 bpf_timer_cancel_and_free(obj + rec->timer_off); 775 } 776 777 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj) 778 { 779 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE))) 780 return; 781 bpf_wq_cancel_and_free(obj + rec->wq_off); 782 } 783 784 void bpf_obj_free_fields(const struct btf_record *rec, void *obj) 785 { 786 const struct btf_field *fields; 787 int i; 788 789 if (IS_ERR_OR_NULL(rec)) 790 return; 791 fields = rec->fields; 792 for (i = 0; i < rec->cnt; i++) { 793 struct btf_struct_meta *pointee_struct_meta; 794 const struct btf_field *field = &fields[i]; 795 void *field_ptr = obj + field->offset; 796 void *xchgd_field; 797 798 switch (fields[i].type) { 799 case BPF_SPIN_LOCK: 800 case BPF_RES_SPIN_LOCK: 801 break; 802 case BPF_TIMER: 803 bpf_timer_cancel_and_free(field_ptr); 804 break; 805 case BPF_WORKQUEUE: 806 bpf_wq_cancel_and_free(field_ptr); 807 break; 808 case BPF_KPTR_UNREF: 809 WRITE_ONCE(*(u64 *)field_ptr, 0); 810 break; 811 case BPF_KPTR_REF: 812 case BPF_KPTR_PERCPU: 813 xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0); 814 if (!xchgd_field) 815 break; 816 817 if (!btf_is_kernel(field->kptr.btf)) { 818 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, 819 field->kptr.btf_id); 820 __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ? 
821 pointee_struct_meta->record : NULL, 822 fields[i].type == BPF_KPTR_PERCPU); 823 } else { 824 field->kptr.dtor(xchgd_field); 825 } 826 break; 827 case BPF_UPTR: 828 /* The caller ensured that no one is using the uptr */ 829 unpin_uptr_kaddr(*(void **)field_ptr); 830 break; 831 case BPF_LIST_HEAD: 832 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 833 continue; 834 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); 835 break; 836 case BPF_RB_ROOT: 837 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 838 continue; 839 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); 840 break; 841 case BPF_LIST_NODE: 842 case BPF_RB_NODE: 843 case BPF_REFCOUNT: 844 break; 845 default: 846 WARN_ON_ONCE(1); 847 continue; 848 } 849 } 850 } 851 852 static void bpf_map_free(struct bpf_map *map) 853 { 854 struct btf_record *rec = map->record; 855 struct btf *btf = map->btf; 856 857 /* implementation dependent freeing. Disabling migration to simplify 858 * the free of values or special fields allocated from bpf memory 859 * allocator. 860 */ 861 migrate_disable(); 862 map->ops->map_free(map); 863 migrate_enable(); 864 865 /* Delay freeing of btf_record for maps, as map_free 866 * callback usually needs access to them. It is better to do it here 867 * than require each callback to do the free itself manually. 868 * 869 * Note that the btf_record stashed in map->inner_map_meta->record was 870 * already freed using the map_free callback for map in map case which 871 * eventually calls bpf_map_free_meta, since inner_map_meta is only a 872 * template bpf_map struct used during verification. 873 */ 874 btf_record_free(rec); 875 /* Delay freeing of btf for maps, as map_free callback may need 876 * struct_meta info which will be freed with btf_put(). 877 */ 878 btf_put(btf); 879 } 880 881 /* called from workqueue */ 882 static void bpf_map_free_deferred(struct work_struct *work) 883 { 884 struct bpf_map *map = container_of(work, struct bpf_map, work); 885 886 security_bpf_map_free(map); 887 bpf_map_release_memcg(map); 888 bpf_map_free(map); 889 } 890 891 static void bpf_map_put_uref(struct bpf_map *map) 892 { 893 if (atomic64_dec_and_test(&map->usercnt)) { 894 if (map->ops->map_release_uref) 895 map->ops->map_release_uref(map); 896 } 897 } 898 899 static void bpf_map_free_in_work(struct bpf_map *map) 900 { 901 INIT_WORK(&map->work, bpf_map_free_deferred); 902 /* Avoid spawning kworkers, since they all might contend 903 * for the same mutex like slab_mutex. 
904 */ 905 queue_work(system_unbound_wq, &map->work); 906 } 907 908 static void bpf_map_free_rcu_gp(struct rcu_head *rcu) 909 { 910 bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu)); 911 } 912 913 static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu) 914 { 915 if (rcu_trace_implies_rcu_gp()) 916 bpf_map_free_rcu_gp(rcu); 917 else 918 call_rcu(rcu, bpf_map_free_rcu_gp); 919 } 920 921 /* decrement map refcnt and schedule it for freeing via workqueue 922 * (underlying map implementation ops->map_free() might sleep) 923 */ 924 void bpf_map_put(struct bpf_map *map) 925 { 926 if (atomic64_dec_and_test(&map->refcnt)) { 927 /* bpf_map_free_id() must be called first */ 928 bpf_map_free_id(map); 929 930 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); 931 if (READ_ONCE(map->free_after_mult_rcu_gp)) 932 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); 933 else if (READ_ONCE(map->free_after_rcu_gp)) 934 call_rcu(&map->rcu, bpf_map_free_rcu_gp); 935 else 936 bpf_map_free_in_work(map); 937 } 938 } 939 EXPORT_SYMBOL_GPL(bpf_map_put); 940 941 void bpf_map_put_with_uref(struct bpf_map *map) 942 { 943 bpf_map_put_uref(map); 944 bpf_map_put(map); 945 } 946 947 static int bpf_map_release(struct inode *inode, struct file *filp) 948 { 949 struct bpf_map *map = filp->private_data; 950 951 if (map->ops->map_release) 952 map->ops->map_release(map, filp); 953 954 bpf_map_put_with_uref(map); 955 return 0; 956 } 957 958 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) 959 { 960 fmode_t mode = fd_file(f)->f_mode; 961 962 /* Our file permissions may have been overridden by global 963 * map permissions facing syscall side. 964 */ 965 if (READ_ONCE(map->frozen)) 966 mode &= ~FMODE_CAN_WRITE; 967 return mode; 968 } 969 970 #ifdef CONFIG_PROC_FS 971 /* Show the memory usage of a bpf map */ 972 static u64 bpf_map_memory_usage(const struct bpf_map *map) 973 { 974 return map->ops->map_mem_usage(map); 975 } 976 977 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) 978 { 979 struct bpf_map *map = filp->private_data; 980 u32 type = 0, jited = 0; 981 982 if (map_type_contains_progs(map)) { 983 spin_lock(&map->owner.lock); 984 type = map->owner.type; 985 jited = map->owner.jited; 986 spin_unlock(&map->owner.lock); 987 } 988 989 seq_printf(m, 990 "map_type:\t%u\n" 991 "key_size:\t%u\n" 992 "value_size:\t%u\n" 993 "max_entries:\t%u\n" 994 "map_flags:\t%#x\n" 995 "map_extra:\t%#llx\n" 996 "memlock:\t%llu\n" 997 "map_id:\t%u\n" 998 "frozen:\t%u\n", 999 map->map_type, 1000 map->key_size, 1001 map->value_size, 1002 map->max_entries, 1003 map->map_flags, 1004 (unsigned long long)map->map_extra, 1005 bpf_map_memory_usage(map), 1006 map->id, 1007 READ_ONCE(map->frozen)); 1008 if (type) { 1009 seq_printf(m, "owner_prog_type:\t%u\n", type); 1010 seq_printf(m, "owner_jited:\t%u\n", jited); 1011 } 1012 } 1013 #endif 1014 1015 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, 1016 loff_t *ppos) 1017 { 1018 /* We need this handler such that alloc_file() enables 1019 * f_mode with FMODE_CAN_READ. 1020 */ 1021 return -EINVAL; 1022 } 1023 1024 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, 1025 size_t siz, loff_t *ppos) 1026 { 1027 /* We need this handler such that alloc_file() enables 1028 * f_mode with FMODE_CAN_WRITE. 
1029 */ 1030 return -EINVAL; 1031 } 1032 1033 /* called for any extra memory-mapped regions (except initial) */ 1034 static void bpf_map_mmap_open(struct vm_area_struct *vma) 1035 { 1036 struct bpf_map *map = vma->vm_file->private_data; 1037 1038 if (vma->vm_flags & VM_MAYWRITE) 1039 bpf_map_write_active_inc(map); 1040 } 1041 1042 /* called for all unmapped memory region (including initial) */ 1043 static void bpf_map_mmap_close(struct vm_area_struct *vma) 1044 { 1045 struct bpf_map *map = vma->vm_file->private_data; 1046 1047 if (vma->vm_flags & VM_MAYWRITE) 1048 bpf_map_write_active_dec(map); 1049 } 1050 1051 static const struct vm_operations_struct bpf_map_default_vmops = { 1052 .open = bpf_map_mmap_open, 1053 .close = bpf_map_mmap_close, 1054 }; 1055 1056 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) 1057 { 1058 struct bpf_map *map = filp->private_data; 1059 int err = 0; 1060 1061 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) 1062 return -ENOTSUPP; 1063 1064 if (!(vma->vm_flags & VM_SHARED)) 1065 return -EINVAL; 1066 1067 mutex_lock(&map->freeze_mutex); 1068 1069 if (vma->vm_flags & VM_WRITE) { 1070 if (map->frozen) { 1071 err = -EPERM; 1072 goto out; 1073 } 1074 /* map is meant to be read-only, so do not allow mapping as 1075 * writable, because it's possible to leak a writable page 1076 * reference and allows user-space to still modify it after 1077 * freezing, while verifier will assume contents do not change 1078 */ 1079 if (map->map_flags & BPF_F_RDONLY_PROG) { 1080 err = -EACCES; 1081 goto out; 1082 } 1083 bpf_map_write_active_inc(map); 1084 } 1085 out: 1086 mutex_unlock(&map->freeze_mutex); 1087 if (err) 1088 return err; 1089 1090 /* set default open/close callbacks */ 1091 vma->vm_ops = &bpf_map_default_vmops; 1092 vma->vm_private_data = map; 1093 vm_flags_clear(vma, VM_MAYEXEC); 1094 /* If mapping is read-only, then disallow potentially re-mapping with 1095 * PROT_WRITE by dropping VM_MAYWRITE flag. 
This VM_MAYWRITE clearing 1096 * means that as far as BPF map's memory-mapped VMAs are concerned, 1097 * VM_WRITE and VM_MAYWRITE and equivalent, if one of them is set, 1098 * both should be set, so we can forget about VM_MAYWRITE and always 1099 * check just VM_WRITE 1100 */ 1101 if (!(vma->vm_flags & VM_WRITE)) 1102 vm_flags_clear(vma, VM_MAYWRITE); 1103 1104 err = map->ops->map_mmap(map, vma); 1105 if (err) { 1106 if (vma->vm_flags & VM_WRITE) 1107 bpf_map_write_active_dec(map); 1108 } 1109 1110 return err; 1111 } 1112 1113 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts) 1114 { 1115 struct bpf_map *map = filp->private_data; 1116 1117 if (map->ops->map_poll) 1118 return map->ops->map_poll(map, filp, pts); 1119 1120 return EPOLLERR; 1121 } 1122 1123 static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr, 1124 unsigned long len, unsigned long pgoff, 1125 unsigned long flags) 1126 { 1127 struct bpf_map *map = filp->private_data; 1128 1129 if (map->ops->map_get_unmapped_area) 1130 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); 1131 #ifdef CONFIG_MMU 1132 return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); 1133 #else 1134 return addr; 1135 #endif 1136 } 1137 1138 const struct file_operations bpf_map_fops = { 1139 #ifdef CONFIG_PROC_FS 1140 .show_fdinfo = bpf_map_show_fdinfo, 1141 #endif 1142 .release = bpf_map_release, 1143 .read = bpf_dummy_read, 1144 .write = bpf_dummy_write, 1145 .mmap = bpf_map_mmap, 1146 .poll = bpf_map_poll, 1147 .get_unmapped_area = bpf_get_unmapped_area, 1148 }; 1149 1150 int bpf_map_new_fd(struct bpf_map *map, int flags) 1151 { 1152 int ret; 1153 1154 ret = security_bpf_map(map, OPEN_FMODE(flags)); 1155 if (ret < 0) 1156 return ret; 1157 1158 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, 1159 flags | O_CLOEXEC); 1160 } 1161 1162 int bpf_get_file_flag(int flags) 1163 { 1164 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) 1165 return -EINVAL; 1166 if (flags & BPF_F_RDONLY) 1167 return O_RDONLY; 1168 if (flags & BPF_F_WRONLY) 1169 return O_WRONLY; 1170 return O_RDWR; 1171 } 1172 1173 /* helper macro to check that unused fields 'union bpf_attr' are zero */ 1174 #define CHECK_ATTR(CMD) \ 1175 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ 1176 sizeof(attr->CMD##_LAST_FIELD), 0, \ 1177 sizeof(*attr) - \ 1178 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 1179 sizeof(attr->CMD##_LAST_FIELD)) != NULL 1180 1181 /* dst and src must have at least "size" number of bytes. 1182 * Return strlen on success and < 0 on error. 1183 */ 1184 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size) 1185 { 1186 const char *end = src + size; 1187 const char *orig_src = src; 1188 1189 memset(dst, 0, size); 1190 /* Copy all isalnum(), '_' and '.' chars. 
*/ 1191 while (src < end && *src) { 1192 if (!isalnum(*src) && 1193 *src != '_' && *src != '.') 1194 return -EINVAL; 1195 *dst++ = *src++; 1196 } 1197 1198 /* No '\0' found in "size" number of bytes */ 1199 if (src == end) 1200 return -EINVAL; 1201 1202 return src - orig_src; 1203 } 1204 1205 int map_check_no_btf(const struct bpf_map *map, 1206 const struct btf *btf, 1207 const struct btf_type *key_type, 1208 const struct btf_type *value_type) 1209 { 1210 return -ENOTSUPP; 1211 } 1212 1213 static int map_check_btf(struct bpf_map *map, struct bpf_token *token, 1214 const struct btf *btf, u32 btf_key_id, u32 btf_value_id) 1215 { 1216 const struct btf_type *key_type, *value_type; 1217 u32 key_size, value_size; 1218 int ret = 0; 1219 1220 /* Some maps allow key to be unspecified. */ 1221 if (btf_key_id) { 1222 key_type = btf_type_id_size(btf, &btf_key_id, &key_size); 1223 if (!key_type || key_size != map->key_size) 1224 return -EINVAL; 1225 } else { 1226 key_type = btf_type_by_id(btf, 0); 1227 if (!map->ops->map_check_btf) 1228 return -EINVAL; 1229 } 1230 1231 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 1232 if (!value_type || value_size != map->value_size) 1233 return -EINVAL; 1234 1235 map->record = btf_parse_fields(btf, value_type, 1236 BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | 1237 BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR, 1238 map->value_size); 1239 if (!IS_ERR_OR_NULL(map->record)) { 1240 int i; 1241 1242 if (!bpf_token_capable(token, CAP_BPF)) { 1243 ret = -EPERM; 1244 goto free_map_tab; 1245 } 1246 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { 1247 ret = -EACCES; 1248 goto free_map_tab; 1249 } 1250 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { 1251 switch (map->record->field_mask & (1 << i)) { 1252 case 0: 1253 continue; 1254 case BPF_SPIN_LOCK: 1255 case BPF_RES_SPIN_LOCK: 1256 if (map->map_type != BPF_MAP_TYPE_HASH && 1257 map->map_type != BPF_MAP_TYPE_ARRAY && 1258 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 1259 map->map_type != BPF_MAP_TYPE_SK_STORAGE && 1260 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1261 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1262 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1263 ret = -EOPNOTSUPP; 1264 goto free_map_tab; 1265 } 1266 break; 1267 case BPF_TIMER: 1268 case BPF_WORKQUEUE: 1269 if (map->map_type != BPF_MAP_TYPE_HASH && 1270 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1271 map->map_type != BPF_MAP_TYPE_ARRAY) { 1272 ret = -EOPNOTSUPP; 1273 goto free_map_tab; 1274 } 1275 break; 1276 case BPF_KPTR_UNREF: 1277 case BPF_KPTR_REF: 1278 case BPF_KPTR_PERCPU: 1279 case BPF_REFCOUNT: 1280 if (map->map_type != BPF_MAP_TYPE_HASH && 1281 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 1282 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1283 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && 1284 map->map_type != BPF_MAP_TYPE_ARRAY && 1285 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 1286 map->map_type != BPF_MAP_TYPE_SK_STORAGE && 1287 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1288 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1289 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1290 ret = -EOPNOTSUPP; 1291 goto free_map_tab; 1292 } 1293 break; 1294 case BPF_UPTR: 1295 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { 1296 ret = -EOPNOTSUPP; 1297 goto free_map_tab; 1298 } 1299 break; 1300 case BPF_LIST_HEAD: 1301 case BPF_RB_ROOT: 1302 if (map->map_type != BPF_MAP_TYPE_HASH && 1303 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1304 map->map_type != 
BPF_MAP_TYPE_ARRAY) { 1305 ret = -EOPNOTSUPP; 1306 goto free_map_tab; 1307 } 1308 break; 1309 default: 1310 /* Fail if map_type checks are missing for a field type */ 1311 ret = -EOPNOTSUPP; 1312 goto free_map_tab; 1313 } 1314 } 1315 } 1316 1317 ret = btf_check_and_fixup_fields(btf, map->record); 1318 if (ret < 0) 1319 goto free_map_tab; 1320 1321 if (map->ops->map_check_btf) { 1322 ret = map->ops->map_check_btf(map, btf, key_type, value_type); 1323 if (ret < 0) 1324 goto free_map_tab; 1325 } 1326 1327 return ret; 1328 free_map_tab: 1329 bpf_map_free_record(map); 1330 return ret; 1331 } 1332 1333 static bool bpf_net_capable(void) 1334 { 1335 return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN); 1336 } 1337 1338 #define BPF_MAP_CREATE_LAST_FIELD map_token_fd 1339 /* called via syscall */ 1340 static int map_create(union bpf_attr *attr, bool kernel) 1341 { 1342 const struct bpf_map_ops *ops; 1343 struct bpf_token *token = NULL; 1344 int numa_node = bpf_map_attr_numa_node(attr); 1345 u32 map_type = attr->map_type; 1346 struct bpf_map *map; 1347 bool token_flag; 1348 int f_flags; 1349 int err; 1350 1351 err = CHECK_ATTR(BPF_MAP_CREATE); 1352 if (err) 1353 return -EINVAL; 1354 1355 /* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it 1356 * to avoid per-map type checks tripping on unknown flag 1357 */ 1358 token_flag = attr->map_flags & BPF_F_TOKEN_FD; 1359 attr->map_flags &= ~BPF_F_TOKEN_FD; 1360 1361 if (attr->btf_vmlinux_value_type_id) { 1362 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || 1363 attr->btf_key_type_id || attr->btf_value_type_id) 1364 return -EINVAL; 1365 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { 1366 return -EINVAL; 1367 } 1368 1369 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && 1370 attr->map_type != BPF_MAP_TYPE_ARENA && 1371 attr->map_extra != 0) 1372 return -EINVAL; 1373 1374 f_flags = bpf_get_file_flag(attr->map_flags); 1375 if (f_flags < 0) 1376 return f_flags; 1377 1378 if (numa_node != NUMA_NO_NODE && 1379 ((unsigned int)numa_node >= nr_node_ids || 1380 !node_online(numa_node))) 1381 return -EINVAL; 1382 1383 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ 1384 map_type = attr->map_type; 1385 if (map_type >= ARRAY_SIZE(bpf_map_types)) 1386 return -EINVAL; 1387 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types)); 1388 ops = bpf_map_types[map_type]; 1389 if (!ops) 1390 return -EINVAL; 1391 1392 if (ops->map_alloc_check) { 1393 err = ops->map_alloc_check(attr); 1394 if (err) 1395 return err; 1396 } 1397 if (attr->map_ifindex) 1398 ops = &bpf_map_offload_ops; 1399 if (!ops->map_mem_usage) 1400 return -EINVAL; 1401 1402 if (token_flag) { 1403 token = bpf_token_get_from_fd(attr->map_token_fd); 1404 if (IS_ERR(token)) 1405 return PTR_ERR(token); 1406 1407 /* if current token doesn't grant map creation permissions, 1408 * then we can't use this token, so ignore it and rely on 1409 * system-wide capabilities checks 1410 */ 1411 if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) || 1412 !bpf_token_allow_map_type(token, attr->map_type)) { 1413 bpf_token_put(token); 1414 token = NULL; 1415 } 1416 } 1417 1418 err = -EPERM; 1419 1420 /* Intent here is for unprivileged_bpf_disabled to block BPF map 1421 * creation for unprivileged users; other actions depend 1422 * on fd availability and access to bpffs, so are dependent on 1423 * object creation success. Even with unprivileged BPF disabled, 1424 * capability checks are still carried out. 
1425 */ 1426 if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF)) 1427 goto put_token; 1428 1429 /* check privileged map type permissions */ 1430 switch (map_type) { 1431 case BPF_MAP_TYPE_ARRAY: 1432 case BPF_MAP_TYPE_PERCPU_ARRAY: 1433 case BPF_MAP_TYPE_PROG_ARRAY: 1434 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1435 case BPF_MAP_TYPE_CGROUP_ARRAY: 1436 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1437 case BPF_MAP_TYPE_HASH: 1438 case BPF_MAP_TYPE_PERCPU_HASH: 1439 case BPF_MAP_TYPE_HASH_OF_MAPS: 1440 case BPF_MAP_TYPE_RINGBUF: 1441 case BPF_MAP_TYPE_USER_RINGBUF: 1442 case BPF_MAP_TYPE_CGROUP_STORAGE: 1443 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 1444 /* unprivileged */ 1445 break; 1446 case BPF_MAP_TYPE_SK_STORAGE: 1447 case BPF_MAP_TYPE_INODE_STORAGE: 1448 case BPF_MAP_TYPE_TASK_STORAGE: 1449 case BPF_MAP_TYPE_CGRP_STORAGE: 1450 case BPF_MAP_TYPE_BLOOM_FILTER: 1451 case BPF_MAP_TYPE_LPM_TRIE: 1452 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 1453 case BPF_MAP_TYPE_STACK_TRACE: 1454 case BPF_MAP_TYPE_QUEUE: 1455 case BPF_MAP_TYPE_STACK: 1456 case BPF_MAP_TYPE_LRU_HASH: 1457 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 1458 case BPF_MAP_TYPE_STRUCT_OPS: 1459 case BPF_MAP_TYPE_CPUMAP: 1460 case BPF_MAP_TYPE_ARENA: 1461 if (!bpf_token_capable(token, CAP_BPF)) 1462 goto put_token; 1463 break; 1464 case BPF_MAP_TYPE_SOCKMAP: 1465 case BPF_MAP_TYPE_SOCKHASH: 1466 case BPF_MAP_TYPE_DEVMAP: 1467 case BPF_MAP_TYPE_DEVMAP_HASH: 1468 case BPF_MAP_TYPE_XSKMAP: 1469 if (!bpf_token_capable(token, CAP_NET_ADMIN)) 1470 goto put_token; 1471 break; 1472 default: 1473 WARN(1, "unsupported map type %d", map_type); 1474 goto put_token; 1475 } 1476 1477 map = ops->map_alloc(attr); 1478 if (IS_ERR(map)) { 1479 err = PTR_ERR(map); 1480 goto put_token; 1481 } 1482 map->ops = ops; 1483 map->map_type = map_type; 1484 1485 err = bpf_obj_name_cpy(map->name, attr->map_name, 1486 sizeof(attr->map_name)); 1487 if (err < 0) 1488 goto free_map; 1489 1490 atomic64_set(&map->refcnt, 1); 1491 atomic64_set(&map->usercnt, 1); 1492 mutex_init(&map->freeze_mutex); 1493 spin_lock_init(&map->owner.lock); 1494 1495 if (attr->btf_key_type_id || attr->btf_value_type_id || 1496 /* Even the map's value is a kernel's struct, 1497 * the bpf_prog.o must have BTF to begin with 1498 * to figure out the corresponding kernel's 1499 * counter part. Thus, attr->btf_fd has 1500 * to be valid also. 1501 */ 1502 attr->btf_vmlinux_value_type_id) { 1503 struct btf *btf; 1504 1505 btf = btf_get_by_fd(attr->btf_fd); 1506 if (IS_ERR(btf)) { 1507 err = PTR_ERR(btf); 1508 goto free_map; 1509 } 1510 if (btf_is_kernel(btf)) { 1511 btf_put(btf); 1512 err = -EACCES; 1513 goto free_map; 1514 } 1515 map->btf = btf; 1516 1517 if (attr->btf_value_type_id) { 1518 err = map_check_btf(map, token, btf, attr->btf_key_type_id, 1519 attr->btf_value_type_id); 1520 if (err) 1521 goto free_map; 1522 } 1523 1524 map->btf_key_type_id = attr->btf_key_type_id; 1525 map->btf_value_type_id = attr->btf_value_type_id; 1526 map->btf_vmlinux_value_type_id = 1527 attr->btf_vmlinux_value_type_id; 1528 } 1529 1530 err = security_bpf_map_create(map, attr, token, kernel); 1531 if (err) 1532 goto free_map_sec; 1533 1534 err = bpf_map_alloc_id(map); 1535 if (err) 1536 goto free_map_sec; 1537 1538 bpf_map_save_memcg(map); 1539 bpf_token_put(token); 1540 1541 err = bpf_map_new_fd(map, f_flags); 1542 if (err < 0) { 1543 /* failed to allocate fd. 
1544 * bpf_map_put_with_uref() is needed because the above 1545 * bpf_map_alloc_id() has published the map 1546 * to the userspace and the userspace may 1547 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 1548 */ 1549 bpf_map_put_with_uref(map); 1550 return err; 1551 } 1552 1553 return err; 1554 1555 free_map_sec: 1556 security_bpf_map_free(map); 1557 free_map: 1558 bpf_map_free(map); 1559 put_token: 1560 bpf_token_put(token); 1561 return err; 1562 } 1563 1564 void bpf_map_inc(struct bpf_map *map) 1565 { 1566 atomic64_inc(&map->refcnt); 1567 } 1568 EXPORT_SYMBOL_GPL(bpf_map_inc); 1569 1570 void bpf_map_inc_with_uref(struct bpf_map *map) 1571 { 1572 atomic64_inc(&map->refcnt); 1573 atomic64_inc(&map->usercnt); 1574 } 1575 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); 1576 1577 struct bpf_map *bpf_map_get(u32 ufd) 1578 { 1579 CLASS(fd, f)(ufd); 1580 struct bpf_map *map = __bpf_map_get(f); 1581 1582 if (!IS_ERR(map)) 1583 bpf_map_inc(map); 1584 1585 return map; 1586 } 1587 EXPORT_SYMBOL_NS(bpf_map_get, "BPF_INTERNAL"); 1588 1589 struct bpf_map *bpf_map_get_with_uref(u32 ufd) 1590 { 1591 CLASS(fd, f)(ufd); 1592 struct bpf_map *map = __bpf_map_get(f); 1593 1594 if (!IS_ERR(map)) 1595 bpf_map_inc_with_uref(map); 1596 1597 return map; 1598 } 1599 1600 /* map_idr_lock should have been held or the map should have been 1601 * protected by rcu read lock. 1602 */ 1603 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) 1604 { 1605 int refold; 1606 1607 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); 1608 if (!refold) 1609 return ERR_PTR(-ENOENT); 1610 if (uref) 1611 atomic64_inc(&map->usercnt); 1612 1613 return map; 1614 } 1615 1616 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) 1617 { 1618 lockdep_assert(rcu_read_lock_held()); 1619 return __bpf_map_inc_not_zero(map, false); 1620 } 1621 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); 1622 1623 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 1624 { 1625 return -ENOTSUPP; 1626 } 1627 1628 static void *__bpf_copy_key(void __user *ukey, u64 key_size) 1629 { 1630 if (key_size) 1631 return vmemdup_user(ukey, key_size); 1632 1633 if (ukey) 1634 return ERR_PTR(-EINVAL); 1635 1636 return NULL; 1637 } 1638 1639 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size) 1640 { 1641 if (key_size) 1642 return kvmemdup_bpfptr(ukey, key_size); 1643 1644 if (!bpfptr_is_null(ukey)) 1645 return ERR_PTR(-EINVAL); 1646 1647 return NULL; 1648 } 1649 1650 /* last field in 'union bpf_attr' used by this command */ 1651 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags 1652 1653 static int map_lookup_elem(union bpf_attr *attr) 1654 { 1655 void __user *ukey = u64_to_user_ptr(attr->key); 1656 void __user *uvalue = u64_to_user_ptr(attr->value); 1657 struct bpf_map *map; 1658 void *key, *value; 1659 u32 value_size; 1660 int err; 1661 1662 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 1663 return -EINVAL; 1664 1665 if (attr->flags & ~BPF_F_LOCK) 1666 return -EINVAL; 1667 1668 CLASS(fd, f)(attr->map_fd); 1669 map = __bpf_map_get(f); 1670 if (IS_ERR(map)) 1671 return PTR_ERR(map); 1672 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) 1673 return -EPERM; 1674 1675 if ((attr->flags & BPF_F_LOCK) && 1676 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) 1677 return -EINVAL; 1678 1679 key = __bpf_copy_key(ukey, map->key_size); 1680 if (IS_ERR(key)) 1681 return PTR_ERR(key); 1682 1683 value_size = bpf_map_value_size(map); 1684 1685 err = -ENOMEM; 1686 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1687 if (!value) 1688 goto free_key; 
1689 1690 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 1691 if (copy_from_user(value, uvalue, value_size)) 1692 err = -EFAULT; 1693 else 1694 err = bpf_map_copy_value(map, key, value, attr->flags); 1695 goto free_value; 1696 } 1697 1698 err = bpf_map_copy_value(map, key, value, attr->flags); 1699 if (err) 1700 goto free_value; 1701 1702 err = -EFAULT; 1703 if (copy_to_user(uvalue, value, value_size) != 0) 1704 goto free_value; 1705 1706 err = 0; 1707 1708 free_value: 1709 kvfree(value); 1710 free_key: 1711 kvfree(key); 1712 return err; 1713 } 1714 1715 1716 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags 1717 1718 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) 1719 { 1720 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); 1721 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); 1722 struct bpf_map *map; 1723 void *key, *value; 1724 u32 value_size; 1725 int err; 1726 1727 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) 1728 return -EINVAL; 1729 1730 CLASS(fd, f)(attr->map_fd); 1731 map = __bpf_map_get(f); 1732 if (IS_ERR(map)) 1733 return PTR_ERR(map); 1734 bpf_map_write_active_inc(map); 1735 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1736 err = -EPERM; 1737 goto err_put; 1738 } 1739 1740 if ((attr->flags & BPF_F_LOCK) && 1741 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1742 err = -EINVAL; 1743 goto err_put; 1744 } 1745 1746 key = ___bpf_copy_key(ukey, map->key_size); 1747 if (IS_ERR(key)) { 1748 err = PTR_ERR(key); 1749 goto err_put; 1750 } 1751 1752 value_size = bpf_map_value_size(map); 1753 value = kvmemdup_bpfptr(uvalue, value_size); 1754 if (IS_ERR(value)) { 1755 err = PTR_ERR(value); 1756 goto free_key; 1757 } 1758 1759 err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags); 1760 if (!err) 1761 maybe_wait_bpf_programs(map); 1762 1763 kvfree(value); 1764 free_key: 1765 kvfree(key); 1766 err_put: 1767 bpf_map_write_active_dec(map); 1768 return err; 1769 } 1770 1771 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key 1772 1773 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr) 1774 { 1775 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); 1776 struct bpf_map *map; 1777 void *key; 1778 int err; 1779 1780 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) 1781 return -EINVAL; 1782 1783 CLASS(fd, f)(attr->map_fd); 1784 map = __bpf_map_get(f); 1785 if (IS_ERR(map)) 1786 return PTR_ERR(map); 1787 bpf_map_write_active_inc(map); 1788 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1789 err = -EPERM; 1790 goto err_put; 1791 } 1792 1793 key = ___bpf_copy_key(ukey, map->key_size); 1794 if (IS_ERR(key)) { 1795 err = PTR_ERR(key); 1796 goto err_put; 1797 } 1798 1799 if (bpf_map_is_offloaded(map)) { 1800 err = bpf_map_offload_delete_elem(map, key); 1801 goto out; 1802 } else if (IS_FD_PROG_ARRAY(map) || 1803 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 1804 /* These maps require sleepable context */ 1805 err = map->ops->map_delete_elem(map, key); 1806 goto out; 1807 } 1808 1809 bpf_disable_instrumentation(); 1810 rcu_read_lock(); 1811 err = map->ops->map_delete_elem(map, key); 1812 rcu_read_unlock(); 1813 bpf_enable_instrumentation(); 1814 if (!err) 1815 maybe_wait_bpf_programs(map); 1816 out: 1817 kvfree(key); 1818 err_put: 1819 bpf_map_write_active_dec(map); 1820 return err; 1821 } 1822 1823 /* last field in 'union bpf_attr' used by this command */ 1824 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key 1825 1826 static int map_get_next_key(union bpf_attr *attr) 1827 { 1828 void __user *ukey = u64_to_user_ptr(attr->key); 1829 void __user 
*unext_key = u64_to_user_ptr(attr->next_key); 1830 struct bpf_map *map; 1831 void *key, *next_key; 1832 int err; 1833 1834 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) 1835 return -EINVAL; 1836 1837 CLASS(fd, f)(attr->map_fd); 1838 map = __bpf_map_get(f); 1839 if (IS_ERR(map)) 1840 return PTR_ERR(map); 1841 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) 1842 return -EPERM; 1843 1844 if (ukey) { 1845 key = __bpf_copy_key(ukey, map->key_size); 1846 if (IS_ERR(key)) 1847 return PTR_ERR(key); 1848 } else { 1849 key = NULL; 1850 } 1851 1852 err = -ENOMEM; 1853 next_key = kvmalloc(map->key_size, GFP_USER); 1854 if (!next_key) 1855 goto free_key; 1856 1857 if (bpf_map_is_offloaded(map)) { 1858 err = bpf_map_offload_get_next_key(map, key, next_key); 1859 goto out; 1860 } 1861 1862 rcu_read_lock(); 1863 err = map->ops->map_get_next_key(map, key, next_key); 1864 rcu_read_unlock(); 1865 out: 1866 if (err) 1867 goto free_next_key; 1868 1869 err = -EFAULT; 1870 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 1871 goto free_next_key; 1872 1873 err = 0; 1874 1875 free_next_key: 1876 kvfree(next_key); 1877 free_key: 1878 kvfree(key); 1879 return err; 1880 } 1881 1882 int generic_map_delete_batch(struct bpf_map *map, 1883 const union bpf_attr *attr, 1884 union bpf_attr __user *uattr) 1885 { 1886 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1887 u32 cp, max_count; 1888 int err = 0; 1889 void *key; 1890 1891 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1892 return -EINVAL; 1893 1894 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1895 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1896 return -EINVAL; 1897 } 1898 1899 max_count = attr->batch.count; 1900 if (!max_count) 1901 return 0; 1902 1903 if (put_user(0, &uattr->batch.count)) 1904 return -EFAULT; 1905 1906 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1907 if (!key) 1908 return -ENOMEM; 1909 1910 for (cp = 0; cp < max_count; cp++) { 1911 err = -EFAULT; 1912 if (copy_from_user(key, keys + cp * map->key_size, 1913 map->key_size)) 1914 break; 1915 1916 if (bpf_map_is_offloaded(map)) { 1917 err = bpf_map_offload_delete_elem(map, key); 1918 break; 1919 } 1920 1921 bpf_disable_instrumentation(); 1922 rcu_read_lock(); 1923 err = map->ops->map_delete_elem(map, key); 1924 rcu_read_unlock(); 1925 bpf_enable_instrumentation(); 1926 if (err) 1927 break; 1928 cond_resched(); 1929 } 1930 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1931 err = -EFAULT; 1932 1933 kvfree(key); 1934 1935 return err; 1936 } 1937 1938 int generic_map_update_batch(struct bpf_map *map, struct file *map_file, 1939 const union bpf_attr *attr, 1940 union bpf_attr __user *uattr) 1941 { 1942 void __user *values = u64_to_user_ptr(attr->batch.values); 1943 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1944 u32 value_size, cp, max_count; 1945 void *key, *value; 1946 int err = 0; 1947 1948 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1949 return -EINVAL; 1950 1951 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1952 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1953 return -EINVAL; 1954 } 1955 1956 value_size = bpf_map_value_size(map); 1957 1958 max_count = attr->batch.count; 1959 if (!max_count) 1960 return 0; 1961 1962 if (put_user(0, &uattr->batch.count)) 1963 return -EFAULT; 1964 1965 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1966 if (!key) 1967 return -ENOMEM; 1968 1969 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1970 if (!value) { 1971 kvfree(key); 1972 return -ENOMEM; 1973 } 1974 1975 for (cp = 0; cp < max_count; cp++) { 
1976 err = -EFAULT; 1977 if (copy_from_user(key, keys + cp * map->key_size, 1978 map->key_size) || 1979 copy_from_user(value, values + cp * value_size, value_size)) 1980 break; 1981 1982 err = bpf_map_update_value(map, map_file, key, value, 1983 attr->batch.elem_flags); 1984 1985 if (err) 1986 break; 1987 cond_resched(); 1988 } 1989 1990 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1991 err = -EFAULT; 1992 1993 kvfree(value); 1994 kvfree(key); 1995 1996 return err; 1997 } 1998 1999 int generic_map_lookup_batch(struct bpf_map *map, 2000 const union bpf_attr *attr, 2001 union bpf_attr __user *uattr) 2002 { 2003 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); 2004 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 2005 void __user *values = u64_to_user_ptr(attr->batch.values); 2006 void __user *keys = u64_to_user_ptr(attr->batch.keys); 2007 void *buf, *buf_prevkey, *prev_key, *key, *value; 2008 u32 value_size, cp, max_count; 2009 int err; 2010 2011 if (attr->batch.elem_flags & ~BPF_F_LOCK) 2012 return -EINVAL; 2013 2014 if ((attr->batch.elem_flags & BPF_F_LOCK) && 2015 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) 2016 return -EINVAL; 2017 2018 value_size = bpf_map_value_size(map); 2019 2020 max_count = attr->batch.count; 2021 if (!max_count) 2022 return 0; 2023 2024 if (put_user(0, &uattr->batch.count)) 2025 return -EFAULT; 2026 2027 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 2028 if (!buf_prevkey) 2029 return -ENOMEM; 2030 2031 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); 2032 if (!buf) { 2033 kvfree(buf_prevkey); 2034 return -ENOMEM; 2035 } 2036 2037 err = -EFAULT; 2038 prev_key = NULL; 2039 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) 2040 goto free_buf; 2041 key = buf; 2042 value = key + map->key_size; 2043 if (ubatch) 2044 prev_key = buf_prevkey; 2045 2046 for (cp = 0; cp < max_count;) { 2047 rcu_read_lock(); 2048 err = map->ops->map_get_next_key(map, prev_key, key); 2049 rcu_read_unlock(); 2050 if (err) 2051 break; 2052 err = bpf_map_copy_value(map, key, value, 2053 attr->batch.elem_flags); 2054 2055 if (err == -ENOENT) 2056 goto next_key; 2057 2058 if (err) 2059 goto free_buf; 2060 2061 if (copy_to_user(keys + cp * map->key_size, key, 2062 map->key_size)) { 2063 err = -EFAULT; 2064 goto free_buf; 2065 } 2066 if (copy_to_user(values + cp * value_size, value, value_size)) { 2067 err = -EFAULT; 2068 goto free_buf; 2069 } 2070 2071 cp++; 2072 next_key: 2073 if (!prev_key) 2074 prev_key = buf_prevkey; 2075 2076 swap(prev_key, key); 2077 cond_resched(); 2078 } 2079 2080 if (err == -EFAULT) 2081 goto free_buf; 2082 2083 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || 2084 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) 2085 err = -EFAULT; 2086 2087 free_buf: 2088 kvfree(buf_prevkey); 2089 kvfree(buf); 2090 return err; 2091 } 2092 2093 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags 2094 2095 static int map_lookup_and_delete_elem(union bpf_attr *attr) 2096 { 2097 void __user *ukey = u64_to_user_ptr(attr->key); 2098 void __user *uvalue = u64_to_user_ptr(attr->value); 2099 struct bpf_map *map; 2100 void *key, *value; 2101 u32 value_size; 2102 int err; 2103 2104 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) 2105 return -EINVAL; 2106 2107 if (attr->flags & ~BPF_F_LOCK) 2108 return -EINVAL; 2109 2110 CLASS(fd, f)(attr->map_fd); 2111 map = __bpf_map_get(f); 2112 if (IS_ERR(map)) 2113 return PTR_ERR(map); 2114 bpf_map_write_active_inc(map); 2115 if 
(!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || 2116 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 2117 err = -EPERM; 2118 goto err_put; 2119 } 2120 2121 if (attr->flags && 2122 (map->map_type == BPF_MAP_TYPE_QUEUE || 2123 map->map_type == BPF_MAP_TYPE_STACK)) { 2124 err = -EINVAL; 2125 goto err_put; 2126 } 2127 2128 if ((attr->flags & BPF_F_LOCK) && 2129 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 2130 err = -EINVAL; 2131 goto err_put; 2132 } 2133 2134 key = __bpf_copy_key(ukey, map->key_size); 2135 if (IS_ERR(key)) { 2136 err = PTR_ERR(key); 2137 goto err_put; 2138 } 2139 2140 value_size = bpf_map_value_size(map); 2141 2142 err = -ENOMEM; 2143 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 2144 if (!value) 2145 goto free_key; 2146 2147 err = -ENOTSUPP; 2148 if (map->map_type == BPF_MAP_TYPE_QUEUE || 2149 map->map_type == BPF_MAP_TYPE_STACK) { 2150 err = map->ops->map_pop_elem(map, value); 2151 } else if (map->map_type == BPF_MAP_TYPE_HASH || 2152 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 2153 map->map_type == BPF_MAP_TYPE_LRU_HASH || 2154 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 2155 if (!bpf_map_is_offloaded(map)) { 2156 bpf_disable_instrumentation(); 2157 rcu_read_lock(); 2158 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); 2159 rcu_read_unlock(); 2160 bpf_enable_instrumentation(); 2161 } 2162 } 2163 2164 if (err) 2165 goto free_value; 2166 2167 if (copy_to_user(uvalue, value, value_size) != 0) { 2168 err = -EFAULT; 2169 goto free_value; 2170 } 2171 2172 err = 0; 2173 2174 free_value: 2175 kvfree(value); 2176 free_key: 2177 kvfree(key); 2178 err_put: 2179 bpf_map_write_active_dec(map); 2180 return err; 2181 } 2182 2183 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 2184 2185 static int map_freeze(const union bpf_attr *attr) 2186 { 2187 int err = 0; 2188 struct bpf_map *map; 2189 2190 if (CHECK_ATTR(BPF_MAP_FREEZE)) 2191 return -EINVAL; 2192 2193 CLASS(fd, f)(attr->map_fd); 2194 map = __bpf_map_get(f); 2195 if (IS_ERR(map)) 2196 return PTR_ERR(map); 2197 2198 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) 2199 return -ENOTSUPP; 2200 2201 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) 2202 return -EPERM; 2203 2204 mutex_lock(&map->freeze_mutex); 2205 if (bpf_map_write_active(map)) { 2206 err = -EBUSY; 2207 goto err_put; 2208 } 2209 if (READ_ONCE(map->frozen)) { 2210 err = -EBUSY; 2211 goto err_put; 2212 } 2213 2214 WRITE_ONCE(map->frozen, true); 2215 err_put: 2216 mutex_unlock(&map->freeze_mutex); 2217 return err; 2218 } 2219 2220 static const struct bpf_prog_ops * const bpf_prog_types[] = { 2221 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2222 [_id] = & _name ## _prog_ops, 2223 #define BPF_MAP_TYPE(_id, _ops) 2224 #define BPF_LINK_TYPE(_id, _name) 2225 #include <linux/bpf_types.h> 2226 #undef BPF_PROG_TYPE 2227 #undef BPF_MAP_TYPE 2228 #undef BPF_LINK_TYPE 2229 }; 2230 2231 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 2232 { 2233 const struct bpf_prog_ops *ops; 2234 2235 if (type >= ARRAY_SIZE(bpf_prog_types)) 2236 return -EINVAL; 2237 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 2238 ops = bpf_prog_types[type]; 2239 if (!ops) 2240 return -EINVAL; 2241 2242 if (!bpf_prog_is_offloaded(prog->aux)) 2243 prog->aux->ops = ops; 2244 else 2245 prog->aux->ops = &bpf_offload_prog_ops; 2246 prog->type = type; 2247 return 0; 2248 } 2249 2250 enum bpf_audit { 2251 BPF_AUDIT_LOAD, 2252 BPF_AUDIT_UNLOAD, 2253 BPF_AUDIT_MAX, 2254 }; 2255 2256 
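/* Human-readable operation names used in the AUDIT_BPF records emitted by
 * bpf_audit_prog() below; the resulting record is roughly of the form
 * "prog-id=<id> op=LOAD" or "prog-id=<id> op=UNLOAD", following the
 * audit_log_format() call (the <id> value is only illustrative).
 */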
static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 2257 [BPF_AUDIT_LOAD] = "LOAD", 2258 [BPF_AUDIT_UNLOAD] = "UNLOAD", 2259 }; 2260 2261 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 2262 { 2263 struct audit_context *ctx = NULL; 2264 struct audit_buffer *ab; 2265 2266 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 2267 return; 2268 if (audit_enabled == AUDIT_OFF) 2269 return; 2270 if (!in_irq() && !irqs_disabled()) 2271 ctx = audit_context(); 2272 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 2273 if (unlikely(!ab)) 2274 return; 2275 audit_log_format(ab, "prog-id=%u op=%s", 2276 prog->aux->id, bpf_audit_str[op]); 2277 audit_log_end(ab); 2278 } 2279 2280 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2281 { 2282 int id; 2283 2284 idr_preload(GFP_KERNEL); 2285 spin_lock_bh(&prog_idr_lock); 2286 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2287 if (id > 0) 2288 prog->aux->id = id; 2289 spin_unlock_bh(&prog_idr_lock); 2290 idr_preload_end(); 2291 2292 /* id is in [1, INT_MAX) */ 2293 if (WARN_ON_ONCE(!id)) 2294 return -ENOSPC; 2295 2296 return id > 0 ? 0 : id; 2297 } 2298 2299 void bpf_prog_free_id(struct bpf_prog *prog) 2300 { 2301 unsigned long flags; 2302 2303 /* cBPF to eBPF migrations are currently not in the idr store. 2304 * Offloaded programs are removed from the store when their device 2305 * disappears - even if someone grabs an fd to them they are unusable, 2306 * simply waiting for refcnt to drop to be freed. 2307 */ 2308 if (!prog->aux->id) 2309 return; 2310 2311 spin_lock_irqsave(&prog_idr_lock, flags); 2312 idr_remove(&prog_idr, prog->aux->id); 2313 prog->aux->id = 0; 2314 spin_unlock_irqrestore(&prog_idr_lock, flags); 2315 } 2316 2317 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2318 { 2319 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2320 2321 kvfree(aux->func_info); 2322 kfree(aux->func_info_aux); 2323 free_uid(aux->user); 2324 security_bpf_prog_free(aux->prog); 2325 bpf_prog_free(aux->prog); 2326 } 2327 2328 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2329 { 2330 bpf_prog_kallsyms_del_all(prog); 2331 btf_put(prog->aux->btf); 2332 module_put(prog->aux->mod); 2333 kvfree(prog->aux->jited_linfo); 2334 kvfree(prog->aux->linfo); 2335 kfree(prog->aux->kfunc_tab); 2336 kfree(prog->aux->ctx_arg_info); 2337 if (prog->aux->attach_btf) 2338 btf_put(prog->aux->attach_btf); 2339 2340 if (deferred) { 2341 if (prog->sleepable) 2342 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2343 else 2344 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2345 } else { 2346 __bpf_prog_put_rcu(&prog->aux->rcu); 2347 } 2348 } 2349 2350 static void bpf_prog_put_deferred(struct work_struct *work) 2351 { 2352 struct bpf_prog_aux *aux; 2353 struct bpf_prog *prog; 2354 2355 aux = container_of(work, struct bpf_prog_aux, work); 2356 prog = aux->prog; 2357 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2358 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2359 bpf_prog_free_id(prog); 2360 __bpf_prog_put_noref(prog, true); 2361 } 2362 2363 static void __bpf_prog_put(struct bpf_prog *prog) 2364 { 2365 struct bpf_prog_aux *aux = prog->aux; 2366 2367 if (atomic64_dec_and_test(&aux->refcnt)) { 2368 if (in_irq() || irqs_disabled()) { 2369 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2370 schedule_work(&aux->work); 2371 } else { 2372 bpf_prog_put_deferred(&aux->work); 2373 } 2374 } 2375 } 2376 2377 void bpf_prog_put(struct bpf_prog *prog) 2378 { 2379 __bpf_prog_put(prog); 2380 } 2381 
EXPORT_SYMBOL_GPL(bpf_prog_put); 2382 2383 static int bpf_prog_release(struct inode *inode, struct file *filp) 2384 { 2385 struct bpf_prog *prog = filp->private_data; 2386 2387 bpf_prog_put(prog); 2388 return 0; 2389 } 2390 2391 struct bpf_prog_kstats { 2392 u64 nsecs; 2393 u64 cnt; 2394 u64 misses; 2395 }; 2396 2397 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2398 { 2399 struct bpf_prog_stats *stats; 2400 unsigned int flags; 2401 2402 stats = this_cpu_ptr(prog->stats); 2403 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2404 u64_stats_inc(&stats->misses); 2405 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2406 } 2407 2408 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2409 struct bpf_prog_kstats *stats) 2410 { 2411 u64 nsecs = 0, cnt = 0, misses = 0; 2412 int cpu; 2413 2414 for_each_possible_cpu(cpu) { 2415 const struct bpf_prog_stats *st; 2416 unsigned int start; 2417 u64 tnsecs, tcnt, tmisses; 2418 2419 st = per_cpu_ptr(prog->stats, cpu); 2420 do { 2421 start = u64_stats_fetch_begin(&st->syncp); 2422 tnsecs = u64_stats_read(&st->nsecs); 2423 tcnt = u64_stats_read(&st->cnt); 2424 tmisses = u64_stats_read(&st->misses); 2425 } while (u64_stats_fetch_retry(&st->syncp, start)); 2426 nsecs += tnsecs; 2427 cnt += tcnt; 2428 misses += tmisses; 2429 } 2430 stats->nsecs = nsecs; 2431 stats->cnt = cnt; 2432 stats->misses = misses; 2433 } 2434 2435 #ifdef CONFIG_PROC_FS 2436 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2437 { 2438 const struct bpf_prog *prog = filp->private_data; 2439 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2440 struct bpf_prog_kstats stats; 2441 2442 bpf_prog_get_stats(prog, &stats); 2443 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2444 seq_printf(m, 2445 "prog_type:\t%u\n" 2446 "prog_jited:\t%u\n" 2447 "prog_tag:\t%s\n" 2448 "memlock:\t%llu\n" 2449 "prog_id:\t%u\n" 2450 "run_time_ns:\t%llu\n" 2451 "run_cnt:\t%llu\n" 2452 "recursion_misses:\t%llu\n" 2453 "verified_insns:\t%u\n", 2454 prog->type, 2455 prog->jited, 2456 prog_tag, 2457 prog->pages * 1ULL << PAGE_SHIFT, 2458 prog->aux->id, 2459 stats.nsecs, 2460 stats.cnt, 2461 stats.misses, 2462 prog->aux->verified_insns); 2463 } 2464 #endif 2465 2466 const struct file_operations bpf_prog_fops = { 2467 #ifdef CONFIG_PROC_FS 2468 .show_fdinfo = bpf_prog_show_fdinfo, 2469 #endif 2470 .release = bpf_prog_release, 2471 .read = bpf_dummy_read, 2472 .write = bpf_dummy_write, 2473 }; 2474 2475 int bpf_prog_new_fd(struct bpf_prog *prog) 2476 { 2477 int ret; 2478 2479 ret = security_bpf_prog(prog); 2480 if (ret < 0) 2481 return ret; 2482 2483 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2484 O_RDWR | O_CLOEXEC); 2485 } 2486 2487 void bpf_prog_add(struct bpf_prog *prog, int i) 2488 { 2489 atomic64_add(i, &prog->aux->refcnt); 2490 } 2491 EXPORT_SYMBOL_GPL(bpf_prog_add); 2492 2493 void bpf_prog_sub(struct bpf_prog *prog, int i) 2494 { 2495 /* Only to be used for undoing previous bpf_prog_add() in some 2496 * error path. We still know that another entity in our call 2497 * path holds a reference to the program, thus atomic_sub() can 2498 * be safely used in such cases! 
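 *
 * A sketch of the intended pairing: after bpf_prog_add(prog, n), an error
 * path that only handed out m < n of those references would call
 * bpf_prog_sub(prog, n - m) to drop the surplus ones.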
2499 */ 2500 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2501 } 2502 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2503 2504 void bpf_prog_inc(struct bpf_prog *prog) 2505 { 2506 atomic64_inc(&prog->aux->refcnt); 2507 } 2508 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2509 2510 /* prog_idr_lock should have been held */ 2511 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2512 { 2513 int refold; 2514 2515 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2516 2517 if (!refold) 2518 return ERR_PTR(-ENOENT); 2519 2520 return prog; 2521 } 2522 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2523 2524 bool bpf_prog_get_ok(struct bpf_prog *prog, 2525 enum bpf_prog_type *attach_type, bool attach_drv) 2526 { 2527 /* not an attachment, just a refcount inc, always allow */ 2528 if (!attach_type) 2529 return true; 2530 2531 if (prog->type != *attach_type) 2532 return false; 2533 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2534 return false; 2535 2536 return true; 2537 } 2538 2539 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2540 bool attach_drv) 2541 { 2542 CLASS(fd, f)(ufd); 2543 struct bpf_prog *prog; 2544 2545 if (fd_empty(f)) 2546 return ERR_PTR(-EBADF); 2547 if (fd_file(f)->f_op != &bpf_prog_fops) 2548 return ERR_PTR(-EINVAL); 2549 2550 prog = fd_file(f)->private_data; 2551 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) 2552 return ERR_PTR(-EINVAL); 2553 2554 bpf_prog_inc(prog); 2555 return prog; 2556 } 2557 2558 struct bpf_prog *bpf_prog_get(u32 ufd) 2559 { 2560 return __bpf_prog_get(ufd, NULL, false); 2561 } 2562 2563 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2564 bool attach_drv) 2565 { 2566 return __bpf_prog_get(ufd, &type, attach_drv); 2567 } 2568 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2569 2570 /* Initially all BPF programs could be loaded w/o specifying 2571 * expected_attach_type. Later for some of them specifying expected_attach_type 2572 * at load time became required so that program could be validated properly. 2573 * Programs of types that are allowed to be loaded both w/ and w/o (for 2574 * backward compatibility) expected_attach_type, should have the default attach 2575 * type assigned to expected_attach_type for the latter case, so that it can be 2576 * validated later at attach time. 2577 * 2578 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2579 * prog type requires it but has some attach types that have to be backward 2580 * compatible. 2581 */ 2582 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2583 { 2584 switch (attr->prog_type) { 2585 case BPF_PROG_TYPE_CGROUP_SOCK: 2586 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2587 * exist so checking for non-zero is the way to go here. 
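 * A CGROUP_SOCK program loaded with expected_attach_type left as zero is
 * therefore treated as BPF_CGROUP_INET_SOCK_CREATE from here on, including
 * for the later attach-time checks.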
2588 */ 2589 if (!attr->expected_attach_type) 2590 attr->expected_attach_type = 2591 BPF_CGROUP_INET_SOCK_CREATE; 2592 break; 2593 case BPF_PROG_TYPE_SK_REUSEPORT: 2594 if (!attr->expected_attach_type) 2595 attr->expected_attach_type = 2596 BPF_SK_REUSEPORT_SELECT; 2597 break; 2598 } 2599 } 2600 2601 static int 2602 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2603 enum bpf_attach_type expected_attach_type, 2604 struct btf *attach_btf, u32 btf_id, 2605 struct bpf_prog *dst_prog) 2606 { 2607 if (btf_id) { 2608 if (btf_id > BTF_MAX_TYPE) 2609 return -EINVAL; 2610 2611 if (!attach_btf && !dst_prog) 2612 return -EINVAL; 2613 2614 switch (prog_type) { 2615 case BPF_PROG_TYPE_TRACING: 2616 case BPF_PROG_TYPE_LSM: 2617 case BPF_PROG_TYPE_STRUCT_OPS: 2618 case BPF_PROG_TYPE_EXT: 2619 break; 2620 default: 2621 return -EINVAL; 2622 } 2623 } 2624 2625 if (attach_btf && (!btf_id || dst_prog)) 2626 return -EINVAL; 2627 2628 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2629 prog_type != BPF_PROG_TYPE_EXT) 2630 return -EINVAL; 2631 2632 switch (prog_type) { 2633 case BPF_PROG_TYPE_CGROUP_SOCK: 2634 switch (expected_attach_type) { 2635 case BPF_CGROUP_INET_SOCK_CREATE: 2636 case BPF_CGROUP_INET_SOCK_RELEASE: 2637 case BPF_CGROUP_INET4_POST_BIND: 2638 case BPF_CGROUP_INET6_POST_BIND: 2639 return 0; 2640 default: 2641 return -EINVAL; 2642 } 2643 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2644 switch (expected_attach_type) { 2645 case BPF_CGROUP_INET4_BIND: 2646 case BPF_CGROUP_INET6_BIND: 2647 case BPF_CGROUP_INET4_CONNECT: 2648 case BPF_CGROUP_INET6_CONNECT: 2649 case BPF_CGROUP_UNIX_CONNECT: 2650 case BPF_CGROUP_INET4_GETPEERNAME: 2651 case BPF_CGROUP_INET6_GETPEERNAME: 2652 case BPF_CGROUP_UNIX_GETPEERNAME: 2653 case BPF_CGROUP_INET4_GETSOCKNAME: 2654 case BPF_CGROUP_INET6_GETSOCKNAME: 2655 case BPF_CGROUP_UNIX_GETSOCKNAME: 2656 case BPF_CGROUP_UDP4_SENDMSG: 2657 case BPF_CGROUP_UDP6_SENDMSG: 2658 case BPF_CGROUP_UNIX_SENDMSG: 2659 case BPF_CGROUP_UDP4_RECVMSG: 2660 case BPF_CGROUP_UDP6_RECVMSG: 2661 case BPF_CGROUP_UNIX_RECVMSG: 2662 return 0; 2663 default: 2664 return -EINVAL; 2665 } 2666 case BPF_PROG_TYPE_CGROUP_SKB: 2667 switch (expected_attach_type) { 2668 case BPF_CGROUP_INET_INGRESS: 2669 case BPF_CGROUP_INET_EGRESS: 2670 return 0; 2671 default: 2672 return -EINVAL; 2673 } 2674 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2675 switch (expected_attach_type) { 2676 case BPF_CGROUP_SETSOCKOPT: 2677 case BPF_CGROUP_GETSOCKOPT: 2678 return 0; 2679 default: 2680 return -EINVAL; 2681 } 2682 case BPF_PROG_TYPE_SK_LOOKUP: 2683 if (expected_attach_type == BPF_SK_LOOKUP) 2684 return 0; 2685 return -EINVAL; 2686 case BPF_PROG_TYPE_SK_REUSEPORT: 2687 switch (expected_attach_type) { 2688 case BPF_SK_REUSEPORT_SELECT: 2689 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2690 return 0; 2691 default: 2692 return -EINVAL; 2693 } 2694 case BPF_PROG_TYPE_NETFILTER: 2695 if (expected_attach_type == BPF_NETFILTER) 2696 return 0; 2697 return -EINVAL; 2698 case BPF_PROG_TYPE_SYSCALL: 2699 case BPF_PROG_TYPE_EXT: 2700 if (expected_attach_type) 2701 return -EINVAL; 2702 fallthrough; 2703 default: 2704 return 0; 2705 } 2706 } 2707 2708 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2709 { 2710 switch (prog_type) { 2711 case BPF_PROG_TYPE_SCHED_CLS: 2712 case BPF_PROG_TYPE_SCHED_ACT: 2713 case BPF_PROG_TYPE_XDP: 2714 case BPF_PROG_TYPE_LWT_IN: 2715 case BPF_PROG_TYPE_LWT_OUT: 2716 case BPF_PROG_TYPE_LWT_XMIT: 2717 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2718 case BPF_PROG_TYPE_SK_SKB: 2719 case 
BPF_PROG_TYPE_SK_MSG: 2720 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2721 case BPF_PROG_TYPE_CGROUP_DEVICE: 2722 case BPF_PROG_TYPE_CGROUP_SOCK: 2723 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2724 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2725 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2726 case BPF_PROG_TYPE_SOCK_OPS: 2727 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2728 case BPF_PROG_TYPE_NETFILTER: 2729 return true; 2730 case BPF_PROG_TYPE_CGROUP_SKB: 2731 /* always unpriv */ 2732 case BPF_PROG_TYPE_SK_REUSEPORT: 2733 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2734 default: 2735 return false; 2736 } 2737 } 2738 2739 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2740 { 2741 switch (prog_type) { 2742 case BPF_PROG_TYPE_KPROBE: 2743 case BPF_PROG_TYPE_TRACEPOINT: 2744 case BPF_PROG_TYPE_PERF_EVENT: 2745 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2746 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2747 case BPF_PROG_TYPE_TRACING: 2748 case BPF_PROG_TYPE_LSM: 2749 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2750 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2751 return true; 2752 default: 2753 return false; 2754 } 2755 } 2756 2757 /* last field in 'union bpf_attr' used by this command */ 2758 #define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt 2759 2760 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2761 { 2762 enum bpf_prog_type type = attr->prog_type; 2763 struct bpf_prog *prog, *dst_prog = NULL; 2764 struct btf *attach_btf = NULL; 2765 struct bpf_token *token = NULL; 2766 bool bpf_cap; 2767 int err; 2768 char license[128]; 2769 2770 if (CHECK_ATTR(BPF_PROG_LOAD)) 2771 return -EINVAL; 2772 2773 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2774 BPF_F_ANY_ALIGNMENT | 2775 BPF_F_TEST_STATE_FREQ | 2776 BPF_F_SLEEPABLE | 2777 BPF_F_TEST_RND_HI32 | 2778 BPF_F_XDP_HAS_FRAGS | 2779 BPF_F_XDP_DEV_BOUND_ONLY | 2780 BPF_F_TEST_REG_INVARIANTS | 2781 BPF_F_TOKEN_FD)) 2782 return -EINVAL; 2783 2784 bpf_prog_load_fixup_attach_type(attr); 2785 2786 if (attr->prog_flags & BPF_F_TOKEN_FD) { 2787 token = bpf_token_get_from_fd(attr->prog_token_fd); 2788 if (IS_ERR(token)) 2789 return PTR_ERR(token); 2790 /* if current token doesn't grant prog loading permissions, 2791 * then we can't use this token, so ignore it and rely on 2792 * system-wide capabilities checks 2793 */ 2794 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) || 2795 !bpf_token_allow_prog_type(token, attr->prog_type, 2796 attr->expected_attach_type)) { 2797 bpf_token_put(token); 2798 token = NULL; 2799 } 2800 } 2801 2802 bpf_cap = bpf_token_capable(token, CAP_BPF); 2803 err = -EPERM; 2804 2805 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2806 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2807 !bpf_cap) 2808 goto put_token; 2809 2810 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2811 * creation for unprivileged users; other actions depend 2812 * on fd availability and access to bpffs, so are dependent on 2813 * object creation success. Even with unprivileged BPF disabled, 2814 * capability checks are still carried out for these 2815 * and other operations. 2816 */ 2817 if (sysctl_unprivileged_bpf_disabled && !bpf_cap) 2818 goto put_token; 2819 2820 if (attr->insn_cnt == 0 || 2821 attr->insn_cnt > (bpf_cap ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) { 2822 err = -E2BIG; 2823 goto put_token; 2824 } 2825 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2826 type != BPF_PROG_TYPE_CGROUP_SKB && 2827 !bpf_cap) 2828 goto put_token; 2829 2830 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN)) 2831 goto put_token; 2832 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON)) 2833 goto put_token; 2834 2835 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2836 * or btf, we need to check which one it is 2837 */ 2838 if (attr->attach_prog_fd) { 2839 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2840 if (IS_ERR(dst_prog)) { 2841 dst_prog = NULL; 2842 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2843 if (IS_ERR(attach_btf)) { 2844 err = -EINVAL; 2845 goto put_token; 2846 } 2847 if (!btf_is_kernel(attach_btf)) { 2848 /* attaching through specifying bpf_prog's BTF 2849 * objects directly might be supported eventually 2850 */ 2851 btf_put(attach_btf); 2852 err = -ENOTSUPP; 2853 goto put_token; 2854 } 2855 } 2856 } else if (attr->attach_btf_id) { 2857 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2858 attach_btf = bpf_get_btf_vmlinux(); 2859 if (IS_ERR(attach_btf)) { 2860 err = PTR_ERR(attach_btf); 2861 goto put_token; 2862 } 2863 if (!attach_btf) { 2864 err = -EINVAL; 2865 goto put_token; 2866 } 2867 btf_get(attach_btf); 2868 } 2869 2870 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2871 attach_btf, attr->attach_btf_id, 2872 dst_prog)) { 2873 if (dst_prog) 2874 bpf_prog_put(dst_prog); 2875 if (attach_btf) 2876 btf_put(attach_btf); 2877 err = -EINVAL; 2878 goto put_token; 2879 } 2880 2881 /* plain bpf_prog allocation */ 2882 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2883 if (!prog) { 2884 if (dst_prog) 2885 bpf_prog_put(dst_prog); 2886 if (attach_btf) 2887 btf_put(attach_btf); 2888 err = -EINVAL; 2889 goto put_token; 2890 } 2891 2892 prog->expected_attach_type = attr->expected_attach_type; 2893 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); 2894 prog->aux->attach_btf = attach_btf; 2895 prog->aux->attach_btf_id = attr->attach_btf_id; 2896 prog->aux->dst_prog = dst_prog; 2897 prog->aux->dev_bound = !!attr->prog_ifindex; 2898 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2899 2900 /* move token into prog->aux, reuse taken refcnt */ 2901 prog->aux->token = token; 2902 token = NULL; 2903 2904 prog->aux->user = get_current_user(); 2905 prog->len = attr->insn_cnt; 2906 2907 err = -EFAULT; 2908 if (copy_from_bpfptr(prog->insns, 2909 make_bpfptr(attr->insns, uattr.is_kernel), 2910 bpf_prog_insn_size(prog)) != 0) 2911 goto free_prog; 2912 /* copy eBPF program license from user space */ 2913 if (strncpy_from_bpfptr(license, 2914 make_bpfptr(attr->license, uattr.is_kernel), 2915 sizeof(license) - 1) < 0) 2916 goto free_prog; 2917 license[sizeof(license) - 1] = 0; 2918 2919 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2920 prog->gpl_compatible = license_is_gpl_compatible(license) ? 
1 : 0; 2921 2922 prog->orig_prog = NULL; 2923 prog->jited = 0; 2924 2925 atomic64_set(&prog->aux->refcnt, 1); 2926 2927 if (bpf_prog_is_dev_bound(prog->aux)) { 2928 err = bpf_prog_dev_bound_init(prog, attr); 2929 if (err) 2930 goto free_prog; 2931 } 2932 2933 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2934 bpf_prog_is_dev_bound(dst_prog->aux)) { 2935 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2936 if (err) 2937 goto free_prog; 2938 } 2939 2940 /* 2941 * Bookkeeping for managing the program attachment chain. 2942 * 2943 * It might be tempting to set attach_tracing_prog flag at the attachment 2944 * time, but this will not prevent from loading bunch of tracing prog 2945 * first, then attach them one to another. 2946 * 2947 * The flag attach_tracing_prog is set for the whole program lifecycle, and 2948 * doesn't have to be cleared in bpf_tracing_link_release, since tracing 2949 * programs cannot change attachment target. 2950 */ 2951 if (type == BPF_PROG_TYPE_TRACING && dst_prog && 2952 dst_prog->type == BPF_PROG_TYPE_TRACING) { 2953 prog->aux->attach_tracing_prog = true; 2954 } 2955 2956 /* find program type: socket_filter vs tracing_filter */ 2957 err = find_prog_type(type, prog); 2958 if (err < 0) 2959 goto free_prog; 2960 2961 prog->aux->load_time = ktime_get_boottime_ns(); 2962 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2963 sizeof(attr->prog_name)); 2964 if (err < 0) 2965 goto free_prog; 2966 2967 err = security_bpf_prog_load(prog, attr, token, uattr.is_kernel); 2968 if (err) 2969 goto free_prog_sec; 2970 2971 /* run eBPF verifier */ 2972 err = bpf_check(&prog, attr, uattr, uattr_size); 2973 if (err < 0) 2974 goto free_used_maps; 2975 2976 prog = bpf_prog_select_runtime(prog, &err); 2977 if (err < 0) 2978 goto free_used_maps; 2979 2980 err = bpf_prog_alloc_id(prog); 2981 if (err) 2982 goto free_used_maps; 2983 2984 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2985 * effectively publicly exposed. However, retrieving via 2986 * bpf_prog_get_fd_by_id() will take another reference, 2987 * therefore it cannot be gone underneath us. 2988 * 2989 * Only for the time /after/ successful bpf_prog_new_fd() 2990 * and before returning to userspace, we might just hold 2991 * one reference and any parallel close on that fd could 2992 * rip everything out. Hence, below notifications must 2993 * happen before bpf_prog_new_fd(). 2994 * 2995 * Also, any failure handling from this point onwards must 2996 * be using bpf_prog_put() given the program is exposed. 2997 */ 2998 bpf_prog_kallsyms_add(prog); 2999 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 3000 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 3001 3002 err = bpf_prog_new_fd(prog); 3003 if (err < 0) 3004 bpf_prog_put(prog); 3005 return err; 3006 3007 free_used_maps: 3008 /* In case we have subprogs, we need to wait for a grace 3009 * period before we can tear down JIT memory since symbols 3010 * are already exposed under kallsyms. 
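 * Passing prog->aux->real_func_cnt as the 'deferred' argument to
 * __bpf_prog_put_noref() below makes the teardown go through an RCU
 * callback when subprograms were created, and frees immediately otherwise.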
3011 */ 3012 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); 3013 return err; 3014 3015 free_prog_sec: 3016 security_bpf_prog_free(prog); 3017 free_prog: 3018 free_uid(prog->aux->user); 3019 if (prog->aux->attach_btf) 3020 btf_put(prog->aux->attach_btf); 3021 bpf_prog_free(prog); 3022 put_token: 3023 bpf_token_put(token); 3024 return err; 3025 } 3026 3027 #define BPF_OBJ_LAST_FIELD path_fd 3028 3029 static int bpf_obj_pin(const union bpf_attr *attr) 3030 { 3031 int path_fd; 3032 3033 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 3034 return -EINVAL; 3035 3036 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3037 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3038 return -EINVAL; 3039 3040 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3041 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 3042 u64_to_user_ptr(attr->pathname)); 3043 } 3044 3045 static int bpf_obj_get(const union bpf_attr *attr) 3046 { 3047 int path_fd; 3048 3049 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 3050 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 3051 return -EINVAL; 3052 3053 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 3054 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 3055 return -EINVAL; 3056 3057 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 3058 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 3059 attr->file_flags); 3060 } 3061 3062 /* bpf_link_init_sleepable() allows to specify whether BPF link itself has 3063 * "sleepable" semantics, which normally would mean that BPF link's attach 3064 * hook can dereference link or link's underlying program for some time after 3065 * detachment due to RCU Tasks Trace-based lifetime protection scheme. 3066 * BPF program itself can be non-sleepable, yet, because it's transitively 3067 * reachable through BPF link, its freeing has to be delayed until after RCU 3068 * Tasks Trace GP. 3069 */ 3070 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, 3071 const struct bpf_link_ops *ops, struct bpf_prog *prog, 3072 bool sleepable) 3073 { 3074 WARN_ON(ops->dealloc && ops->dealloc_deferred); 3075 atomic64_set(&link->refcnt, 1); 3076 link->type = type; 3077 link->sleepable = sleepable; 3078 link->id = 0; 3079 link->ops = ops; 3080 link->prog = prog; 3081 } 3082 3083 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 3084 const struct bpf_link_ops *ops, struct bpf_prog *prog) 3085 { 3086 bpf_link_init_sleepable(link, type, ops, prog, false); 3087 } 3088 3089 static void bpf_link_free_id(int id) 3090 { 3091 if (!id) 3092 return; 3093 3094 spin_lock_bh(&link_idr_lock); 3095 idr_remove(&link_idr, id); 3096 spin_unlock_bh(&link_idr_lock); 3097 } 3098 3099 /* Clean up bpf_link and corresponding anon_inode file and FD. After 3100 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 3101 * anon_inode's release() call. This helper marks bpf_link as 3102 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 3103 * is not decremented, it's the responsibility of a calling code that failed 3104 * to complete bpf_link initialization. 3105 * This helper eventually calls link's dealloc callback, but does not call 3106 * link's release callback. 
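 *
 * Typical usage in this file is roughly (a sketch, see e.g.
 * bpf_raw_tp_link_attach() or bpf_perf_link_attach()):
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		goto out;
 *	}
 *	err = <hook-specific attach step>;
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);
 *		goto out;
 *	}
 *	return bpf_link_settle(&link_primer);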
3107 */ 3108 void bpf_link_cleanup(struct bpf_link_primer *primer) 3109 { 3110 primer->link->prog = NULL; 3111 bpf_link_free_id(primer->id); 3112 fput(primer->file); 3113 put_unused_fd(primer->fd); 3114 } 3115 3116 void bpf_link_inc(struct bpf_link *link) 3117 { 3118 atomic64_inc(&link->refcnt); 3119 } 3120 3121 static void bpf_link_dealloc(struct bpf_link *link) 3122 { 3123 /* now that we know that bpf_link itself can't be reached, put underlying BPF program */ 3124 if (link->prog) 3125 bpf_prog_put(link->prog); 3126 3127 /* free bpf_link and its containing memory */ 3128 if (link->ops->dealloc_deferred) 3129 link->ops->dealloc_deferred(link); 3130 else 3131 link->ops->dealloc(link); 3132 } 3133 3134 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 3135 { 3136 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 3137 3138 bpf_link_dealloc(link); 3139 } 3140 3141 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 3142 { 3143 if (rcu_trace_implies_rcu_gp()) 3144 bpf_link_defer_dealloc_rcu_gp(rcu); 3145 else 3146 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 3147 } 3148 3149 /* bpf_link_free is guaranteed to be called from process context */ 3150 static void bpf_link_free(struct bpf_link *link) 3151 { 3152 const struct bpf_link_ops *ops = link->ops; 3153 3154 bpf_link_free_id(link->id); 3155 /* detach BPF program, clean up used resources */ 3156 if (link->prog) 3157 ops->release(link); 3158 if (ops->dealloc_deferred) { 3159 /* Schedule BPF link deallocation, which will only then 3160 * trigger putting BPF program refcount. 3161 * If underlying BPF program is sleepable or BPF link's target 3162 * attach hookpoint is sleepable or otherwise requires RCU GPs 3163 * to ensure link and its underlying BPF program is not 3164 * reachable anymore, we need to first wait for RCU tasks 3165 * trace sync, and then go through "classic" RCU grace period 3166 */ 3167 if (link->sleepable || (link->prog && link->prog->sleepable)) 3168 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 3169 else 3170 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3171 } else if (ops->dealloc) { 3172 bpf_link_dealloc(link); 3173 } 3174 } 3175 3176 static void bpf_link_put_deferred(struct work_struct *work) 3177 { 3178 struct bpf_link *link = container_of(work, struct bpf_link, work); 3179 3180 bpf_link_free(link); 3181 } 3182 3183 /* bpf_link_put might be called from atomic context. It needs to be called 3184 * from sleepable context in order to acquire sleeping locks during the process. 
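 * That is why the final put defers the actual freeing to a workqueue via
 * bpf_link_put_deferred(); bpf_link_put_direct() below is the variant for
 * callers known to run in process context.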
3185 */ 3186 void bpf_link_put(struct bpf_link *link) 3187 { 3188 if (!atomic64_dec_and_test(&link->refcnt)) 3189 return; 3190 3191 INIT_WORK(&link->work, bpf_link_put_deferred); 3192 schedule_work(&link->work); 3193 } 3194 EXPORT_SYMBOL(bpf_link_put); 3195 3196 static void bpf_link_put_direct(struct bpf_link *link) 3197 { 3198 if (!atomic64_dec_and_test(&link->refcnt)) 3199 return; 3200 bpf_link_free(link); 3201 } 3202 3203 static int bpf_link_release(struct inode *inode, struct file *filp) 3204 { 3205 struct bpf_link *link = filp->private_data; 3206 3207 bpf_link_put_direct(link); 3208 return 0; 3209 } 3210 3211 #ifdef CONFIG_PROC_FS 3212 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 3213 #define BPF_MAP_TYPE(_id, _ops) 3214 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 3215 static const char *bpf_link_type_strs[] = { 3216 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 3217 #include <linux/bpf_types.h> 3218 }; 3219 #undef BPF_PROG_TYPE 3220 #undef BPF_MAP_TYPE 3221 #undef BPF_LINK_TYPE 3222 3223 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 3224 { 3225 const struct bpf_link *link = filp->private_data; 3226 const struct bpf_prog *prog = link->prog; 3227 enum bpf_link_type type = link->type; 3228 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 3229 3230 if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) { 3231 seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]); 3232 } else { 3233 WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type); 3234 seq_printf(m, "link_type:\t<%u>\n", type); 3235 } 3236 seq_printf(m, "link_id:\t%u\n", link->id); 3237 3238 if (prog) { 3239 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 3240 seq_printf(m, 3241 "prog_tag:\t%s\n" 3242 "prog_id:\t%u\n", 3243 prog_tag, 3244 prog->aux->id); 3245 } 3246 if (link->ops->show_fdinfo) 3247 link->ops->show_fdinfo(link, m); 3248 } 3249 #endif 3250 3251 static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts) 3252 { 3253 struct bpf_link *link = file->private_data; 3254 3255 return link->ops->poll(file, pts); 3256 } 3257 3258 static const struct file_operations bpf_link_fops = { 3259 #ifdef CONFIG_PROC_FS 3260 .show_fdinfo = bpf_link_show_fdinfo, 3261 #endif 3262 .release = bpf_link_release, 3263 .read = bpf_dummy_read, 3264 .write = bpf_dummy_write, 3265 }; 3266 3267 static const struct file_operations bpf_link_fops_poll = { 3268 #ifdef CONFIG_PROC_FS 3269 .show_fdinfo = bpf_link_show_fdinfo, 3270 #endif 3271 .release = bpf_link_release, 3272 .read = bpf_dummy_read, 3273 .write = bpf_dummy_write, 3274 .poll = bpf_link_poll, 3275 }; 3276 3277 static int bpf_link_alloc_id(struct bpf_link *link) 3278 { 3279 int id; 3280 3281 idr_preload(GFP_KERNEL); 3282 spin_lock_bh(&link_idr_lock); 3283 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 3284 spin_unlock_bh(&link_idr_lock); 3285 idr_preload_end(); 3286 3287 return id; 3288 } 3289 3290 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3291 * reserving unused FD and allocating ID from link_idr. This is to be paired 3292 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3293 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3294 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the 3295 transient state is passed around in struct bpf_link_primer.
3296 * This is preferred way to create and initialize bpf_link, especially when 3297 * there are complicated and expensive operations in between creating bpf_link 3298 * itself and attaching it to BPF hook. By using bpf_link_prime() and 3299 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 3300 * expensive (and potentially failing) roll back operations in a rare case 3301 * that file, FD, or ID can't be allocated. 3302 */ 3303 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3304 { 3305 struct file *file; 3306 int fd, id; 3307 3308 fd = get_unused_fd_flags(O_CLOEXEC); 3309 if (fd < 0) 3310 return fd; 3311 3312 3313 id = bpf_link_alloc_id(link); 3314 if (id < 0) { 3315 put_unused_fd(fd); 3316 return id; 3317 } 3318 3319 file = anon_inode_getfile("bpf_link", 3320 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3321 link, O_CLOEXEC); 3322 if (IS_ERR(file)) { 3323 bpf_link_free_id(id); 3324 put_unused_fd(fd); 3325 return PTR_ERR(file); 3326 } 3327 3328 primer->link = link; 3329 primer->file = file; 3330 primer->fd = fd; 3331 primer->id = id; 3332 return 0; 3333 } 3334 3335 int bpf_link_settle(struct bpf_link_primer *primer) 3336 { 3337 /* make bpf_link fetchable by ID */ 3338 spin_lock_bh(&link_idr_lock); 3339 primer->link->id = primer->id; 3340 spin_unlock_bh(&link_idr_lock); 3341 /* make bpf_link fetchable by FD */ 3342 fd_install(primer->fd, primer->file); 3343 /* pass through installed FD */ 3344 return primer->fd; 3345 } 3346 3347 int bpf_link_new_fd(struct bpf_link *link) 3348 { 3349 return anon_inode_getfd("bpf-link", 3350 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3351 link, O_CLOEXEC); 3352 } 3353 3354 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3355 { 3356 CLASS(fd, f)(ufd); 3357 struct bpf_link *link; 3358 3359 if (fd_empty(f)) 3360 return ERR_PTR(-EBADF); 3361 if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll) 3362 return ERR_PTR(-EINVAL); 3363 3364 link = fd_file(f)->private_data; 3365 bpf_link_inc(link); 3366 return link; 3367 } 3368 EXPORT_SYMBOL_NS(bpf_link_get_from_fd, "BPF_INTERNAL"); 3369 3370 static void bpf_tracing_link_release(struct bpf_link *link) 3371 { 3372 struct bpf_tracing_link *tr_link = 3373 container_of(link, struct bpf_tracing_link, link.link); 3374 3375 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3376 tr_link->trampoline, 3377 tr_link->tgt_prog)); 3378 3379 bpf_trampoline_put(tr_link->trampoline); 3380 3381 /* tgt_prog is NULL if target is a kernel function */ 3382 if (tr_link->tgt_prog) 3383 bpf_prog_put(tr_link->tgt_prog); 3384 } 3385 3386 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3387 { 3388 struct bpf_tracing_link *tr_link = 3389 container_of(link, struct bpf_tracing_link, link.link); 3390 3391 kfree(tr_link); 3392 } 3393 3394 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3395 struct seq_file *seq) 3396 { 3397 struct bpf_tracing_link *tr_link = 3398 container_of(link, struct bpf_tracing_link, link.link); 3399 u32 target_btf_id, target_obj_id; 3400 3401 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3402 &target_obj_id, &target_btf_id); 3403 seq_printf(seq, 3404 "attach_type:\t%d\n" 3405 "target_obj_id:\t%u\n" 3406 "target_btf_id:\t%u\n", 3407 tr_link->attach_type, 3408 target_obj_id, 3409 target_btf_id); 3410 } 3411 3412 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3413 struct bpf_link_info *info) 3414 { 3415 struct bpf_tracing_link *tr_link = 3416 container_of(link, 
struct bpf_tracing_link, link.link); 3417 3418 info->tracing.attach_type = tr_link->attach_type; 3419 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3420 &info->tracing.target_obj_id, 3421 &info->tracing.target_btf_id); 3422 3423 return 0; 3424 } 3425 3426 static const struct bpf_link_ops bpf_tracing_link_lops = { 3427 .release = bpf_tracing_link_release, 3428 .dealloc = bpf_tracing_link_dealloc, 3429 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3430 .fill_link_info = bpf_tracing_link_fill_link_info, 3431 }; 3432 3433 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 3434 int tgt_prog_fd, 3435 u32 btf_id, 3436 u64 bpf_cookie) 3437 { 3438 struct bpf_link_primer link_primer; 3439 struct bpf_prog *tgt_prog = NULL; 3440 struct bpf_trampoline *tr = NULL; 3441 struct bpf_tracing_link *link; 3442 u64 key = 0; 3443 int err; 3444 3445 switch (prog->type) { 3446 case BPF_PROG_TYPE_TRACING: 3447 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3448 prog->expected_attach_type != BPF_TRACE_FEXIT && 3449 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3450 err = -EINVAL; 3451 goto out_put_prog; 3452 } 3453 break; 3454 case BPF_PROG_TYPE_EXT: 3455 if (prog->expected_attach_type != 0) { 3456 err = -EINVAL; 3457 goto out_put_prog; 3458 } 3459 break; 3460 case BPF_PROG_TYPE_LSM: 3461 if (prog->expected_attach_type != BPF_LSM_MAC) { 3462 err = -EINVAL; 3463 goto out_put_prog; 3464 } 3465 break; 3466 default: 3467 err = -EINVAL; 3468 goto out_put_prog; 3469 } 3470 3471 if (!!tgt_prog_fd != !!btf_id) { 3472 err = -EINVAL; 3473 goto out_put_prog; 3474 } 3475 3476 if (tgt_prog_fd) { 3477 /* 3478 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this 3479 * part would be changed to implement the same for 3480 * BPF_PROG_TYPE_TRACING, do not forget to update the way how 3481 * attach_tracing_prog flag is set. 3482 */ 3483 if (prog->type != BPF_PROG_TYPE_EXT) { 3484 err = -EINVAL; 3485 goto out_put_prog; 3486 } 3487 3488 tgt_prog = bpf_prog_get(tgt_prog_fd); 3489 if (IS_ERR(tgt_prog)) { 3490 err = PTR_ERR(tgt_prog); 3491 tgt_prog = NULL; 3492 goto out_put_prog; 3493 } 3494 3495 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3496 } 3497 3498 link = kzalloc(sizeof(*link), GFP_USER); 3499 if (!link) { 3500 err = -ENOMEM; 3501 goto out_put_prog; 3502 } 3503 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3504 &bpf_tracing_link_lops, prog); 3505 link->attach_type = prog->expected_attach_type; 3506 link->link.cookie = bpf_cookie; 3507 3508 mutex_lock(&prog->aux->dst_mutex); 3509 3510 /* There are a few possible cases here: 3511 * 3512 * - if prog->aux->dst_trampoline is set, the program was just loaded 3513 * and not yet attached to anything, so we can use the values stored 3514 * in prog->aux 3515 * 3516 * - if prog->aux->dst_trampoline is NULL, the program has already been 3517 * attached to a target and its initial target was cleared (below) 3518 * 3519 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3520 * target_btf_id using the link_create API. 3521 * 3522 * - if tgt_prog == NULL when this function was called using the old 3523 * raw_tracepoint_open API, and we need a target from prog->aux 3524 * 3525 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3526 * was detached and is going for re-attachment. 
3527 * 3528 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3529 * are NULL, then program was already attached and user did not provide 3530 * tgt_prog_fd so we have no way to find out or create trampoline 3531 */ 3532 if (!prog->aux->dst_trampoline && !tgt_prog) { 3533 /* 3534 * Allow re-attach for TRACING and LSM programs. If it's 3535 * currently linked, bpf_trampoline_link_prog will fail. 3536 * EXT programs need to specify tgt_prog_fd, so they 3537 * re-attach in separate code path. 3538 */ 3539 if (prog->type != BPF_PROG_TYPE_TRACING && 3540 prog->type != BPF_PROG_TYPE_LSM) { 3541 err = -EINVAL; 3542 goto out_unlock; 3543 } 3544 /* We can allow re-attach only if we have valid attach_btf. */ 3545 if (!prog->aux->attach_btf) { 3546 err = -EINVAL; 3547 goto out_unlock; 3548 } 3549 btf_id = prog->aux->attach_btf_id; 3550 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3551 } 3552 3553 if (!prog->aux->dst_trampoline || 3554 (key && key != prog->aux->dst_trampoline->key)) { 3555 /* If there is no saved target, or the specified target is 3556 * different from the destination specified at load time, we 3557 * need a new trampoline and a check for compatibility 3558 */ 3559 struct bpf_attach_target_info tgt_info = {}; 3560 3561 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3562 &tgt_info); 3563 if (err) 3564 goto out_unlock; 3565 3566 if (tgt_info.tgt_mod) { 3567 module_put(prog->aux->mod); 3568 prog->aux->mod = tgt_info.tgt_mod; 3569 } 3570 3571 tr = bpf_trampoline_get(key, &tgt_info); 3572 if (!tr) { 3573 err = -ENOMEM; 3574 goto out_unlock; 3575 } 3576 } else { 3577 /* The caller didn't specify a target, or the target was the 3578 * same as the destination supplied during program load. This 3579 * means we can reuse the trampoline and reference from program 3580 * load time, and there is no need to allocate a new one. This 3581 * can only happen once for any program, as the saved values in 3582 * prog->aux are cleared below. 3583 */ 3584 tr = prog->aux->dst_trampoline; 3585 tgt_prog = prog->aux->dst_prog; 3586 } 3587 3588 err = bpf_link_prime(&link->link.link, &link_primer); 3589 if (err) 3590 goto out_unlock; 3591 3592 err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); 3593 if (err) { 3594 bpf_link_cleanup(&link_primer); 3595 link = NULL; 3596 goto out_unlock; 3597 } 3598 3599 link->tgt_prog = tgt_prog; 3600 link->trampoline = tr; 3601 3602 /* Always clear the trampoline and target prog from prog->aux to make 3603 * sure the original attach destination is not kept alive after a 3604 * program is (re-)attached to another target. 
3605 */ 3606 if (prog->aux->dst_prog && 3607 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3608 /* got extra prog ref from syscall, or attaching to different prog */ 3609 bpf_prog_put(prog->aux->dst_prog); 3610 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3611 /* we allocated a new trampoline, so free the old one */ 3612 bpf_trampoline_put(prog->aux->dst_trampoline); 3613 3614 prog->aux->dst_prog = NULL; 3615 prog->aux->dst_trampoline = NULL; 3616 mutex_unlock(&prog->aux->dst_mutex); 3617 3618 return bpf_link_settle(&link_primer); 3619 out_unlock: 3620 if (tr && tr != prog->aux->dst_trampoline) 3621 bpf_trampoline_put(tr); 3622 mutex_unlock(&prog->aux->dst_mutex); 3623 kfree(link); 3624 out_put_prog: 3625 if (tgt_prog_fd && tgt_prog) 3626 bpf_prog_put(tgt_prog); 3627 return err; 3628 } 3629 3630 static void bpf_raw_tp_link_release(struct bpf_link *link) 3631 { 3632 struct bpf_raw_tp_link *raw_tp = 3633 container_of(link, struct bpf_raw_tp_link, link); 3634 3635 bpf_probe_unregister(raw_tp->btp, raw_tp); 3636 bpf_put_raw_tracepoint(raw_tp->btp); 3637 } 3638 3639 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3640 { 3641 struct bpf_raw_tp_link *raw_tp = 3642 container_of(link, struct bpf_raw_tp_link, link); 3643 3644 kfree(raw_tp); 3645 } 3646 3647 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3648 struct seq_file *seq) 3649 { 3650 struct bpf_raw_tp_link *raw_tp_link = 3651 container_of(link, struct bpf_raw_tp_link, link); 3652 3653 seq_printf(seq, 3654 "tp_name:\t%s\n", 3655 raw_tp_link->btp->tp->name); 3656 } 3657 3658 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3659 u32 len) 3660 { 3661 if (ulen >= len + 1) { 3662 if (copy_to_user(ubuf, buf, len + 1)) 3663 return -EFAULT; 3664 } else { 3665 char zero = '\0'; 3666 3667 if (copy_to_user(ubuf, buf, ulen - 1)) 3668 return -EFAULT; 3669 if (put_user(zero, ubuf + ulen - 1)) 3670 return -EFAULT; 3671 return -ENOSPC; 3672 } 3673 3674 return 0; 3675 } 3676 3677 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3678 struct bpf_link_info *info) 3679 { 3680 struct bpf_raw_tp_link *raw_tp_link = 3681 container_of(link, struct bpf_raw_tp_link, link); 3682 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3683 const char *tp_name = raw_tp_link->btp->tp->name; 3684 u32 ulen = info->raw_tracepoint.tp_name_len; 3685 size_t tp_len = strlen(tp_name); 3686 3687 if (!ulen ^ !ubuf) 3688 return -EINVAL; 3689 3690 info->raw_tracepoint.tp_name_len = tp_len + 1; 3691 3692 if (!ubuf) 3693 return 0; 3694 3695 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3696 } 3697 3698 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3699 .release = bpf_raw_tp_link_release, 3700 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3701 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3702 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3703 }; 3704 3705 #ifdef CONFIG_PERF_EVENTS 3706 struct bpf_perf_link { 3707 struct bpf_link link; 3708 struct file *perf_file; 3709 }; 3710 3711 static void bpf_perf_link_release(struct bpf_link *link) 3712 { 3713 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3714 struct perf_event *event = perf_link->perf_file->private_data; 3715 3716 perf_event_free_bpf_prog(event); 3717 fput(perf_link->perf_file); 3718 } 3719 3720 static void bpf_perf_link_dealloc(struct bpf_link *link) 3721 { 3722 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3723 3724 
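	/* The perf_event file reference was already dropped in
	 * bpf_perf_link_release() and the prog reference is dropped by the
	 * generic bpf_link_dealloc(), so only the link itself is freed here.
	 */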
kfree(perf_link); 3725 } 3726 3727 static int bpf_perf_link_fill_common(const struct perf_event *event, 3728 char __user *uname, u32 *ulenp, 3729 u64 *probe_offset, u64 *probe_addr, 3730 u32 *fd_type, unsigned long *missed) 3731 { 3732 const char *buf; 3733 u32 prog_id, ulen; 3734 size_t len; 3735 int err; 3736 3737 ulen = *ulenp; 3738 if (!ulen ^ !uname) 3739 return -EINVAL; 3740 3741 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3742 probe_offset, probe_addr, missed); 3743 if (err) 3744 return err; 3745 3746 if (buf) { 3747 len = strlen(buf); 3748 *ulenp = len + 1; 3749 } else { 3750 *ulenp = 1; 3751 } 3752 if (!uname) 3753 return 0; 3754 3755 if (buf) { 3756 err = bpf_copy_to_user(uname, buf, ulen, len); 3757 if (err) 3758 return err; 3759 } else { 3760 char zero = '\0'; 3761 3762 if (put_user(zero, uname)) 3763 return -EFAULT; 3764 } 3765 return 0; 3766 } 3767 3768 #ifdef CONFIG_KPROBE_EVENTS 3769 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3770 struct bpf_link_info *info) 3771 { 3772 unsigned long missed; 3773 char __user *uname; 3774 u64 addr, offset; 3775 u32 ulen, type; 3776 int err; 3777 3778 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3779 ulen = info->perf_event.kprobe.name_len; 3780 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3781 &type, &missed); 3782 if (err) 3783 return err; 3784 if (type == BPF_FD_TYPE_KRETPROBE) 3785 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3786 else 3787 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3788 info->perf_event.kprobe.name_len = ulen; 3789 info->perf_event.kprobe.offset = offset; 3790 info->perf_event.kprobe.missed = missed; 3791 if (!kallsyms_show_value(current_cred())) 3792 addr = 0; 3793 info->perf_event.kprobe.addr = addr; 3794 info->perf_event.kprobe.cookie = event->bpf_cookie; 3795 return 0; 3796 } 3797 #endif 3798 3799 #ifdef CONFIG_UPROBE_EVENTS 3800 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3801 struct bpf_link_info *info) 3802 { 3803 u64 ref_ctr_offset, offset; 3804 char __user *uname; 3805 u32 ulen, type; 3806 int err; 3807 3808 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3809 ulen = info->perf_event.uprobe.name_len; 3810 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &ref_ctr_offset, 3811 &type, NULL); 3812 if (err) 3813 return err; 3814 3815 if (type == BPF_FD_TYPE_URETPROBE) 3816 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3817 else 3818 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3819 info->perf_event.uprobe.name_len = ulen; 3820 info->perf_event.uprobe.offset = offset; 3821 info->perf_event.uprobe.cookie = event->bpf_cookie; 3822 info->perf_event.uprobe.ref_ctr_offset = ref_ctr_offset; 3823 return 0; 3824 } 3825 #endif 3826 3827 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3828 struct bpf_link_info *info) 3829 { 3830 #ifdef CONFIG_KPROBE_EVENTS 3831 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3832 return bpf_perf_link_fill_kprobe(event, info); 3833 #endif 3834 #ifdef CONFIG_UPROBE_EVENTS 3835 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3836 return bpf_perf_link_fill_uprobe(event, info); 3837 #endif 3838 return -EOPNOTSUPP; 3839 } 3840 3841 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3842 struct bpf_link_info *info) 3843 { 3844 char __user *uname; 3845 u32 ulen; 3846 int err; 3847 3848 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3849 ulen = info->perf_event.tracepoint.name_len; 3850 err = 
bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL); 3851 if (err) 3852 return err; 3853 3854 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3855 info->perf_event.tracepoint.name_len = ulen; 3856 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3857 return 0; 3858 } 3859 3860 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3861 struct bpf_link_info *info) 3862 { 3863 info->perf_event.event.type = event->attr.type; 3864 info->perf_event.event.config = event->attr.config; 3865 info->perf_event.event.cookie = event->bpf_cookie; 3866 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3867 return 0; 3868 } 3869 3870 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3871 struct bpf_link_info *info) 3872 { 3873 struct bpf_perf_link *perf_link; 3874 const struct perf_event *event; 3875 3876 perf_link = container_of(link, struct bpf_perf_link, link); 3877 event = perf_get_event(perf_link->perf_file); 3878 if (IS_ERR(event)) 3879 return PTR_ERR(event); 3880 3881 switch (event->prog->type) { 3882 case BPF_PROG_TYPE_PERF_EVENT: 3883 return bpf_perf_link_fill_perf_event(event, info); 3884 case BPF_PROG_TYPE_TRACEPOINT: 3885 return bpf_perf_link_fill_tracepoint(event, info); 3886 case BPF_PROG_TYPE_KPROBE: 3887 return bpf_perf_link_fill_probe(event, info); 3888 default: 3889 return -EOPNOTSUPP; 3890 } 3891 } 3892 3893 static const struct bpf_link_ops bpf_perf_link_lops = { 3894 .release = bpf_perf_link_release, 3895 .dealloc = bpf_perf_link_dealloc, 3896 .fill_link_info = bpf_perf_link_fill_link_info, 3897 }; 3898 3899 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3900 { 3901 struct bpf_link_primer link_primer; 3902 struct bpf_perf_link *link; 3903 struct perf_event *event; 3904 struct file *perf_file; 3905 int err; 3906 3907 if (attr->link_create.flags) 3908 return -EINVAL; 3909 3910 perf_file = perf_event_get(attr->link_create.target_fd); 3911 if (IS_ERR(perf_file)) 3912 return PTR_ERR(perf_file); 3913 3914 link = kzalloc(sizeof(*link), GFP_USER); 3915 if (!link) { 3916 err = -ENOMEM; 3917 goto out_put_file; 3918 } 3919 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3920 link->perf_file = perf_file; 3921 3922 err = bpf_link_prime(&link->link, &link_primer); 3923 if (err) { 3924 kfree(link); 3925 goto out_put_file; 3926 } 3927 3928 event = perf_file->private_data; 3929 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3930 if (err) { 3931 bpf_link_cleanup(&link_primer); 3932 goto out_put_file; 3933 } 3934 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3935 bpf_prog_inc(prog); 3936 3937 return bpf_link_settle(&link_primer); 3938 3939 out_put_file: 3940 fput(perf_file); 3941 return err; 3942 } 3943 #else 3944 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3945 { 3946 return -EOPNOTSUPP; 3947 } 3948 #endif /* CONFIG_PERF_EVENTS */ 3949 3950 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3951 const char __user *user_tp_name, u64 cookie) 3952 { 3953 struct bpf_link_primer link_primer; 3954 struct bpf_raw_tp_link *link; 3955 struct bpf_raw_event_map *btp; 3956 const char *tp_name; 3957 char buf[128]; 3958 int err; 3959 3960 switch (prog->type) { 3961 case BPF_PROG_TYPE_TRACING: 3962 case BPF_PROG_TYPE_EXT: 3963 case BPF_PROG_TYPE_LSM: 3964 if (user_tp_name) 3965 /* The attach point for this category of programs 3966 * should be specified via btf_id during program load. 
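 * Passing a name here is therefore rejected with -EINVAL; such programs
 * either use their attach_func_name (BPF_TRACE_RAW_TP) or are handed off
 * to bpf_tracing_prog_attach() below.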
3967 */ 3968 return -EINVAL; 3969 if (prog->type == BPF_PROG_TYPE_TRACING && 3970 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3971 tp_name = prog->aux->attach_func_name; 3972 break; 3973 } 3974 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3975 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3976 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3977 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3978 return -EFAULT; 3979 buf[sizeof(buf) - 1] = 0; 3980 tp_name = buf; 3981 break; 3982 default: 3983 return -EINVAL; 3984 } 3985 3986 btp = bpf_get_raw_tracepoint(tp_name); 3987 if (!btp) 3988 return -ENOENT; 3989 3990 link = kzalloc(sizeof(*link), GFP_USER); 3991 if (!link) { 3992 err = -ENOMEM; 3993 goto out_put_btp; 3994 } 3995 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3996 &bpf_raw_tp_link_lops, prog, 3997 tracepoint_is_faultable(btp->tp)); 3998 link->btp = btp; 3999 link->cookie = cookie; 4000 4001 err = bpf_link_prime(&link->link, &link_primer); 4002 if (err) { 4003 kfree(link); 4004 goto out_put_btp; 4005 } 4006 4007 err = bpf_probe_register(link->btp, link); 4008 if (err) { 4009 bpf_link_cleanup(&link_primer); 4010 goto out_put_btp; 4011 } 4012 4013 return bpf_link_settle(&link_primer); 4014 4015 out_put_btp: 4016 bpf_put_raw_tracepoint(btp); 4017 return err; 4018 } 4019 4020 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie 4021 4022 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 4023 { 4024 struct bpf_prog *prog; 4025 void __user *tp_name; 4026 __u64 cookie; 4027 int fd; 4028 4029 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 4030 return -EINVAL; 4031 4032 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 4033 if (IS_ERR(prog)) 4034 return PTR_ERR(prog); 4035 4036 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); 4037 cookie = attr->raw_tracepoint.cookie; 4038 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); 4039 if (fd < 0) 4040 bpf_prog_put(prog); 4041 return fd; 4042 } 4043 4044 static enum bpf_prog_type 4045 attach_type_to_prog_type(enum bpf_attach_type attach_type) 4046 { 4047 switch (attach_type) { 4048 case BPF_CGROUP_INET_INGRESS: 4049 case BPF_CGROUP_INET_EGRESS: 4050 return BPF_PROG_TYPE_CGROUP_SKB; 4051 case BPF_CGROUP_INET_SOCK_CREATE: 4052 case BPF_CGROUP_INET_SOCK_RELEASE: 4053 case BPF_CGROUP_INET4_POST_BIND: 4054 case BPF_CGROUP_INET6_POST_BIND: 4055 return BPF_PROG_TYPE_CGROUP_SOCK; 4056 case BPF_CGROUP_INET4_BIND: 4057 case BPF_CGROUP_INET6_BIND: 4058 case BPF_CGROUP_INET4_CONNECT: 4059 case BPF_CGROUP_INET6_CONNECT: 4060 case BPF_CGROUP_UNIX_CONNECT: 4061 case BPF_CGROUP_INET4_GETPEERNAME: 4062 case BPF_CGROUP_INET6_GETPEERNAME: 4063 case BPF_CGROUP_UNIX_GETPEERNAME: 4064 case BPF_CGROUP_INET4_GETSOCKNAME: 4065 case BPF_CGROUP_INET6_GETSOCKNAME: 4066 case BPF_CGROUP_UNIX_GETSOCKNAME: 4067 case BPF_CGROUP_UDP4_SENDMSG: 4068 case BPF_CGROUP_UDP6_SENDMSG: 4069 case BPF_CGROUP_UNIX_SENDMSG: 4070 case BPF_CGROUP_UDP4_RECVMSG: 4071 case BPF_CGROUP_UDP6_RECVMSG: 4072 case BPF_CGROUP_UNIX_RECVMSG: 4073 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 4074 case BPF_CGROUP_SOCK_OPS: 4075 return BPF_PROG_TYPE_SOCK_OPS; 4076 case BPF_CGROUP_DEVICE: 4077 return BPF_PROG_TYPE_CGROUP_DEVICE; 4078 case BPF_SK_MSG_VERDICT: 4079 return BPF_PROG_TYPE_SK_MSG; 4080 case BPF_SK_SKB_STREAM_PARSER: 4081 case BPF_SK_SKB_STREAM_VERDICT: 4082 case BPF_SK_SKB_VERDICT: 4083 return BPF_PROG_TYPE_SK_SKB; 4084 case BPF_LIRC_MODE2: 4085 return BPF_PROG_TYPE_LIRC_MODE2; 4086 case BPF_FLOW_DISSECTOR: 4087 return BPF_PROG_TYPE_FLOW_DISSECTOR; 4088 
case BPF_CGROUP_SYSCTL: 4089 return BPF_PROG_TYPE_CGROUP_SYSCTL; 4090 case BPF_CGROUP_GETSOCKOPT: 4091 case BPF_CGROUP_SETSOCKOPT: 4092 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 4093 case BPF_TRACE_ITER: 4094 case BPF_TRACE_RAW_TP: 4095 case BPF_TRACE_FENTRY: 4096 case BPF_TRACE_FEXIT: 4097 case BPF_MODIFY_RETURN: 4098 return BPF_PROG_TYPE_TRACING; 4099 case BPF_LSM_MAC: 4100 return BPF_PROG_TYPE_LSM; 4101 case BPF_SK_LOOKUP: 4102 return BPF_PROG_TYPE_SK_LOOKUP; 4103 case BPF_XDP: 4104 return BPF_PROG_TYPE_XDP; 4105 case BPF_LSM_CGROUP: 4106 return BPF_PROG_TYPE_LSM; 4107 case BPF_TCX_INGRESS: 4108 case BPF_TCX_EGRESS: 4109 case BPF_NETKIT_PRIMARY: 4110 case BPF_NETKIT_PEER: 4111 return BPF_PROG_TYPE_SCHED_CLS; 4112 default: 4113 return BPF_PROG_TYPE_UNSPEC; 4114 } 4115 } 4116 4117 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 4118 enum bpf_attach_type attach_type) 4119 { 4120 enum bpf_prog_type ptype; 4121 4122 switch (prog->type) { 4123 case BPF_PROG_TYPE_CGROUP_SOCK: 4124 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4125 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4126 case BPF_PROG_TYPE_SK_LOOKUP: 4127 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 4128 case BPF_PROG_TYPE_CGROUP_SKB: 4129 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) 4130 /* cg-skb progs can be loaded by unpriv user. 4131 * check permissions at attach time. 4132 */ 4133 return -EPERM; 4134 4135 ptype = attach_type_to_prog_type(attach_type); 4136 if (prog->type != ptype) 4137 return -EINVAL; 4138 4139 return prog->enforce_expected_attach_type && 4140 prog->expected_attach_type != attach_type ? 4141 -EINVAL : 0; 4142 case BPF_PROG_TYPE_EXT: 4143 return 0; 4144 case BPF_PROG_TYPE_NETFILTER: 4145 if (attach_type != BPF_NETFILTER) 4146 return -EINVAL; 4147 return 0; 4148 case BPF_PROG_TYPE_PERF_EVENT: 4149 case BPF_PROG_TYPE_TRACEPOINT: 4150 if (attach_type != BPF_PERF_EVENT) 4151 return -EINVAL; 4152 return 0; 4153 case BPF_PROG_TYPE_KPROBE: 4154 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 4155 attach_type != BPF_TRACE_KPROBE_MULTI) 4156 return -EINVAL; 4157 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION && 4158 attach_type != BPF_TRACE_KPROBE_SESSION) 4159 return -EINVAL; 4160 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 4161 attach_type != BPF_TRACE_UPROBE_MULTI) 4162 return -EINVAL; 4163 if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && 4164 attach_type != BPF_TRACE_UPROBE_SESSION) 4165 return -EINVAL; 4166 if (attach_type != BPF_PERF_EVENT && 4167 attach_type != BPF_TRACE_KPROBE_MULTI && 4168 attach_type != BPF_TRACE_KPROBE_SESSION && 4169 attach_type != BPF_TRACE_UPROBE_MULTI && 4170 attach_type != BPF_TRACE_UPROBE_SESSION) 4171 return -EINVAL; 4172 return 0; 4173 case BPF_PROG_TYPE_SCHED_CLS: 4174 if (attach_type != BPF_TCX_INGRESS && 4175 attach_type != BPF_TCX_EGRESS && 4176 attach_type != BPF_NETKIT_PRIMARY && 4177 attach_type != BPF_NETKIT_PEER) 4178 return -EINVAL; 4179 return 0; 4180 default: 4181 ptype = attach_type_to_prog_type(attach_type); 4182 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 4183 return -EINVAL; 4184 return 0; 4185 } 4186 } 4187 4188 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 4189 4190 #define BPF_F_ATTACH_MASK_BASE \ 4191 (BPF_F_ALLOW_OVERRIDE | \ 4192 BPF_F_ALLOW_MULTI | \ 4193 BPF_F_REPLACE | \ 4194 BPF_F_PREORDER) 4195 4196 #define BPF_F_ATTACH_MASK_MPROG \ 4197 (BPF_F_REPLACE | \ 4198 BPF_F_BEFORE | \ 4199 BPF_F_AFTER | \ 4200 BPF_F_ID | \ 4201 BPF_F_LINK) 4202 4203 static int 
bpf_prog_attach(const union bpf_attr *attr) 4204 { 4205 enum bpf_prog_type ptype; 4206 struct bpf_prog *prog; 4207 int ret; 4208 4209 if (CHECK_ATTR(BPF_PROG_ATTACH)) 4210 return -EINVAL; 4211 4212 ptype = attach_type_to_prog_type(attr->attach_type); 4213 if (ptype == BPF_PROG_TYPE_UNSPEC) 4214 return -EINVAL; 4215 if (bpf_mprog_supported(ptype)) { 4216 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4217 return -EINVAL; 4218 } else { 4219 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 4220 return -EINVAL; 4221 if (attr->relative_fd || 4222 attr->expected_revision) 4223 return -EINVAL; 4224 } 4225 4226 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4227 if (IS_ERR(prog)) 4228 return PTR_ERR(prog); 4229 4230 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 4231 bpf_prog_put(prog); 4232 return -EINVAL; 4233 } 4234 4235 switch (ptype) { 4236 case BPF_PROG_TYPE_SK_SKB: 4237 case BPF_PROG_TYPE_SK_MSG: 4238 ret = sock_map_get_from_fd(attr, prog); 4239 break; 4240 case BPF_PROG_TYPE_LIRC_MODE2: 4241 ret = lirc_prog_attach(attr, prog); 4242 break; 4243 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4244 ret = netns_bpf_prog_attach(attr, prog); 4245 break; 4246 case BPF_PROG_TYPE_CGROUP_DEVICE: 4247 case BPF_PROG_TYPE_CGROUP_SKB: 4248 case BPF_PROG_TYPE_CGROUP_SOCK: 4249 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4250 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4251 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4252 case BPF_PROG_TYPE_SOCK_OPS: 4253 case BPF_PROG_TYPE_LSM: 4254 if (ptype == BPF_PROG_TYPE_LSM && 4255 prog->expected_attach_type != BPF_LSM_CGROUP) 4256 ret = -EINVAL; 4257 else 4258 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 4259 break; 4260 case BPF_PROG_TYPE_SCHED_CLS: 4261 if (attr->attach_type == BPF_TCX_INGRESS || 4262 attr->attach_type == BPF_TCX_EGRESS) 4263 ret = tcx_prog_attach(attr, prog); 4264 else 4265 ret = netkit_prog_attach(attr, prog); 4266 break; 4267 default: 4268 ret = -EINVAL; 4269 } 4270 4271 if (ret) 4272 bpf_prog_put(prog); 4273 return ret; 4274 } 4275 4276 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 4277 4278 static int bpf_prog_detach(const union bpf_attr *attr) 4279 { 4280 struct bpf_prog *prog = NULL; 4281 enum bpf_prog_type ptype; 4282 int ret; 4283 4284 if (CHECK_ATTR(BPF_PROG_DETACH)) 4285 return -EINVAL; 4286 4287 ptype = attach_type_to_prog_type(attr->attach_type); 4288 if (bpf_mprog_supported(ptype)) { 4289 if (ptype == BPF_PROG_TYPE_UNSPEC) 4290 return -EINVAL; 4291 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4292 return -EINVAL; 4293 if (attr->attach_bpf_fd) { 4294 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4295 if (IS_ERR(prog)) 4296 return PTR_ERR(prog); 4297 } 4298 } else if (attr->attach_flags || 4299 attr->relative_fd || 4300 attr->expected_revision) { 4301 return -EINVAL; 4302 } 4303 4304 switch (ptype) { 4305 case BPF_PROG_TYPE_SK_MSG: 4306 case BPF_PROG_TYPE_SK_SKB: 4307 ret = sock_map_prog_detach(attr, ptype); 4308 break; 4309 case BPF_PROG_TYPE_LIRC_MODE2: 4310 ret = lirc_prog_detach(attr); 4311 break; 4312 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4313 ret = netns_bpf_prog_detach(attr, ptype); 4314 break; 4315 case BPF_PROG_TYPE_CGROUP_DEVICE: 4316 case BPF_PROG_TYPE_CGROUP_SKB: 4317 case BPF_PROG_TYPE_CGROUP_SOCK: 4318 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4319 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4320 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4321 case BPF_PROG_TYPE_SOCK_OPS: 4322 case BPF_PROG_TYPE_LSM: 4323 ret = cgroup_bpf_prog_detach(attr, ptype); 4324 break; 4325 case BPF_PROG_TYPE_SCHED_CLS: 4326 if (attr->attach_type == 
BPF_TCX_INGRESS || 4327 attr->attach_type == BPF_TCX_EGRESS) 4328 ret = tcx_prog_detach(attr, prog); 4329 else 4330 ret = netkit_prog_detach(attr, prog); 4331 break; 4332 default: 4333 ret = -EINVAL; 4334 } 4335 4336 if (prog) 4337 bpf_prog_put(prog); 4338 return ret; 4339 } 4340 4341 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4342 4343 static int bpf_prog_query(const union bpf_attr *attr, 4344 union bpf_attr __user *uattr) 4345 { 4346 if (!bpf_net_capable()) 4347 return -EPERM; 4348 if (CHECK_ATTR(BPF_PROG_QUERY)) 4349 return -EINVAL; 4350 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4351 return -EINVAL; 4352 4353 switch (attr->query.attach_type) { 4354 case BPF_CGROUP_INET_INGRESS: 4355 case BPF_CGROUP_INET_EGRESS: 4356 case BPF_CGROUP_INET_SOCK_CREATE: 4357 case BPF_CGROUP_INET_SOCK_RELEASE: 4358 case BPF_CGROUP_INET4_BIND: 4359 case BPF_CGROUP_INET6_BIND: 4360 case BPF_CGROUP_INET4_POST_BIND: 4361 case BPF_CGROUP_INET6_POST_BIND: 4362 case BPF_CGROUP_INET4_CONNECT: 4363 case BPF_CGROUP_INET6_CONNECT: 4364 case BPF_CGROUP_UNIX_CONNECT: 4365 case BPF_CGROUP_INET4_GETPEERNAME: 4366 case BPF_CGROUP_INET6_GETPEERNAME: 4367 case BPF_CGROUP_UNIX_GETPEERNAME: 4368 case BPF_CGROUP_INET4_GETSOCKNAME: 4369 case BPF_CGROUP_INET6_GETSOCKNAME: 4370 case BPF_CGROUP_UNIX_GETSOCKNAME: 4371 case BPF_CGROUP_UDP4_SENDMSG: 4372 case BPF_CGROUP_UDP6_SENDMSG: 4373 case BPF_CGROUP_UNIX_SENDMSG: 4374 case BPF_CGROUP_UDP4_RECVMSG: 4375 case BPF_CGROUP_UDP6_RECVMSG: 4376 case BPF_CGROUP_UNIX_RECVMSG: 4377 case BPF_CGROUP_SOCK_OPS: 4378 case BPF_CGROUP_DEVICE: 4379 case BPF_CGROUP_SYSCTL: 4380 case BPF_CGROUP_GETSOCKOPT: 4381 case BPF_CGROUP_SETSOCKOPT: 4382 case BPF_LSM_CGROUP: 4383 return cgroup_bpf_prog_query(attr, uattr); 4384 case BPF_LIRC_MODE2: 4385 return lirc_prog_query(attr, uattr); 4386 case BPF_FLOW_DISSECTOR: 4387 case BPF_SK_LOOKUP: 4388 return netns_bpf_prog_query(attr, uattr); 4389 case BPF_SK_SKB_STREAM_PARSER: 4390 case BPF_SK_SKB_STREAM_VERDICT: 4391 case BPF_SK_MSG_VERDICT: 4392 case BPF_SK_SKB_VERDICT: 4393 return sock_map_bpf_prog_query(attr, uattr); 4394 case BPF_TCX_INGRESS: 4395 case BPF_TCX_EGRESS: 4396 return tcx_prog_query(attr, uattr); 4397 case BPF_NETKIT_PRIMARY: 4398 case BPF_NETKIT_PEER: 4399 return netkit_prog_query(attr, uattr); 4400 default: 4401 return -EINVAL; 4402 } 4403 } 4404 4405 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4406 4407 static int bpf_prog_test_run(const union bpf_attr *attr, 4408 union bpf_attr __user *uattr) 4409 { 4410 struct bpf_prog *prog; 4411 int ret = -ENOTSUPP; 4412 4413 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4414 return -EINVAL; 4415 4416 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4417 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4418 return -EINVAL; 4419 4420 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4421 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4422 return -EINVAL; 4423 4424 prog = bpf_prog_get(attr->test.prog_fd); 4425 if (IS_ERR(prog)) 4426 return PTR_ERR(prog); 4427 4428 if (prog->aux->ops->test_run) 4429 ret = prog->aux->ops->test_run(prog, attr, uattr); 4430 4431 bpf_prog_put(prog); 4432 return ret; 4433 } 4434 4435 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4436 4437 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4438 union bpf_attr __user *uattr, 4439 struct idr *idr, 4440 spinlock_t *lock) 4441 { 4442 u32 next_id = attr->start_id; 4443 int err = 0; 4444 4445 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4446 return -EINVAL; 4447 4448 if 
(!capable(CAP_SYS_ADMIN)) 4449 return -EPERM; 4450 4451 next_id++; 4452 spin_lock_bh(lock); 4453 if (!idr_get_next(idr, &next_id)) 4454 err = -ENOENT; 4455 spin_unlock_bh(lock); 4456 4457 if (!err) 4458 err = put_user(next_id, &uattr->next_id); 4459 4460 return err; 4461 } 4462 4463 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4464 { 4465 struct bpf_map *map; 4466 4467 spin_lock_bh(&map_idr_lock); 4468 again: 4469 map = idr_get_next(&map_idr, id); 4470 if (map) { 4471 map = __bpf_map_inc_not_zero(map, false); 4472 if (IS_ERR(map)) { 4473 (*id)++; 4474 goto again; 4475 } 4476 } 4477 spin_unlock_bh(&map_idr_lock); 4478 4479 return map; 4480 } 4481 4482 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4483 { 4484 struct bpf_prog *prog; 4485 4486 spin_lock_bh(&prog_idr_lock); 4487 again: 4488 prog = idr_get_next(&prog_idr, id); 4489 if (prog) { 4490 prog = bpf_prog_inc_not_zero(prog); 4491 if (IS_ERR(prog)) { 4492 (*id)++; 4493 goto again; 4494 } 4495 } 4496 spin_unlock_bh(&prog_idr_lock); 4497 4498 return prog; 4499 } 4500 4501 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4502 4503 struct bpf_prog *bpf_prog_by_id(u32 id) 4504 { 4505 struct bpf_prog *prog; 4506 4507 if (!id) 4508 return ERR_PTR(-ENOENT); 4509 4510 spin_lock_bh(&prog_idr_lock); 4511 prog = idr_find(&prog_idr, id); 4512 if (prog) 4513 prog = bpf_prog_inc_not_zero(prog); 4514 else 4515 prog = ERR_PTR(-ENOENT); 4516 spin_unlock_bh(&prog_idr_lock); 4517 return prog; 4518 } 4519 4520 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4521 { 4522 struct bpf_prog *prog; 4523 u32 id = attr->prog_id; 4524 int fd; 4525 4526 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4527 return -EINVAL; 4528 4529 if (!capable(CAP_SYS_ADMIN)) 4530 return -EPERM; 4531 4532 prog = bpf_prog_by_id(id); 4533 if (IS_ERR(prog)) 4534 return PTR_ERR(prog); 4535 4536 fd = bpf_prog_new_fd(prog); 4537 if (fd < 0) 4538 bpf_prog_put(prog); 4539 4540 return fd; 4541 } 4542 4543 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4544 4545 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4546 { 4547 struct bpf_map *map; 4548 u32 id = attr->map_id; 4549 int f_flags; 4550 int fd; 4551 4552 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4553 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4554 return -EINVAL; 4555 4556 if (!capable(CAP_SYS_ADMIN)) 4557 return -EPERM; 4558 4559 f_flags = bpf_get_file_flag(attr->open_flags); 4560 if (f_flags < 0) 4561 return f_flags; 4562 4563 spin_lock_bh(&map_idr_lock); 4564 map = idr_find(&map_idr, id); 4565 if (map) 4566 map = __bpf_map_inc_not_zero(map, true); 4567 else 4568 map = ERR_PTR(-ENOENT); 4569 spin_unlock_bh(&map_idr_lock); 4570 4571 if (IS_ERR(map)) 4572 return PTR_ERR(map); 4573 4574 fd = bpf_map_new_fd(map, f_flags); 4575 if (fd < 0) 4576 bpf_map_put_with_uref(map); 4577 4578 return fd; 4579 } 4580 4581 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4582 unsigned long addr, u32 *off, 4583 u32 *type) 4584 { 4585 const struct bpf_map *map; 4586 int i; 4587 4588 mutex_lock(&prog->aux->used_maps_mutex); 4589 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4590 map = prog->aux->used_maps[i]; 4591 if (map == (void *)addr) { 4592 *type = BPF_PSEUDO_MAP_FD; 4593 goto out; 4594 } 4595 if (!map->ops->map_direct_value_meta) 4596 continue; 4597 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4598 *type = BPF_PSEUDO_MAP_VALUE; 4599 goto out; 4600 } 4601 } 4602 map = NULL; 4603 4604 out: 4605 mutex_unlock(&prog->aux->used_maps_mutex); 4606 return map; 4607 } 4608 4609 static struct 
bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4610 const struct cred *f_cred) 4611 { 4612 const struct bpf_map *map; 4613 struct bpf_insn *insns; 4614 u32 off, type; 4615 u64 imm; 4616 u8 code; 4617 int i; 4618 4619 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4620 GFP_USER); 4621 if (!insns) 4622 return insns; 4623 4624 for (i = 0; i < prog->len; i++) { 4625 code = insns[i].code; 4626 4627 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4628 insns[i].code = BPF_JMP | BPF_CALL; 4629 insns[i].imm = BPF_FUNC_tail_call; 4630 /* fall-through */ 4631 } 4632 if (code == (BPF_JMP | BPF_CALL) || 4633 code == (BPF_JMP | BPF_CALL_ARGS)) { 4634 if (code == (BPF_JMP | BPF_CALL_ARGS)) 4635 insns[i].code = BPF_JMP | BPF_CALL; 4636 if (!bpf_dump_raw_ok(f_cred)) 4637 insns[i].imm = 0; 4638 continue; 4639 } 4640 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4641 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4642 continue; 4643 } 4644 4645 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX || 4646 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) { 4647 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM; 4648 continue; 4649 } 4650 4651 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4652 continue; 4653 4654 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4655 map = bpf_map_from_imm(prog, imm, &off, &type); 4656 if (map) { 4657 insns[i].src_reg = type; 4658 insns[i].imm = map->id; 4659 insns[i + 1].imm = off; 4660 continue; 4661 } 4662 } 4663 4664 return insns; 4665 } 4666 4667 static int set_info_rec_size(struct bpf_prog_info *info) 4668 { 4669 /* 4670 * Ensure info.*_rec_size is the same as kernel expected size 4671 * 4672 * or 4673 * 4674 * Only allow zero *_rec_size if both _rec_size and _cnt are 4675 * zero. In this case, the kernel will set the expected 4676 * _rec_size back to the info. 
4677 */ 4678 4679 if ((info->nr_func_info || info->func_info_rec_size) && 4680 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4681 return -EINVAL; 4682 4683 if ((info->nr_line_info || info->line_info_rec_size) && 4684 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4685 return -EINVAL; 4686 4687 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4688 info->jited_line_info_rec_size != sizeof(__u64)) 4689 return -EINVAL; 4690 4691 info->func_info_rec_size = sizeof(struct bpf_func_info); 4692 info->line_info_rec_size = sizeof(struct bpf_line_info); 4693 info->jited_line_info_rec_size = sizeof(__u64); 4694 4695 return 0; 4696 } 4697 4698 static int bpf_prog_get_info_by_fd(struct file *file, 4699 struct bpf_prog *prog, 4700 const union bpf_attr *attr, 4701 union bpf_attr __user *uattr) 4702 { 4703 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4704 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4705 struct bpf_prog_info info; 4706 u32 info_len = attr->info.info_len; 4707 struct bpf_prog_kstats stats; 4708 char __user *uinsns; 4709 u32 ulen; 4710 int err; 4711 4712 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4713 if (err) 4714 return err; 4715 info_len = min_t(u32, sizeof(info), info_len); 4716 4717 memset(&info, 0, sizeof(info)); 4718 if (copy_from_user(&info, uinfo, info_len)) 4719 return -EFAULT; 4720 4721 info.type = prog->type; 4722 info.id = prog->aux->id; 4723 info.load_time = prog->aux->load_time; 4724 info.created_by_uid = from_kuid_munged(current_user_ns(), 4725 prog->aux->user->uid); 4726 info.gpl_compatible = prog->gpl_compatible; 4727 4728 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4729 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4730 4731 mutex_lock(&prog->aux->used_maps_mutex); 4732 ulen = info.nr_map_ids; 4733 info.nr_map_ids = prog->aux->used_map_cnt; 4734 ulen = min_t(u32, info.nr_map_ids, ulen); 4735 if (ulen) { 4736 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4737 u32 i; 4738 4739 for (i = 0; i < ulen; i++) 4740 if (put_user(prog->aux->used_maps[i]->id, 4741 &user_map_ids[i])) { 4742 mutex_unlock(&prog->aux->used_maps_mutex); 4743 return -EFAULT; 4744 } 4745 } 4746 mutex_unlock(&prog->aux->used_maps_mutex); 4747 4748 err = set_info_rec_size(&info); 4749 if (err) 4750 return err; 4751 4752 bpf_prog_get_stats(prog, &stats); 4753 info.run_time_ns = stats.nsecs; 4754 info.run_cnt = stats.cnt; 4755 info.recursion_misses = stats.misses; 4756 4757 info.verified_insns = prog->aux->verified_insns; 4758 if (prog->aux->btf) 4759 info.btf_id = btf_obj_id(prog->aux->btf); 4760 4761 if (!bpf_capable()) { 4762 info.jited_prog_len = 0; 4763 info.xlated_prog_len = 0; 4764 info.nr_jited_ksyms = 0; 4765 info.nr_jited_func_lens = 0; 4766 info.nr_func_info = 0; 4767 info.nr_line_info = 0; 4768 info.nr_jited_line_info = 0; 4769 goto done; 4770 } 4771 4772 ulen = info.xlated_prog_len; 4773 info.xlated_prog_len = bpf_prog_insn_size(prog); 4774 if (info.xlated_prog_len && ulen) { 4775 struct bpf_insn *insns_sanitized; 4776 bool fault; 4777 4778 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4779 info.xlated_prog_insns = 0; 4780 goto done; 4781 } 4782 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4783 if (!insns_sanitized) 4784 return -ENOMEM; 4785 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4786 ulen = min_t(u32, info.xlated_prog_len, ulen); 4787 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4788 kfree(insns_sanitized); 4789 if 
(fault) 4790 return -EFAULT; 4791 } 4792 4793 if (bpf_prog_is_offloaded(prog->aux)) { 4794 err = bpf_prog_offload_info_fill(&info, prog); 4795 if (err) 4796 return err; 4797 goto done; 4798 } 4799 4800 /* NOTE: the following code is supposed to be skipped for offload. 4801 * bpf_prog_offload_info_fill() is the place to fill similar fields 4802 * for offload. 4803 */ 4804 ulen = info.jited_prog_len; 4805 if (prog->aux->func_cnt) { 4806 u32 i; 4807 4808 info.jited_prog_len = 0; 4809 for (i = 0; i < prog->aux->func_cnt; i++) 4810 info.jited_prog_len += prog->aux->func[i]->jited_len; 4811 } else { 4812 info.jited_prog_len = prog->jited_len; 4813 } 4814 4815 if (info.jited_prog_len && ulen) { 4816 if (bpf_dump_raw_ok(file->f_cred)) { 4817 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4818 ulen = min_t(u32, info.jited_prog_len, ulen); 4819 4820 /* for multi-function programs, copy the JITed 4821 * instructions for all the functions 4822 */ 4823 if (prog->aux->func_cnt) { 4824 u32 len, free, i; 4825 u8 *img; 4826 4827 free = ulen; 4828 for (i = 0; i < prog->aux->func_cnt; i++) { 4829 len = prog->aux->func[i]->jited_len; 4830 len = min_t(u32, len, free); 4831 img = (u8 *) prog->aux->func[i]->bpf_func; 4832 if (copy_to_user(uinsns, img, len)) 4833 return -EFAULT; 4834 uinsns += len; 4835 free -= len; 4836 if (!free) 4837 break; 4838 } 4839 } else { 4840 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4841 return -EFAULT; 4842 } 4843 } else { 4844 info.jited_prog_insns = 0; 4845 } 4846 } 4847 4848 ulen = info.nr_jited_ksyms; 4849 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4850 if (ulen) { 4851 if (bpf_dump_raw_ok(file->f_cred)) { 4852 unsigned long ksym_addr; 4853 u64 __user *user_ksyms; 4854 u32 i; 4855 4856 /* copy the address of the kernel symbol 4857 * corresponding to each function 4858 */ 4859 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4860 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4861 if (prog->aux->func_cnt) { 4862 for (i = 0; i < ulen; i++) { 4863 ksym_addr = (unsigned long) 4864 prog->aux->func[i]->bpf_func; 4865 if (put_user((u64) ksym_addr, 4866 &user_ksyms[i])) 4867 return -EFAULT; 4868 } 4869 } else { 4870 ksym_addr = (unsigned long) prog->bpf_func; 4871 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4872 return -EFAULT; 4873 } 4874 } else { 4875 info.jited_ksyms = 0; 4876 } 4877 } 4878 4879 ulen = info.nr_jited_func_lens; 4880 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 4881 if (ulen) { 4882 if (bpf_dump_raw_ok(file->f_cred)) { 4883 u32 __user *user_lens; 4884 u32 func_len, i; 4885 4886 /* copy the JITed image lengths for each function */ 4887 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4888 user_lens = u64_to_user_ptr(info.jited_func_lens); 4889 if (prog->aux->func_cnt) { 4890 for (i = 0; i < ulen; i++) { 4891 func_len = 4892 prog->aux->func[i]->jited_len; 4893 if (put_user(func_len, &user_lens[i])) 4894 return -EFAULT; 4895 } 4896 } else { 4897 func_len = prog->jited_len; 4898 if (put_user(func_len, &user_lens[0])) 4899 return -EFAULT; 4900 } 4901 } else { 4902 info.jited_func_lens = 0; 4903 } 4904 } 4905 4906 info.attach_btf_id = prog->aux->attach_btf_id; 4907 if (attach_btf) 4908 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4909 4910 ulen = info.nr_func_info; 4911 info.nr_func_info = prog->aux->func_info_cnt; 4912 if (info.nr_func_info && ulen) { 4913 char __user *user_finfo; 4914 4915 user_finfo = u64_to_user_ptr(info.func_info); 4916 ulen = min_t(u32, info.nr_func_info, ulen); 4917 if (copy_to_user(user_finfo, prog->aux->func_info, 4918 info.func_info_rec_size * ulen)) 4919 return -EFAULT; 4920 } 4921 4922 ulen = info.nr_line_info; 4923 info.nr_line_info = prog->aux->nr_linfo; 4924 if (info.nr_line_info && ulen) { 4925 __u8 __user *user_linfo; 4926 4927 user_linfo = u64_to_user_ptr(info.line_info); 4928 ulen = min_t(u32, info.nr_line_info, ulen); 4929 if (copy_to_user(user_linfo, prog->aux->linfo, 4930 info.line_info_rec_size * ulen)) 4931 return -EFAULT; 4932 } 4933 4934 ulen = info.nr_jited_line_info; 4935 if (prog->aux->jited_linfo) 4936 info.nr_jited_line_info = prog->aux->nr_linfo; 4937 else 4938 info.nr_jited_line_info = 0; 4939 if (info.nr_jited_line_info && ulen) { 4940 if (bpf_dump_raw_ok(file->f_cred)) { 4941 unsigned long line_addr; 4942 __u64 __user *user_linfo; 4943 u32 i; 4944 4945 user_linfo = u64_to_user_ptr(info.jited_line_info); 4946 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4947 for (i = 0; i < ulen; i++) { 4948 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4949 if (put_user((__u64)line_addr, &user_linfo[i])) 4950 return -EFAULT; 4951 } 4952 } else { 4953 info.jited_line_info = 0; 4954 } 4955 } 4956 4957 ulen = info.nr_prog_tags; 4958 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4959 if (ulen) { 4960 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4961 u32 i; 4962 4963 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4964 ulen = min_t(u32, info.nr_prog_tags, ulen); 4965 if (prog->aux->func_cnt) { 4966 for (i = 0; i < ulen; i++) { 4967 if (copy_to_user(user_prog_tags[i], 4968 prog->aux->func[i]->tag, 4969 BPF_TAG_SIZE)) 4970 return -EFAULT; 4971 } 4972 } else { 4973 if (copy_to_user(user_prog_tags[0], 4974 prog->tag, BPF_TAG_SIZE)) 4975 return -EFAULT; 4976 } 4977 } 4978 4979 done: 4980 if (copy_to_user(uinfo, &info, info_len) || 4981 put_user(info_len, &uattr->info.info_len)) 4982 return -EFAULT; 4983 4984 return 0; 4985 } 4986 4987 static int bpf_map_get_info_by_fd(struct file *file, 4988 struct bpf_map *map, 4989 const union bpf_attr *attr, 4990 union bpf_attr __user *uattr) 4991 { 4992 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4993 struct bpf_map_info info; 4994 u32 info_len = attr->info.info_len; 4995 int err; 4996 4997 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4998 if (err) 4999 return err; 5000 info_len = min_t(u32, sizeof(info), info_len); 5001 5002 memset(&info, 0, sizeof(info)); 5003 info.type = map->map_type; 5004 info.id = map->id; 5005 info.key_size = map->key_size; 5006 info.value_size = map->value_size; 5007 info.max_entries = map->max_entries; 5008 info.map_flags = map->map_flags; 5009 info.map_extra = map->map_extra; 5010 memcpy(info.name, map->name, sizeof(map->name)); 5011 5012 if (map->btf) { 5013 info.btf_id = btf_obj_id(map->btf); 5014 info.btf_key_type_id = map->btf_key_type_id; 5015 info.btf_value_type_id = map->btf_value_type_id; 5016 } 5017 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 5018 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) 5019 bpf_map_struct_ops_info_fill(&info, map); 5020 5021 if (bpf_map_is_offloaded(map)) { 5022 err = bpf_map_offload_info_fill(&info, map); 5023 if (err) 5024 return err; 5025 } 5026 5027 if (copy_to_user(uinfo, &info, info_len) || 5028 put_user(info_len, &uattr->info.info_len)) 5029 return -EFAULT; 5030 5031 return 0; 5032 } 5033 5034 static int bpf_btf_get_info_by_fd(struct file *file, 5035 struct btf *btf, 5036 const union bpf_attr *attr, 5037 union bpf_attr __user *uattr) 5038 { 5039 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5040 u32 info_len = attr->info.info_len; 5041 int err; 5042 5043 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 5044 if (err) 5045 return err; 5046 5047 return btf_get_info_by_fd(btf, attr, uattr); 5048 } 5049 5050 static int bpf_link_get_info_by_fd(struct file *file, 5051 struct bpf_link *link, 5052 const union bpf_attr *attr, 5053 union bpf_attr __user *uattr) 5054 { 5055 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 5056 struct bpf_link_info info; 5057 u32 info_len = attr->info.info_len; 5058 int err; 5059 5060 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 5061 if (err) 5062 return err; 5063 info_len = min_t(u32, sizeof(info), info_len); 5064 5065 memset(&info, 0, sizeof(info)); 5066 if (copy_from_user(&info, uinfo, info_len)) 5067 return -EFAULT; 5068 5069 info.type = link->type; 5070 info.id = link->id; 5071 if (link->prog) 5072 info.prog_id = link->prog->aux->id; 5073 5074 if (link->ops->fill_link_info) { 5075 err = link->ops->fill_link_info(link, &info); 5076 if (err) 5077 return err; 5078 } 5079 5080 if (copy_to_user(uinfo, &info, info_len) || 5081 put_user(info_len, 
&uattr->info.info_len)) 5082 return -EFAULT; 5083 5084 return 0; 5085 } 5086 5087 5088 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 5089 5090 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 5091 union bpf_attr __user *uattr) 5092 { 5093 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 5094 return -EINVAL; 5095 5096 CLASS(fd, f)(attr->info.bpf_fd); 5097 if (fd_empty(f)) 5098 return -EBADFD; 5099 5100 if (fd_file(f)->f_op == &bpf_prog_fops) 5101 return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, 5102 uattr); 5103 else if (fd_file(f)->f_op == &bpf_map_fops) 5104 return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, 5105 uattr); 5106 else if (fd_file(f)->f_op == &btf_fops) 5107 return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr); 5108 else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll) 5109 return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data, 5110 attr, uattr); 5111 return -EINVAL; 5112 } 5113 5114 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd 5115 5116 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 5117 { 5118 struct bpf_token *token = NULL; 5119 5120 if (CHECK_ATTR(BPF_BTF_LOAD)) 5121 return -EINVAL; 5122 5123 if (attr->btf_flags & ~BPF_F_TOKEN_FD) 5124 return -EINVAL; 5125 5126 if (attr->btf_flags & BPF_F_TOKEN_FD) { 5127 token = bpf_token_get_from_fd(attr->btf_token_fd); 5128 if (IS_ERR(token)) 5129 return PTR_ERR(token); 5130 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) { 5131 bpf_token_put(token); 5132 token = NULL; 5133 } 5134 } 5135 5136 if (!bpf_token_capable(token, CAP_BPF)) { 5137 bpf_token_put(token); 5138 return -EPERM; 5139 } 5140 5141 bpf_token_put(token); 5142 5143 return btf_new_fd(attr, uattr, uattr_size); 5144 } 5145 5146 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD fd_by_id_token_fd 5147 5148 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 5149 { 5150 struct bpf_token *token = NULL; 5151 5152 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 5153 return -EINVAL; 5154 5155 if (attr->open_flags & ~BPF_F_TOKEN_FD) 5156 return -EINVAL; 5157 5158 if (attr->open_flags & BPF_F_TOKEN_FD) { 5159 token = bpf_token_get_from_fd(attr->fd_by_id_token_fd); 5160 if (IS_ERR(token)) 5161 return PTR_ERR(token); 5162 if (!bpf_token_allow_cmd(token, BPF_BTF_GET_FD_BY_ID)) { 5163 bpf_token_put(token); 5164 token = NULL; 5165 } 5166 } 5167 5168 if (!bpf_token_capable(token, CAP_SYS_ADMIN)) { 5169 bpf_token_put(token); 5170 return -EPERM; 5171 } 5172 5173 bpf_token_put(token); 5174 5175 return btf_get_fd_by_id(attr->btf_id); 5176 } 5177 5178 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 5179 union bpf_attr __user *uattr, 5180 u32 prog_id, u32 fd_type, 5181 const char *buf, u64 probe_offset, 5182 u64 probe_addr) 5183 { 5184 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 5185 u32 len = buf ? 
strlen(buf) : 0, input_len; 5186 int err = 0; 5187 5188 if (put_user(len, &uattr->task_fd_query.buf_len)) 5189 return -EFAULT; 5190 input_len = attr->task_fd_query.buf_len; 5191 if (input_len && ubuf) { 5192 if (!len) { 5193 /* nothing to copy, just make ubuf NULL terminated */ 5194 char zero = '\0'; 5195 5196 if (put_user(zero, ubuf)) 5197 return -EFAULT; 5198 } else if (input_len >= len + 1) { 5199 /* ubuf can hold the string with NULL terminator */ 5200 if (copy_to_user(ubuf, buf, len + 1)) 5201 return -EFAULT; 5202 } else { 5203 /* ubuf cannot hold the string with NULL terminator, 5204 * do a partial copy with NULL terminator. 5205 */ 5206 char zero = '\0'; 5207 5208 err = -ENOSPC; 5209 if (copy_to_user(ubuf, buf, input_len - 1)) 5210 return -EFAULT; 5211 if (put_user(zero, ubuf + input_len - 1)) 5212 return -EFAULT; 5213 } 5214 } 5215 5216 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 5217 put_user(fd_type, &uattr->task_fd_query.fd_type) || 5218 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 5219 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 5220 return -EFAULT; 5221 5222 return err; 5223 } 5224 5225 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 5226 5227 static int bpf_task_fd_query(const union bpf_attr *attr, 5228 union bpf_attr __user *uattr) 5229 { 5230 pid_t pid = attr->task_fd_query.pid; 5231 u32 fd = attr->task_fd_query.fd; 5232 const struct perf_event *event; 5233 struct task_struct *task; 5234 struct file *file; 5235 int err; 5236 5237 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 5238 return -EINVAL; 5239 5240 if (!capable(CAP_SYS_ADMIN)) 5241 return -EPERM; 5242 5243 if (attr->task_fd_query.flags != 0) 5244 return -EINVAL; 5245 5246 rcu_read_lock(); 5247 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 5248 rcu_read_unlock(); 5249 if (!task) 5250 return -ENOENT; 5251 5252 err = 0; 5253 file = fget_task(task, fd); 5254 put_task_struct(task); 5255 if (!file) 5256 return -EBADF; 5257 5258 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { 5259 struct bpf_link *link = file->private_data; 5260 5261 if (link->ops == &bpf_raw_tp_link_lops) { 5262 struct bpf_raw_tp_link *raw_tp = 5263 container_of(link, struct bpf_raw_tp_link, link); 5264 struct bpf_raw_event_map *btp = raw_tp->btp; 5265 5266 err = bpf_task_fd_query_copy(attr, uattr, 5267 raw_tp->link.prog->aux->id, 5268 BPF_FD_TYPE_RAW_TRACEPOINT, 5269 btp->tp->name, 0, 0); 5270 goto put_file; 5271 } 5272 goto out_not_supp; 5273 } 5274 5275 event = perf_get_event(file); 5276 if (!IS_ERR(event)) { 5277 u64 probe_offset, probe_addr; 5278 u32 prog_id, fd_type; 5279 const char *buf; 5280 5281 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 5282 &buf, &probe_offset, 5283 &probe_addr, NULL); 5284 if (!err) 5285 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 5286 fd_type, buf, 5287 probe_offset, 5288 probe_addr); 5289 goto put_file; 5290 } 5291 5292 out_not_supp: 5293 err = -ENOTSUPP; 5294 put_file: 5295 fput(file); 5296 return err; 5297 } 5298 5299 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 5300 5301 #define BPF_DO_BATCH(fn, ...) 
\ 5302 do { \ 5303 if (!fn) { \ 5304 err = -ENOTSUPP; \ 5305 goto err_put; \ 5306 } \ 5307 err = fn(__VA_ARGS__); \ 5308 } while (0) 5309 5310 static int bpf_map_do_batch(const union bpf_attr *attr, 5311 union bpf_attr __user *uattr, 5312 int cmd) 5313 { 5314 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 5315 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 5316 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 5317 struct bpf_map *map; 5318 int err; 5319 5320 if (CHECK_ATTR(BPF_MAP_BATCH)) 5321 return -EINVAL; 5322 5323 CLASS(fd, f)(attr->batch.map_fd); 5324 5325 map = __bpf_map_get(f); 5326 if (IS_ERR(map)) 5327 return PTR_ERR(map); 5328 if (has_write) 5329 bpf_map_write_active_inc(map); 5330 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 5331 err = -EPERM; 5332 goto err_put; 5333 } 5334 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 5335 err = -EPERM; 5336 goto err_put; 5337 } 5338 5339 if (cmd == BPF_MAP_LOOKUP_BATCH) 5340 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 5341 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 5342 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 5343 else if (cmd == BPF_MAP_UPDATE_BATCH) 5344 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr); 5345 else 5346 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5347 err_put: 5348 if (has_write) { 5349 maybe_wait_bpf_programs(map); 5350 bpf_map_write_active_dec(map); 5351 } 5352 return err; 5353 } 5354 5355 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5356 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5357 { 5358 struct bpf_prog *prog; 5359 int ret; 5360 5361 if (CHECK_ATTR(BPF_LINK_CREATE)) 5362 return -EINVAL; 5363 5364 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5365 return bpf_struct_ops_link_create(attr); 5366 5367 prog = bpf_prog_get(attr->link_create.prog_fd); 5368 if (IS_ERR(prog)) 5369 return PTR_ERR(prog); 5370 5371 ret = bpf_prog_attach_check_attach_type(prog, 5372 attr->link_create.attach_type); 5373 if (ret) 5374 goto out; 5375 5376 switch (prog->type) { 5377 case BPF_PROG_TYPE_CGROUP_SKB: 5378 case BPF_PROG_TYPE_CGROUP_SOCK: 5379 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5380 case BPF_PROG_TYPE_SOCK_OPS: 5381 case BPF_PROG_TYPE_CGROUP_DEVICE: 5382 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5383 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5384 ret = cgroup_bpf_link_attach(attr, prog); 5385 break; 5386 case BPF_PROG_TYPE_EXT: 5387 ret = bpf_tracing_prog_attach(prog, 5388 attr->link_create.target_fd, 5389 attr->link_create.target_btf_id, 5390 attr->link_create.tracing.cookie); 5391 break; 5392 case BPF_PROG_TYPE_LSM: 5393 case BPF_PROG_TYPE_TRACING: 5394 if (attr->link_create.attach_type != prog->expected_attach_type) { 5395 ret = -EINVAL; 5396 goto out; 5397 } 5398 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5399 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); 5400 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5401 ret = bpf_iter_link_attach(attr, uattr, prog); 5402 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5403 ret = cgroup_bpf_link_attach(attr, prog); 5404 else 5405 ret = bpf_tracing_prog_attach(prog, 5406 attr->link_create.target_fd, 5407 attr->link_create.target_btf_id, 5408 attr->link_create.tracing.cookie); 5409 break; 5410 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5411 case BPF_PROG_TYPE_SK_LOOKUP: 5412 ret = netns_bpf_link_create(attr, prog); 5413 break; 5414 case BPF_PROG_TYPE_SK_MSG: 5415 case BPF_PROG_TYPE_SK_SKB: 5416 ret = 
sock_map_link_create(attr, prog); 5417 break; 5418 #ifdef CONFIG_NET 5419 case BPF_PROG_TYPE_XDP: 5420 ret = bpf_xdp_link_attach(attr, prog); 5421 break; 5422 case BPF_PROG_TYPE_SCHED_CLS: 5423 if (attr->link_create.attach_type == BPF_TCX_INGRESS || 5424 attr->link_create.attach_type == BPF_TCX_EGRESS) 5425 ret = tcx_link_attach(attr, prog); 5426 else 5427 ret = netkit_link_attach(attr, prog); 5428 break; 5429 case BPF_PROG_TYPE_NETFILTER: 5430 ret = bpf_nf_link_attach(attr, prog); 5431 break; 5432 #endif 5433 case BPF_PROG_TYPE_PERF_EVENT: 5434 case BPF_PROG_TYPE_TRACEPOINT: 5435 ret = bpf_perf_link_attach(attr, prog); 5436 break; 5437 case BPF_PROG_TYPE_KPROBE: 5438 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5439 ret = bpf_perf_link_attach(attr, prog); 5440 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || 5441 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) 5442 ret = bpf_kprobe_multi_link_attach(attr, prog); 5443 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || 5444 attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) 5445 ret = bpf_uprobe_multi_link_attach(attr, prog); 5446 break; 5447 default: 5448 ret = -EINVAL; 5449 } 5450 5451 out: 5452 if (ret < 0) 5453 bpf_prog_put(prog); 5454 return ret; 5455 } 5456 5457 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5458 { 5459 struct bpf_map *new_map, *old_map = NULL; 5460 int ret; 5461 5462 new_map = bpf_map_get(attr->link_update.new_map_fd); 5463 if (IS_ERR(new_map)) 5464 return PTR_ERR(new_map); 5465 5466 if (attr->link_update.flags & BPF_F_REPLACE) { 5467 old_map = bpf_map_get(attr->link_update.old_map_fd); 5468 if (IS_ERR(old_map)) { 5469 ret = PTR_ERR(old_map); 5470 goto out_put; 5471 } 5472 } else if (attr->link_update.old_map_fd) { 5473 ret = -EINVAL; 5474 goto out_put; 5475 } 5476 5477 ret = link->ops->update_map(link, new_map, old_map); 5478 5479 if (old_map) 5480 bpf_map_put(old_map); 5481 out_put: 5482 bpf_map_put(new_map); 5483 return ret; 5484 } 5485 5486 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5487 5488 static int link_update(union bpf_attr *attr) 5489 { 5490 struct bpf_prog *old_prog = NULL, *new_prog; 5491 struct bpf_link *link; 5492 u32 flags; 5493 int ret; 5494 5495 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5496 return -EINVAL; 5497 5498 flags = attr->link_update.flags; 5499 if (flags & ~BPF_F_REPLACE) 5500 return -EINVAL; 5501 5502 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5503 if (IS_ERR(link)) 5504 return PTR_ERR(link); 5505 5506 if (link->ops->update_map) { 5507 ret = link_update_map(link, attr); 5508 goto out_put_link; 5509 } 5510 5511 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5512 if (IS_ERR(new_prog)) { 5513 ret = PTR_ERR(new_prog); 5514 goto out_put_link; 5515 } 5516 5517 if (flags & BPF_F_REPLACE) { 5518 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5519 if (IS_ERR(old_prog)) { 5520 ret = PTR_ERR(old_prog); 5521 old_prog = NULL; 5522 goto out_put_progs; 5523 } 5524 } else if (attr->link_update.old_prog_fd) { 5525 ret = -EINVAL; 5526 goto out_put_progs; 5527 } 5528 5529 if (link->ops->update_prog) 5530 ret = link->ops->update_prog(link, new_prog, old_prog); 5531 else 5532 ret = -EINVAL; 5533 5534 out_put_progs: 5535 if (old_prog) 5536 bpf_prog_put(old_prog); 5537 if (ret) 5538 bpf_prog_put(new_prog); 5539 out_put_link: 5540 bpf_link_put_direct(link); 5541 return ret; 5542 } 5543 5544 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5545 5546 static int link_detach(union 
bpf_attr *attr) 5547 { 5548 struct bpf_link *link; 5549 int ret; 5550 5551 if (CHECK_ATTR(BPF_LINK_DETACH)) 5552 return -EINVAL; 5553 5554 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5555 if (IS_ERR(link)) 5556 return PTR_ERR(link); 5557 5558 if (link->ops->detach) 5559 ret = link->ops->detach(link); 5560 else 5561 ret = -EOPNOTSUPP; 5562 5563 bpf_link_put_direct(link); 5564 return ret; 5565 } 5566 5567 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5568 { 5569 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5570 } 5571 EXPORT_SYMBOL(bpf_link_inc_not_zero); 5572 5573 struct bpf_link *bpf_link_by_id(u32 id) 5574 { 5575 struct bpf_link *link; 5576 5577 if (!id) 5578 return ERR_PTR(-ENOENT); 5579 5580 spin_lock_bh(&link_idr_lock); 5581 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5582 link = idr_find(&link_idr, id); 5583 if (link) { 5584 if (link->id) 5585 link = bpf_link_inc_not_zero(link); 5586 else 5587 link = ERR_PTR(-EAGAIN); 5588 } else { 5589 link = ERR_PTR(-ENOENT); 5590 } 5591 spin_unlock_bh(&link_idr_lock); 5592 return link; 5593 } 5594 5595 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5596 { 5597 struct bpf_link *link; 5598 5599 spin_lock_bh(&link_idr_lock); 5600 again: 5601 link = idr_get_next(&link_idr, id); 5602 if (link) { 5603 link = bpf_link_inc_not_zero(link); 5604 if (IS_ERR(link)) { 5605 (*id)++; 5606 goto again; 5607 } 5608 } 5609 spin_unlock_bh(&link_idr_lock); 5610 5611 return link; 5612 } 5613 5614 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5615 5616 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5617 { 5618 struct bpf_link *link; 5619 u32 id = attr->link_id; 5620 int fd; 5621 5622 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5623 return -EINVAL; 5624 5625 if (!capable(CAP_SYS_ADMIN)) 5626 return -EPERM; 5627 5628 link = bpf_link_by_id(id); 5629 if (IS_ERR(link)) 5630 return PTR_ERR(link); 5631 5632 fd = bpf_link_new_fd(link); 5633 if (fd < 0) 5634 bpf_link_put_direct(link); 5635 5636 return fd; 5637 } 5638 5639 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5640 5641 static int bpf_stats_release(struct inode *inode, struct file *file) 5642 { 5643 mutex_lock(&bpf_stats_enabled_mutex); 5644 static_key_slow_dec(&bpf_stats_enabled_key.key); 5645 mutex_unlock(&bpf_stats_enabled_mutex); 5646 return 0; 5647 } 5648 5649 static const struct file_operations bpf_stats_fops = { 5650 .release = bpf_stats_release, 5651 }; 5652 5653 static int bpf_enable_runtime_stats(void) 5654 { 5655 int fd; 5656 5657 mutex_lock(&bpf_stats_enabled_mutex); 5658 5659 /* Set a very high limit to avoid overflow */ 5660 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5661 mutex_unlock(&bpf_stats_enabled_mutex); 5662 return -EBUSY; 5663 } 5664 5665 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5666 if (fd >= 0) 5667 static_key_slow_inc(&bpf_stats_enabled_key.key); 5668 5669 mutex_unlock(&bpf_stats_enabled_mutex); 5670 return fd; 5671 } 5672 5673 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5674 5675 static int bpf_enable_stats(union bpf_attr *attr) 5676 { 5677 5678 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5679 return -EINVAL; 5680 5681 if (!capable(CAP_SYS_ADMIN)) 5682 return -EPERM; 5683 5684 switch (attr->enable_stats.type) { 5685 case BPF_STATS_RUN_TIME: 5686 return bpf_enable_runtime_stats(); 5687 default: 5688 break; 5689 } 5690 return -EINVAL; 5691 } 5692 5693 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5694 5695 static int bpf_iter_create(union 
bpf_attr *attr) 5696 { 5697 struct bpf_link *link; 5698 int err; 5699 5700 if (CHECK_ATTR(BPF_ITER_CREATE)) 5701 return -EINVAL; 5702 5703 if (attr->iter_create.flags) 5704 return -EINVAL; 5705 5706 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5707 if (IS_ERR(link)) 5708 return PTR_ERR(link); 5709 5710 err = bpf_iter_new_fd(link); 5711 bpf_link_put_direct(link); 5712 5713 return err; 5714 } 5715 5716 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5717 5718 static int bpf_prog_bind_map(union bpf_attr *attr) 5719 { 5720 struct bpf_prog *prog; 5721 struct bpf_map *map; 5722 struct bpf_map **used_maps_old, **used_maps_new; 5723 int i, ret = 0; 5724 5725 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5726 return -EINVAL; 5727 5728 if (attr->prog_bind_map.flags) 5729 return -EINVAL; 5730 5731 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5732 if (IS_ERR(prog)) 5733 return PTR_ERR(prog); 5734 5735 map = bpf_map_get(attr->prog_bind_map.map_fd); 5736 if (IS_ERR(map)) { 5737 ret = PTR_ERR(map); 5738 goto out_prog_put; 5739 } 5740 5741 mutex_lock(&prog->aux->used_maps_mutex); 5742 5743 used_maps_old = prog->aux->used_maps; 5744 5745 for (i = 0; i < prog->aux->used_map_cnt; i++) 5746 if (used_maps_old[i] == map) { 5747 bpf_map_put(map); 5748 goto out_unlock; 5749 } 5750 5751 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5752 sizeof(used_maps_new[0]), 5753 GFP_KERNEL); 5754 if (!used_maps_new) { 5755 ret = -ENOMEM; 5756 goto out_unlock; 5757 } 5758 5759 /* The bpf program will not access the bpf map, but for the sake of 5760 * simplicity, increase sleepable_refcnt for sleepable program as well. 5761 */ 5762 if (prog->sleepable) 5763 atomic64_inc(&map->sleepable_refcnt); 5764 memcpy(used_maps_new, used_maps_old, 5765 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5766 used_maps_new[prog->aux->used_map_cnt] = map; 5767 5768 prog->aux->used_map_cnt++; 5769 prog->aux->used_maps = used_maps_new; 5770 5771 kfree(used_maps_old); 5772 5773 out_unlock: 5774 mutex_unlock(&prog->aux->used_maps_mutex); 5775 5776 if (ret) 5777 bpf_map_put(map); 5778 out_prog_put: 5779 bpf_prog_put(prog); 5780 return ret; 5781 } 5782 5783 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd 5784 5785 static int token_create(union bpf_attr *attr) 5786 { 5787 if (CHECK_ATTR(BPF_TOKEN_CREATE)) 5788 return -EINVAL; 5789 5790 /* no flags are supported yet */ 5791 if (attr->token_create.flags) 5792 return -EINVAL; 5793 5794 return bpf_token_create(attr); 5795 } 5796 5797 static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) 5798 { 5799 union bpf_attr attr; 5800 int err; 5801 5802 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5803 if (err) 5804 return err; 5805 size = min_t(u32, size, sizeof(attr)); 5806 5807 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5808 memset(&attr, 0, sizeof(attr)); 5809 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5810 return -EFAULT; 5811 5812 err = security_bpf(cmd, &attr, size, uattr.is_kernel); 5813 if (err < 0) 5814 return err; 5815 5816 switch (cmd) { 5817 case BPF_MAP_CREATE: 5818 err = map_create(&attr, uattr.is_kernel); 5819 break; 5820 case BPF_MAP_LOOKUP_ELEM: 5821 err = map_lookup_elem(&attr); 5822 break; 5823 case BPF_MAP_UPDATE_ELEM: 5824 err = map_update_elem(&attr, uattr); 5825 break; 5826 case BPF_MAP_DELETE_ELEM: 5827 err = map_delete_elem(&attr, uattr); 5828 break; 5829 case BPF_MAP_GET_NEXT_KEY: 5830 err = map_get_next_key(&attr); 5831 break; 5832 case BPF_MAP_FREEZE: 5833 err = map_freeze(&attr); 
5834 break; 5835 case BPF_PROG_LOAD: 5836 err = bpf_prog_load(&attr, uattr, size); 5837 break; 5838 case BPF_OBJ_PIN: 5839 err = bpf_obj_pin(&attr); 5840 break; 5841 case BPF_OBJ_GET: 5842 err = bpf_obj_get(&attr); 5843 break; 5844 case BPF_PROG_ATTACH: 5845 err = bpf_prog_attach(&attr); 5846 break; 5847 case BPF_PROG_DETACH: 5848 err = bpf_prog_detach(&attr); 5849 break; 5850 case BPF_PROG_QUERY: 5851 err = bpf_prog_query(&attr, uattr.user); 5852 break; 5853 case BPF_PROG_TEST_RUN: 5854 err = bpf_prog_test_run(&attr, uattr.user); 5855 break; 5856 case BPF_PROG_GET_NEXT_ID: 5857 err = bpf_obj_get_next_id(&attr, uattr.user, 5858 &prog_idr, &prog_idr_lock); 5859 break; 5860 case BPF_MAP_GET_NEXT_ID: 5861 err = bpf_obj_get_next_id(&attr, uattr.user, 5862 &map_idr, &map_idr_lock); 5863 break; 5864 case BPF_BTF_GET_NEXT_ID: 5865 err = bpf_obj_get_next_id(&attr, uattr.user, 5866 &btf_idr, &btf_idr_lock); 5867 break; 5868 case BPF_PROG_GET_FD_BY_ID: 5869 err = bpf_prog_get_fd_by_id(&attr); 5870 break; 5871 case BPF_MAP_GET_FD_BY_ID: 5872 err = bpf_map_get_fd_by_id(&attr); 5873 break; 5874 case BPF_OBJ_GET_INFO_BY_FD: 5875 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5876 break; 5877 case BPF_RAW_TRACEPOINT_OPEN: 5878 err = bpf_raw_tracepoint_open(&attr); 5879 break; 5880 case BPF_BTF_LOAD: 5881 err = bpf_btf_load(&attr, uattr, size); 5882 break; 5883 case BPF_BTF_GET_FD_BY_ID: 5884 err = bpf_btf_get_fd_by_id(&attr); 5885 break; 5886 case BPF_TASK_FD_QUERY: 5887 err = bpf_task_fd_query(&attr, uattr.user); 5888 break; 5889 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5890 err = map_lookup_and_delete_elem(&attr); 5891 break; 5892 case BPF_MAP_LOOKUP_BATCH: 5893 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5894 break; 5895 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5896 err = bpf_map_do_batch(&attr, uattr.user, 5897 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5898 break; 5899 case BPF_MAP_UPDATE_BATCH: 5900 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5901 break; 5902 case BPF_MAP_DELETE_BATCH: 5903 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5904 break; 5905 case BPF_LINK_CREATE: 5906 err = link_create(&attr, uattr); 5907 break; 5908 case BPF_LINK_UPDATE: 5909 err = link_update(&attr); 5910 break; 5911 case BPF_LINK_GET_FD_BY_ID: 5912 err = bpf_link_get_fd_by_id(&attr); 5913 break; 5914 case BPF_LINK_GET_NEXT_ID: 5915 err = bpf_obj_get_next_id(&attr, uattr.user, 5916 &link_idr, &link_idr_lock); 5917 break; 5918 case BPF_ENABLE_STATS: 5919 err = bpf_enable_stats(&attr); 5920 break; 5921 case BPF_ITER_CREATE: 5922 err = bpf_iter_create(&attr); 5923 break; 5924 case BPF_LINK_DETACH: 5925 err = link_detach(&attr); 5926 break; 5927 case BPF_PROG_BIND_MAP: 5928 err = bpf_prog_bind_map(&attr); 5929 break; 5930 case BPF_TOKEN_CREATE: 5931 err = token_create(&attr); 5932 break; 5933 default: 5934 err = -EINVAL; 5935 break; 5936 } 5937 5938 return err; 5939 } 5940 5941 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5942 { 5943 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5944 } 5945 5946 static bool syscall_prog_is_valid_access(int off, int size, 5947 enum bpf_access_type type, 5948 const struct bpf_prog *prog, 5949 struct bpf_insn_access_aux *info) 5950 { 5951 if (off < 0 || off >= U16_MAX) 5952 return false; 5953 if (off % size != 0) 5954 return false; 5955 return true; 5956 } 5957 5958 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5959 { 5960 switch (cmd) { 5961 case BPF_MAP_CREATE: 5962 case 
BPF_MAP_DELETE_ELEM: 5963 case BPF_MAP_UPDATE_ELEM: 5964 case BPF_MAP_FREEZE: 5965 case BPF_MAP_GET_FD_BY_ID: 5966 case BPF_PROG_LOAD: 5967 case BPF_BTF_LOAD: 5968 case BPF_LINK_CREATE: 5969 case BPF_RAW_TRACEPOINT_OPEN: 5970 break; 5971 default: 5972 return -EINVAL; 5973 } 5974 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); 5975 } 5976 5977 5978 /* To shut up -Wmissing-prototypes. 5979 * This function is used by the kernel light skeleton 5980 * to load bpf programs when modules are loaded or during kernel boot. 5981 * See tools/lib/bpf/skel_internal.h 5982 */ 5983 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 5984 5985 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 5986 { 5987 struct bpf_prog * __maybe_unused prog; 5988 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 5989 5990 switch (cmd) { 5991 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 5992 case BPF_PROG_TEST_RUN: 5993 if (attr->test.data_in || attr->test.data_out || 5994 attr->test.ctx_out || attr->test.duration || 5995 attr->test.repeat || attr->test.flags) 5996 return -EINVAL; 5997 5998 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 5999 if (IS_ERR(prog)) 6000 return PTR_ERR(prog); 6001 6002 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 6003 attr->test.ctx_size_in > U16_MAX) { 6004 bpf_prog_put(prog); 6005 return -EINVAL; 6006 } 6007 6008 run_ctx.bpf_cookie = 0; 6009 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 6010 /* recursion detected */ 6011 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 6012 bpf_prog_put(prog); 6013 return -EBUSY; 6014 } 6015 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 6016 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 6017 &run_ctx); 6018 bpf_prog_put(prog); 6019 return 0; 6020 #endif 6021 default: 6022 return ____bpf_sys_bpf(cmd, attr, size); 6023 } 6024 } 6025 EXPORT_SYMBOL_NS(kern_sys_bpf, "BPF_INTERNAL"); 6026 6027 static const struct bpf_func_proto bpf_sys_bpf_proto = { 6028 .func = bpf_sys_bpf, 6029 .gpl_only = false, 6030 .ret_type = RET_INTEGER, 6031 .arg1_type = ARG_ANYTHING, 6032 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 6033 .arg3_type = ARG_CONST_SIZE, 6034 }; 6035 6036 const struct bpf_func_proto * __weak 6037 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 6038 { 6039 return bpf_base_func_proto(func_id, prog); 6040 } 6041 6042 BPF_CALL_1(bpf_sys_close, u32, fd) 6043 { 6044 /* When bpf program calls this helper there should not be 6045 * an fdget() without matching completed fdput(). 6046 * This helper is allowed in the following callchain only: 6047 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close 6048 */ 6049 return close_fd(fd); 6050 } 6051 6052 static const struct bpf_func_proto bpf_sys_close_proto = { 6053 .func = bpf_sys_close, 6054 .gpl_only = false, 6055 .ret_type = RET_INTEGER, 6056 .arg1_type = ARG_ANYTHING, 6057 }; 6058 6059 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) 6060 { 6061 *res = 0; 6062 if (flags) 6063 return -EINVAL; 6064 6065 if (name_sz <= 1 || name[name_sz - 1]) 6066 return -EINVAL; 6067 6068 if (!bpf_dump_raw_ok(current_cred())) 6069 return -EPERM; 6070 6071 *res = kallsyms_lookup_name(name); 6072 return *res ? 
0 : -ENOENT; 6073 } 6074 6075 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 6076 .func = bpf_kallsyms_lookup_name, 6077 .gpl_only = false, 6078 .ret_type = RET_INTEGER, 6079 .arg1_type = ARG_PTR_TO_MEM, 6080 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 6081 .arg3_type = ARG_ANYTHING, 6082 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 6083 .arg4_size = sizeof(u64), 6084 }; 6085 6086 static const struct bpf_func_proto * 6087 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 6088 { 6089 switch (func_id) { 6090 case BPF_FUNC_sys_bpf: 6091 return !bpf_token_capable(prog->aux->token, CAP_PERFMON) 6092 ? NULL : &bpf_sys_bpf_proto; 6093 case BPF_FUNC_btf_find_by_name_kind: 6094 return &bpf_btf_find_by_name_kind_proto; 6095 case BPF_FUNC_sys_close: 6096 return &bpf_sys_close_proto; 6097 case BPF_FUNC_kallsyms_lookup_name: 6098 return &bpf_kallsyms_lookup_name_proto; 6099 default: 6100 return tracing_prog_func_proto(func_id, prog); 6101 } 6102 } 6103 6104 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 6105 .get_func_proto = syscall_prog_func_proto, 6106 .is_valid_access = syscall_prog_is_valid_access, 6107 }; 6108 6109 const struct bpf_prog_ops bpf_syscall_prog_ops = { 6110 .test_run = bpf_prog_test_run_syscall, 6111 }; 6112 6113 #ifdef CONFIG_SYSCTL 6114 static int bpf_stats_handler(const struct ctl_table *table, int write, 6115 void *buffer, size_t *lenp, loff_t *ppos) 6116 { 6117 struct static_key *key = (struct static_key *)table->data; 6118 static int saved_val; 6119 int val, ret; 6120 struct ctl_table tmp = { 6121 .data = &val, 6122 .maxlen = sizeof(val), 6123 .mode = table->mode, 6124 .extra1 = SYSCTL_ZERO, 6125 .extra2 = SYSCTL_ONE, 6126 }; 6127 6128 if (write && !capable(CAP_SYS_ADMIN)) 6129 return -EPERM; 6130 6131 mutex_lock(&bpf_stats_enabled_mutex); 6132 val = saved_val; 6133 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6134 if (write && !ret && val != saved_val) { 6135 if (val) 6136 static_key_slow_inc(key); 6137 else 6138 static_key_slow_dec(key); 6139 saved_val = val; 6140 } 6141 mutex_unlock(&bpf_stats_enabled_mutex); 6142 return ret; 6143 } 6144 6145 void __weak unpriv_ebpf_notify(int new_state) 6146 { 6147 } 6148 6149 static int bpf_unpriv_handler(const struct ctl_table *table, int write, 6150 void *buffer, size_t *lenp, loff_t *ppos) 6151 { 6152 int ret, unpriv_enable = *(int *)table->data; 6153 bool locked_state = unpriv_enable == 1; 6154 struct ctl_table tmp = *table; 6155 6156 if (write && !capable(CAP_SYS_ADMIN)) 6157 return -EPERM; 6158 6159 tmp.data = &unpriv_enable; 6160 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6161 if (write && !ret) { 6162 if (locked_state && unpriv_enable != 1) 6163 return -EPERM; 6164 *(int *)table->data = unpriv_enable; 6165 } 6166 6167 if (write) 6168 unpriv_ebpf_notify(unpriv_enable); 6169 6170 return ret; 6171 } 6172 6173 static const struct ctl_table bpf_syscall_table[] = { 6174 { 6175 .procname = "unprivileged_bpf_disabled", 6176 .data = &sysctl_unprivileged_bpf_disabled, 6177 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 6178 .mode = 0644, 6179 .proc_handler = bpf_unpriv_handler, 6180 .extra1 = SYSCTL_ZERO, 6181 .extra2 = SYSCTL_TWO, 6182 }, 6183 { 6184 .procname = "bpf_stats_enabled", 6185 .data = &bpf_stats_enabled_key.key, 6186 .mode = 0644, 6187 .proc_handler = bpf_stats_handler, 6188 }, 6189 }; 6190 6191 static int __init bpf_syscall_sysctl_init(void) 6192 { 6193 register_sysctl_init("kernel", 
			     bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */
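
/*
 * Illustrative sketch, not part of this file: a minimal user-space caller of
 * the bpf(2) syscall path implemented by __sys_bpf() above. It creates a
 * one-element array map with BPF_MAP_CREATE and writes to it with
 * BPF_MAP_UPDATE_ELEM. Field names follow include/uapi/linux/bpf.h; error
 * handling and the program-load path are omitted for brevity.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
 *	}
 *
 *	int example(void)
 *	{
 *		union bpf_attr attr;
 *		uint32_t key = 0;
 *		uint64_t val = 42;
 *		int map_fd, err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = sizeof(key);
 *		attr.value_size  = sizeof(val);
 *		attr.max_entries = 1;
 *		map_fd = sys_bpf(BPF_MAP_CREATE, &attr);
 *		if (map_fd < 0)
 *			return map_fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key    = (uint64_t)(unsigned long)&key;
 *		attr.value  = (uint64_t)(unsigned long)&val;
 *		attr.flags  = BPF_ANY;
 *		err = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr);
 *
 *		close(map_fd);
 *		return err;
 *	}
 */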