Lines from kernel/bpf/hashtab.c matching "cpu" / "map"
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
48 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
52 * by pinning the task to the current CPU and incrementing the recursion
53 * protection across the map operation.
74 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel,
76 * after hash map was fully converted to use bpf_mem_alloc, there will be
77 * non-synchronous memory allocation for non-preallocated hash map, so it is
86 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
89 struct bpf_map map; member
99 /* number of elements in non-preallocated hashtable are kept
123 /* pointer to per-cpu pointer */
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
140 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
142 raw_res_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
152 ret = raw_res_spin_lock_irqsave(&b->raw_lock, flags); in htab_lock_bucket()
161 raw_res_spin_unlock_irqrestore(&b->raw_lock, flags); in htab_unlock_bucket()
168 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
169 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
174 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
175 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
181 *(void __percpu **)(l->key + roundup(key_size, 8)) = pptr; in htab_elem_set_ptr()
186 return *(void __percpu **)(l->key + roundup(key_size, 8)); in htab_elem_get_ptr()
189 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) in fd_htab_map_get_ptr() argument
191 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
196 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
206 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers_and_wq()
216 if (btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers_and_wq()
217 bpf_obj_free_timer(htab->map.record, in htab_free_prealloced_timers_and_wq()
218 elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers_and_wq()
219 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) in htab_free_prealloced_timers_and_wq()
220 bpf_obj_free_workqueue(htab->map.record, in htab_free_prealloced_timers_and_wq()
221 elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers_and_wq()
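
htab_free_prealloced_timers_and_wq() above exists because a hash map value may embed a struct bpf_timer or struct bpf_wq that has to be cancelled and freed before the element's memory is reused or released. A minimal BPF-program-side sketch of such a map, assuming libbpf BTF-defined maps and the standard timer helpers (the map name, value layout and attach point are illustrative, not taken from this file):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    #ifndef CLOCK_MONOTONIC
    #define CLOCK_MONOTONIC 1
    #endif

    struct val {
        struct bpf_timer t;   /* embedded timer, cleaned up by the map free path */
        __u64 hits;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, struct val);
    } timers SEC(".maps");

    static int timer_cb(void *map, __u32 *key, struct val *v)
    {
        __sync_fetch_and_add(&v->hits, 1);
        return 0;
    }

    SEC("tp/syscalls/sys_enter_nanosleep")
    int arm_timer(void *ctx)
    {
        struct val zero = {};
        __u32 key = 0;
        struct val *v;

        bpf_map_update_elem(&timers, &key, &zero, BPF_NOEXIST);
        v = bpf_map_lookup_elem(&timers, &key);
        if (!v)
            return 0;
        /* bpf_timer_init() returns -EBUSY on repeat calls; ignored in this sketch */
        bpf_timer_init(&v->t, &timers, CLOCK_MONOTONIC);
        bpf_timer_set_callback(&v->t, timer_cb);
        bpf_timer_start(&v->t, 1000000000 /* 1s */, 0);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
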
228 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
231 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
240 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in htab_free_prealloced_fields()
241 int cpu; in htab_free_prealloced_fields() local
243 for_each_possible_cpu(cpu) { in htab_free_prealloced_fields()
244 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in htab_free_prealloced_fields()
248 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
262 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
266 htab->map.key_size); in htab_free_elems()
271 bpf_map_area_free(htab->elems); in htab_free_elems()
276 * order is always lru_lock -> bucket_lock and this only happens in
288 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
292 bpf_map_inc_elem_count(&htab->map); in prealloc_lru_pop()
294 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
303 u32 num_entries = htab->map.max_entries; in prealloc_init()
304 int err = -ENOMEM, i; in prealloc_init()
309 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
310 htab->map.numa_node); in prealloc_init()
311 if (!htab->elems) in prealloc_init()
312 return -ENOMEM; in prealloc_init()
318 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
321 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
325 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
332 err = bpf_lru_init(&htab->lru, in prealloc_init()
333 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
334 offsetof(struct htab_elem, hash) - in prealloc_init()
339 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
345 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
347 htab->elem_size, num_entries); in prealloc_init()
349 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
350 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
351 htab->elem_size, num_entries); in prealloc_init()
365 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
367 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
374 int cpu; in alloc_extra_elems() local
376 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
379 return -ENOMEM; in alloc_extra_elems()
381 for_each_possible_cpu(cpu) { in alloc_extra_elems()
382 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
387 *per_cpu_ptr(pptr, cpu) = l_new; in alloc_extra_elems()
389 htab->extra_elems = pptr; in alloc_extra_elems()
396 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc_check()
397 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
398 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc_check()
399 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
400 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc_check()
402 * the map's value itself is percpu. percpu_lru has in htab_map_alloc_check()
403 * nothing to do with the map's value. in htab_map_alloc_check()
405 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc_check()
406 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc_check()
407 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); in htab_map_alloc_check()
415 return -EPERM; in htab_map_alloc_check()
417 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || in htab_map_alloc_check()
418 !bpf_map_flags_access_ok(attr->map_flags)) in htab_map_alloc_check()
419 return -EINVAL; in htab_map_alloc_check()
422 return -EINVAL; in htab_map_alloc_check()
425 return -ENOTSUPP; in htab_map_alloc_check()
428 return -EINVAL; in htab_map_alloc_check()
431 * value_size == 0 may be allowed in the future to use map as a set in htab_map_alloc_check()
433 if (attr->max_entries == 0 || attr->key_size == 0 || in htab_map_alloc_check()
434 attr->value_size == 0) in htab_map_alloc_check()
435 return -EINVAL; in htab_map_alloc_check()
437 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - in htab_map_alloc_check()
442 * kmalloc-able later in htab_map_update_elem() in htab_map_alloc_check()
444 return -E2BIG; in htab_map_alloc_check()
445 /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ in htab_map_alloc_check()
446 if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) in htab_map_alloc_check()
447 return -E2BIG; in htab_map_alloc_check()
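
Seen from userspace, htab_map_alloc_check() is what constrains bpf_map_create() for the hash map family: key_size, value_size and max_entries must be non-zero, the flag combination must be valid, and a per-CPU value is additionally bounded by PCPU_MIN_UNIT_SIZE. A hedged libbpf sketch of creating two such maps (names and sizes are arbitrary):

    #include <bpf/bpf.h>
    #include <linux/bpf.h>
    #include <stdio.h>
    #include <unistd.h>

    int create_maps(void)
    {
        LIBBPF_OPTS(bpf_map_create_opts, opts);
        int hash_fd, lru_fd;

        /* Plain hash map, allocated on demand instead of preallocated. */
        opts.map_flags = BPF_F_NO_PREALLOC;
        hash_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo_hash",
                                 sizeof(__u32), sizeof(__u64), 1024, &opts);
        if (hash_fd < 0)
            return hash_fd;

        /* LRU hash with one LRU list per CPU (BPF_F_NO_COMMON_LRU); as the
         * comment above says, this only changes eviction behaviour, not
         * where values live. */
        opts.map_flags = BPF_F_NO_COMMON_LRU;
        lru_fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "demo_lru",
                                sizeof(__u32), sizeof(__u64), 4096, &opts);
        if (lru_fd < 0) {
            close(hash_fd);
            return lru_fd;
        }
        printf("hash fd %d, lru fd %d\n", hash_fd, lru_fd);
        return 0;
    }
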
454 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc()
455 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
456 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc()
457 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
458 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc()
460 * the map's value itself is percpu. percpu_lru has in htab_map_alloc()
461 * nothing to do with the map's value. in htab_map_alloc()
463 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc()
464 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc()
470 return ERR_PTR(-ENOMEM); in htab_map_alloc()
472 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
475 /* ensure each CPU's lru list has >=1 elements. in htab_map_alloc()
479 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
481 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
482 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
487 * into UB on 32-bit arches, so check that first in htab_map_alloc()
489 err = -E2BIG; in htab_map_alloc()
490 if (htab->map.max_entries > 1UL << 31) in htab_map_alloc()
493 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
495 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
496 round_up(htab->map.key_size, 8); in htab_map_alloc()
498 htab->elem_size += sizeof(void *); in htab_map_alloc()
500 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
503 if (htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
506 err = bpf_map_init_elem_count(&htab->map); in htab_map_alloc()
510 err = -ENOMEM; in htab_map_alloc()
511 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
513 htab->map.numa_node); in htab_map_alloc()
514 if (!htab->buckets) in htab_map_alloc()
517 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
518 htab->hashrnd = 0; in htab_map_alloc()
520 htab->hashrnd = get_random_u32(); in htab_map_alloc()
526 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus() in htab_map_alloc()
528 * hash map size is 10k, which means that a system with 64 cpus will fill in htab_map_alloc()
530 * define our own batch count as 32 then 10k hash map can be filled up to 80%: in htab_map_alloc()
531 * 10k - 8k > 32 _batch_ * 64 _cpus_ in htab_map_alloc()
532 * and __percpu_counter_compare() will still be fast. At that point hash map in htab_map_alloc()
533 * collisions will dominate its performance anyway. Assume that hash map filled in htab_map_alloc()
538 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH) in htab_map_alloc()
539 htab->use_percpu_counter = true; in htab_map_alloc()
541 if (htab->use_percpu_counter) { in htab_map_alloc()
542 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
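
Plugging the comment's own numbers into the heuristic above: with the batch fixed at 32 and 64 CPUs, the per-CPU counter can under-report the element count by at most 32 * 64 = 2048, so a 10k-entry map stays accurate enough until it is roughly 80% full, whereas the default percpu_counter batch (twice the number of online CPUs, so 128 on that box) would need 128 * 64 = 8192 free slots and becomes unreliable past roughly 20% fill. The guard max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH therefore switches to the per-CPU counter only for maps larger than 2 * 64 * 32 = 4096 entries on such a system; smaller maps keep the plain atomic count.
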
561 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
565 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
566 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
572 return &htab->map; in htab_map_alloc()
577 if (htab->use_percpu_counter) in htab_map_alloc()
578 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
579 bpf_map_area_free(htab->buckets); in htab_map_alloc()
580 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
581 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
583 bpf_map_free_elem_count(&htab->map); in htab_map_alloc()
598 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
603 return &__select_bucket(htab, hash)->head; in select_bucket()
614 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_elem_raw()
633 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_nulls_elem_raw()
636 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) in lookup_nulls_elem_raw()
647 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) in __htab_map_lookup_elem() argument
649 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem()
657 key_size = map->key_size; in __htab_map_lookup_elem()
659 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
663 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
668 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) in htab_map_lookup_elem() argument
670 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_map_lookup_elem()
673 return l->key + round_up(map->key_size, 8); in htab_map_lookup_elem()
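
__htab_map_lookup_elem() and htab_map_lookup_elem() back the bpf_map_lookup_elem() helper called from BPF programs; the returned pointer is the value area that sits right after the 8-byte-aligned key inside struct htab_elem. A minimal program-side sketch (BTF-defined map; the map name and attach point are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u32);
        __type(value, __u64);
    } counts SEC(".maps");

    SEC("tp/syscalls/sys_enter_openat")
    int count_openat(void *ctx)
    {
        __u32 key = bpf_get_current_pid_tgid() >> 32;   /* tgid */
        __u64 init = 1, *val;

        val = bpf_map_lookup_elem(&counts, &key);       /* htab_map_lookup_elem() in the kernel */
        if (val)
            __sync_fetch_and_add(val, 1);
        else
            bpf_map_update_elem(&counts, &key, &init, BPF_NOEXIST);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
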
682 * map->ops->map_lookup_elem
689 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in htab_map_gen_lookup() argument
695 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_map_gen_lookup()
700 round_up(map->key_size, 8)); in htab_map_gen_lookup()
701 return insn - insn_buf; in htab_map_gen_lookup()
704 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, in __htab_lru_map_lookup_elem() argument
707 struct htab_elem *l = __htab_map_lookup_elem(map, key); in __htab_lru_map_lookup_elem()
711 bpf_lru_node_set_ref(&l->lru_node); in __htab_lru_map_lookup_elem()
712 return l->key + round_up(map->key_size, 8); in __htab_lru_map_lookup_elem()
718 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem() argument
720 return __htab_lru_map_lookup_elem(map, key, true); in htab_lru_map_lookup_elem()
723 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem_sys() argument
725 return __htab_lru_map_lookup_elem(map, key, false); in htab_lru_map_lookup_elem_sys()
728 static int htab_lru_map_gen_lookup(struct bpf_map *map, in htab_lru_map_gen_lookup() argument
736 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_lru_map_gen_lookup()
749 round_up(map->key_size, 8)); in htab_lru_map_gen_lookup()
750 return insn - insn_buf; in htab_lru_map_gen_lookup()
756 if (IS_ERR_OR_NULL(htab->map.record)) in check_and_free_fields()
760 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in check_and_free_fields()
761 int cpu; in check_and_free_fields() local
763 for_each_possible_cpu(cpu) in check_and_free_fields()
764 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in check_and_free_fields()
766 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
768 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
786 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
787 head = &b->head; in htab_lru_map_delete_node()
795 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_node()
796 bpf_map_dec_elem_count(&htab->map); in htab_lru_map_delete_node()
808 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in htab_map_get_next_key() argument
810 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key()
818 key_size = map->key_size; in htab_map_get_next_key()
823 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
828 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
834 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), in htab_map_get_next_key()
838 /* if next elem in this hash list is non-zero, just return it */ in htab_map_get_next_key()
839 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
844 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
849 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
857 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
863 return -ENOENT; in htab_map_get_next_key()
870 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
871 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
872 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
877 struct bpf_map *map = &htab->map; in htab_put_fd_value() local
880 if (map->ops->map_fd_put_ptr) { in htab_put_fd_value()
881 ptr = fd_htab_map_get_ptr(map, l); in htab_put_fd_value()
882 map->ops->map_fd_put_ptr(map, ptr, true); in htab_put_fd_value()
888 if (htab->use_percpu_counter) in is_map_full()
889 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
891 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
896 bpf_map_inc_elem_count(&htab->map); in inc_elem_count()
898 if (htab->use_percpu_counter) in inc_elem_count()
899 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
901 atomic_inc(&htab->count); in inc_elem_count()
906 bpf_map_dec_elem_count(&htab->map); in dec_elem_count()
908 if (htab->use_percpu_counter) in dec_elem_count()
909 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
911 atomic_dec(&htab->count); in dec_elem_count()
920 bpf_map_dec_elem_count(&htab->map); in free_htab_elem()
922 pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
934 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); in pcpu_copy_value()
936 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
937 int off = 0, cpu; in pcpu_copy_value() local
939 for_each_possible_cpu(cpu) { in pcpu_copy_value()
940 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); in pcpu_copy_value()
949 /* When not setting the initial value on all cpus, zero-fill element in pcpu_init_value()
956 int cpu; in pcpu_init_value() local
958 for_each_possible_cpu(cpu) { in pcpu_init_value()
959 if (cpu == current_cpu) in pcpu_init_value()
960 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); in pcpu_init_value()
962 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); in pcpu_init_value()
971 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
980 u32 size = htab->map.value_size; in alloc_htab_elem()
988 * use per-cpu extra elems to avoid freelist_pop/push in alloc_htab_elem()
990 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
996 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
998 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1000 bpf_map_inc_elem_count(&htab->map); in alloc_htab_elem()
1005 /* when map is full and update() is replacing in alloc_htab_elem()
1010 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1012 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1014 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1019 memcpy(l_new->key, key, key_size); in alloc_htab_elem()
1024 /* alloc_percpu zero-fills */ in alloc_htab_elem()
1025 void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1028 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1029 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1032 l_new->ptr_to_pptr = ptr; in alloc_htab_elem()
1042 memcpy(l_new->key + round_up(key_size, 8), value, size); in alloc_htab_elem()
1044 copy_map_value(&htab->map, in alloc_htab_elem()
1045 l_new->key + round_up(key_size, 8), in alloc_htab_elem()
1049 l_new->hash = hash; in alloc_htab_elem()
1061 return -EEXIST; in check_flags()
1065 return -ENOENT; in check_flags()
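
check_flags() is where the BPF_NOEXIST and BPF_EXIST update flags turn into -EEXIST and -ENOENT. The same semantics are visible from userspace through bpf_map_update_elem(); a sketch against an already-created hash map fd with u32 keys and u64 values (assumed, not part of this file):

    #include <bpf/bpf.h>
    #include <errno.h>
    #include <linux/bpf.h>

    int demo_update_flags(int map_fd)
    {
        __u32 key = 42;
        __u64 val = 1;
        int err;

        /* Create-only: fails with EEXIST if the key is already present. */
        err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
        if (err && errno == EEXIST)
            return -EEXIST;

        /* Update-only: fails with ENOENT if the key does not exist. */
        val = 2;
        err = bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);
        if (err && errno == ENOENT)
            return -ENOENT;

        /* BPF_ANY inserts or overwrites unconditionally. */
        return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
    }
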
1071 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_map_update_elem() argument
1074 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem()
1085 return -EINVAL; in htab_map_update_elem()
1090 key_size = map->key_size; in htab_map_update_elem()
1092 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1095 head = &b->head; in htab_map_update_elem()
1098 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK))) in htab_map_update_elem()
1099 return -EINVAL; in htab_map_update_elem()
1102 htab->n_buckets); in htab_map_update_elem()
1108 copy_map_value_locked(map, in htab_map_update_elem()
1109 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1136 copy_map_value_locked(map, in htab_map_update_elem()
1137 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1146 /* all pre-allocated elements are in use or memory exhausted */ in htab_map_update_elem()
1154 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_map_update_elem()
1156 hlist_nulls_del_rcu(&l_old->hash_node); in htab_map_update_elem()
1158 /* l_old has already been stashed in htab->extra_elems, free in htab_map_update_elem()
1160 * save the old map pointer in htab of maps before unlock in htab_map_update_elem()
1165 if (map->ops->map_fd_put_ptr) in htab_map_update_elem()
1166 old_map_ptr = fd_htab_map_get_ptr(map, l_old); in htab_map_update_elem()
1173 map->ops->map_fd_put_ptr(map, old_map_ptr, true); in htab_map_update_elem()
1186 bpf_map_dec_elem_count(&htab->map); in htab_lru_push_free()
1187 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1190 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_lru_map_update_elem() argument
1193 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem()
1203 return -EINVAL; in htab_lru_map_update_elem()
1208 key_size = map->key_size; in htab_lru_map_update_elem()
1210 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1213 head = &b->head; in htab_lru_map_update_elem()
1222 return -ENOMEM; in htab_lru_map_update_elem()
1223 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1224 l_new->key + round_up(map->key_size, 8), value); in htab_lru_map_update_elem()
1239 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_lru_map_update_elem()
1241 bpf_lru_node_set_ref(&l_new->lru_node); in htab_lru_map_update_elem()
1242 hlist_nulls_del_rcu(&l_old->hash_node); in htab_lru_map_update_elem()
1258 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_percpu_map_update_elem() argument
1262 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem()
1272 return -EINVAL; in __htab_percpu_map_update_elem()
1277 key_size = map->key_size; in __htab_percpu_map_update_elem()
1279 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1282 head = &b->head; in __htab_percpu_map_update_elem()
1295 /* per-cpu hash map can update value in-place */ in __htab_percpu_map_update_elem()
1305 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_percpu_map_update_elem()
1313 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_lru_percpu_map_update_elem() argument
1317 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem()
1327 return -EINVAL; in __htab_lru_percpu_map_update_elem()
1332 key_size = map->key_size; in __htab_lru_percpu_map_update_elem()
1334 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1337 head = &b->head; in __htab_lru_percpu_map_update_elem()
1347 return -ENOMEM; in __htab_lru_percpu_map_update_elem()
1361 bpf_lru_node_set_ref(&l_old->lru_node); in __htab_lru_percpu_map_update_elem()
1363 /* per-cpu hash map can update value in-place */ in __htab_lru_percpu_map_update_elem()
1369 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_lru_percpu_map_update_elem()
1377 bpf_map_dec_elem_count(&htab->map); in __htab_lru_percpu_map_update_elem()
1378 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1383 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_percpu_map_update_elem() argument
1386 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); in htab_percpu_map_update_elem()
1389 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_lru_percpu_map_update_elem() argument
1392 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, in htab_lru_percpu_map_update_elem()
1397 static long htab_map_delete_elem(struct bpf_map *map, void *key) in htab_map_delete_elem() argument
1399 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem()
1410 key_size = map->key_size; in htab_map_delete_elem()
1412 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1414 head = &b->head; in htab_map_delete_elem()
1422 hlist_nulls_del_rcu(&l->hash_node); in htab_map_delete_elem()
1424 ret = -ENOENT; in htab_map_delete_elem()
1433 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key) in htab_lru_map_delete_elem() argument
1435 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem()
1446 key_size = map->key_size; in htab_lru_map_delete_elem()
1448 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1450 head = &b->head; in htab_lru_map_delete_elem()
1459 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_elem()
1461 ret = -ENOENT; in htab_lru_map_delete_elem()
1476 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1482 hlist_nulls_del_rcu(&l->hash_node); in delete_all_elements()
1494 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers_and_wq()
1501 if (btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_malloced_timers_and_wq()
1502 bpf_obj_free_timer(htab->map.record, in htab_free_malloced_timers_and_wq()
1503 l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers_and_wq()
1504 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) in htab_free_malloced_timers_and_wq()
1505 bpf_obj_free_workqueue(htab->map.record, in htab_free_malloced_timers_and_wq()
1506 l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers_and_wq()
1513 static void htab_map_free_timers_and_wq(struct bpf_map *map) in htab_map_free_timers_and_wq() argument
1515 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers_and_wq()
1518 if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) { in htab_map_free_timers_and_wq()
1526 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1527 static void htab_map_free(struct bpf_map *map) in htab_map_free() argument
1529 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free()
1533 * There is no need to synchronize_rcu() here to protect map elements. in htab_map_free()
1547 bpf_map_free_elem_count(map); in htab_map_free()
1548 free_percpu(htab->extra_elems); in htab_map_free()
1549 bpf_map_area_free(htab->buckets); in htab_map_free()
1550 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1551 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1552 if (htab->use_percpu_counter) in htab_map_free()
1553 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1557 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, in htab_map_seq_show_elem() argument
1564 value = htab_map_lookup_elem(map, key); in htab_map_seq_show_elem()
1570 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_map_seq_show_elem()
1572 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in htab_map_seq_show_elem()
1578 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in __htab_map_lookup_and_delete_elem() argument
1582 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem()
1590 key_size = map->key_size; in __htab_map_lookup_and_delete_elem()
1592 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1594 head = &b->head; in __htab_map_lookup_and_delete_elem()
1602 ret = -ENOENT; in __htab_map_lookup_and_delete_elem()
1607 u32 roundup_value_size = round_up(map->value_size, 8); in __htab_map_lookup_and_delete_elem()
1609 int off = 0, cpu; in __htab_map_lookup_and_delete_elem() local
1612 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_elem()
1613 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_elem()
1614 check_and_init_map_value(&htab->map, value + off); in __htab_map_lookup_and_delete_elem()
1618 u32 roundup_key_size = round_up(map->key_size, 8); in __htab_map_lookup_and_delete_elem()
1621 copy_map_value_locked(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1625 copy_map_value(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1628 check_and_init_map_value(map, value); in __htab_map_lookup_and_delete_elem()
1630 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_elem()
1645 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_map_lookup_and_delete_elem() argument
1648 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, in htab_map_lookup_and_delete_elem()
1652 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_elem() argument
1656 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, in htab_percpu_map_lookup_and_delete_elem()
1660 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_lru_map_lookup_and_delete_elem() argument
1663 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, in htab_lru_map_lookup_and_delete_elem()
1667 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_elem() argument
1671 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, in htab_lru_percpu_map_lookup_and_delete_elem()
1676 __htab_map_lookup_and_delete_batch(struct bpf_map *map, in __htab_map_lookup_and_delete_batch() argument
1682 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch()
1685 void __user *uvalues = u64_to_user_ptr(attr->batch.values); in __htab_map_lookup_and_delete_batch()
1686 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); in __htab_map_lookup_and_delete_batch()
1687 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in __htab_map_lookup_and_delete_batch()
1699 elem_map_flags = attr->batch.elem_flags; in __htab_map_lookup_and_delete_batch()
1701 ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))) in __htab_map_lookup_and_delete_batch()
1702 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1704 map_flags = attr->batch.flags; in __htab_map_lookup_and_delete_batch()
1706 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1708 max_count = attr->batch.count; in __htab_map_lookup_and_delete_batch()
1712 if (put_user(0, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1713 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1717 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1719 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1720 return -ENOENT; in __htab_map_lookup_and_delete_batch()
1722 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1723 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1724 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1741 ret = -ENOMEM; in __htab_map_lookup_and_delete_batch()
1751 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1752 head = &b->head; in __htab_map_lookup_and_delete_batch()
1772 if (bucket_cnt > (max_count - total)) { in __htab_map_lookup_and_delete_batch()
1774 ret = -ENOSPC; in __htab_map_lookup_and_delete_batch()
1802 memcpy(dst_key, l->key, key_size); in __htab_map_lookup_and_delete_batch()
1805 int off = 0, cpu; in __htab_map_lookup_and_delete_batch() local
1808 pptr = htab_elem_get_ptr(l, map->key_size); in __htab_map_lookup_and_delete_batch()
1809 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_batch()
1810 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_batch()
1811 check_and_init_map_value(&htab->map, dst_val + off); in __htab_map_lookup_and_delete_batch()
1815 value = l->key + roundup_key_size; in __htab_map_lookup_and_delete_batch()
1816 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in __htab_map_lookup_and_delete_batch()
1819 /* Actual value is the id of the inner map */ in __htab_map_lookup_and_delete_batch()
1820 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map); in __htab_map_lookup_and_delete_batch()
1825 copy_map_value_locked(map, dst_val, value, in __htab_map_lookup_and_delete_batch()
1828 copy_map_value(map, dst_val, value); in __htab_map_lookup_and_delete_batch()
1830 check_and_init_map_value(map, dst_val); in __htab_map_lookup_and_delete_batch()
1833 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_batch()
1845 l->batch_flink = node_to_free; in __htab_map_lookup_and_delete_batch()
1857 node_to_free = node_to_free->batch_flink; in __htab_map_lookup_and_delete_batch()
1868 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1879 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
1885 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
1886 ret = -ENOENT; in __htab_map_lookup_and_delete_batch()
1892 if (ret == -EFAULT) in __htab_map_lookup_and_delete_batch()
1896 ubatch = u64_to_user_ptr(attr->batch.out_batch); in __htab_map_lookup_and_delete_batch()
1898 put_user(total, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1899 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
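
__htab_map_lookup_and_delete_batch() is reached from userspace via the BPF_MAP_LOOKUP_BATCH / BPF_MAP_LOOKUP_AND_DELETE_BATCH commands, which libbpf wraps. A sketch of draining a hash map batch by batch (u32 keys and u64 values assumed; -ENOENT marks the end of the walk, exactly as the code above returns it):

    #include <bpf/bpf.h>
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH_SZ 64

    /* Drain a BPF_MAP_TYPE_HASH fd with u32 keys and u64 values, batch by batch. */
    int drain_hash_map(int map_fd)
    {
        __u32 keys[BATCH_SZ], in_batch = 0, out_batch = 0, count;
        __u64 vals[BATCH_SZ];
        bool first = true;
        int err;

        do {
            count = BATCH_SZ;
            err = bpf_map_lookup_and_delete_batch(map_fd,
                                                  first ? NULL : &in_batch,
                                                  &out_batch, keys, vals,
                                                  &count, NULL);
            first = false;
            for (__u32 i = 0; i < count; i++)
                printf("key %u -> %llu\n", keys[i], (unsigned long long)vals[i]);
            in_batch = out_batch;       /* resume from the next bucket */
        } while (!err);

        /* ENOENT just means the last bucket has been reached. */
        return errno == ENOENT ? 0 : -errno;
    }
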
1908 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_percpu_map_lookup_batch() argument
1911 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_percpu_map_lookup_batch()
1916 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_batch() argument
1920 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_percpu_map_lookup_and_delete_batch()
1925 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_map_lookup_batch() argument
1928 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_map_lookup_batch()
1933 htab_map_lookup_and_delete_batch(struct bpf_map *map, in htab_map_lookup_and_delete_batch() argument
1937 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_map_lookup_and_delete_batch()
1942 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_batch() argument
1946 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_percpu_map_lookup_batch()
1951 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_batch() argument
1955 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_percpu_map_lookup_and_delete_batch()
1960 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_lru_map_lookup_batch() argument
1963 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_map_lookup_batch()
1968 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_map_lookup_and_delete_batch() argument
1972 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_map_lookup_and_delete_batch()
1977 struct bpf_map *map; member
1979 void *percpu_value_buf; // non-zero means percpu hash
1988 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next()
1989 u32 skip_elems = info->skip_elems; in bpf_hash_map_seq_find_next()
1990 u32 bucket_id = info->bucket_id; in bpf_hash_map_seq_find_next()
1997 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
2005 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); in bpf_hash_map_seq_find_next()
2011 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
2016 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
2017 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2021 head = &b->head; in bpf_hash_map_seq_find_next()
2024 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2025 info->skip_elems = count; in bpf_hash_map_seq_find_next()
2035 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2036 info->skip_elems = 0; in bpf_hash_map_seq_find_next()
2042 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_start()
2056 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_next()
2059 ++info->skip_elems; in bpf_hash_map_seq_next()
2065 struct bpf_iter_seq_hash_map_info *info = seq->private; in __bpf_hash_map_seq_show()
2068 struct bpf_map *map = info->map; in __bpf_hash_map_seq_show() local
2070 int ret = 0, off = 0, cpu; in __bpf_hash_map_seq_show() local
2078 ctx.map = info->map; in __bpf_hash_map_seq_show()
2080 roundup_key_size = round_up(map->key_size, 8); in __bpf_hash_map_seq_show()
2081 ctx.key = elem->key; in __bpf_hash_map_seq_show()
2082 if (!info->percpu_value_buf) { in __bpf_hash_map_seq_show()
2083 ctx.value = elem->key + roundup_key_size; in __bpf_hash_map_seq_show()
2085 roundup_value_size = round_up(map->value_size, 8); in __bpf_hash_map_seq_show()
2086 pptr = htab_elem_get_ptr(elem, map->key_size); in __bpf_hash_map_seq_show()
2087 for_each_possible_cpu(cpu) { in __bpf_hash_map_seq_show()
2088 copy_map_value_long(map, info->percpu_value_buf + off, in __bpf_hash_map_seq_show()
2089 per_cpu_ptr(pptr, cpu)); in __bpf_hash_map_seq_show()
2090 check_and_init_map_value(map, info->percpu_value_buf + off); in __bpf_hash_map_seq_show()
2093 ctx.value = info->percpu_value_buf; in __bpf_hash_map_seq_show()
2119 struct bpf_map *map = aux->map; in bpf_iter_init_hash_map() local
2123 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_iter_init_hash_map()
2124 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_iter_init_hash_map()
2125 buf_size = round_up(map->value_size, 8) * num_possible_cpus(); in bpf_iter_init_hash_map()
2128 return -ENOMEM; in bpf_iter_init_hash_map()
2130 seq_info->percpu_value_buf = value_buf; in bpf_iter_init_hash_map()
2133 bpf_map_inc_with_uref(map); in bpf_iter_init_hash_map()
2134 seq_info->map = map; in bpf_iter_init_hash_map()
2135 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2143 bpf_map_put_with_uref(seq_info->map); in bpf_iter_fini_hash_map()
2144 kfree(seq_info->percpu_value_buf); in bpf_iter_fini_hash_map()
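
bpf_iter_init_hash_map() and the seq_file callbacks above implement the kernel side of a BPF map-element iterator: the iterator program is attached to one specific map (userspace passes the fd in bpf_iter_link_info.map.map_fd) and is invoked once per element. A sketch of the program side, assuming a hash map with u32 keys and u64 values:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("iter/bpf_map_elem")
    int dump_hash(struct bpf_iter__bpf_map_elem *ctx)
    {
        struct seq_file *seq = ctx->meta->seq;
        __u32 *key = ctx->key;
        __u64 *val = ctx->value;

        /* key/value are NULL on the final call that only flushes the seq_file */
        if (!key || !val)
            return 0;
        BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
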
2161 static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn, in bpf_for_each_hash_elem() argument
2164 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem()
2179 return -EINVAL; in bpf_for_each_hash_elem()
2183 roundup_key_size = round_up(map->key_size, 8); in bpf_for_each_hash_elem()
2188 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2189 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2191 head = &b->head; in bpf_for_each_hash_elem()
2193 key = elem->key; in bpf_for_each_hash_elem()
2195 /* current cpu value for percpu map */ in bpf_for_each_hash_elem()
2196 pptr = htab_elem_get_ptr(elem, map->key_size); in bpf_for_each_hash_elem()
2199 val = elem->key + roundup_key_size; in bpf_for_each_hash_elem()
2202 ret = callback_fn((u64)(long)map, (u64)(long)key, in bpf_for_each_hash_elem()
2204 /* return value: 0 - continue, 1 - stop and return */ in bpf_for_each_hash_elem()
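
bpf_for_each_hash_elem() is the hash-map backend of the bpf_for_each_map_elem() helper; as the comment above notes, a callback return of 0 continues the walk and 1 stops it early. A hedged program-side sketch (map name, callback and attach point are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, __u32);
        __type(value, __u64);
    } counts SEC(".maps");

    struct walk_ctx {
        __u64 sum;
    };

    static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, struct walk_ctx *wctx)
    {
        wctx->sum += *val;
        return 0;               /* 0 = keep iterating, 1 = stop early */
    }

    SEC("tp/syscalls/sys_enter_getpid")
    int sum_counts(void *ctx)
    {
        struct walk_ctx wctx = { .sum = 0 };

        bpf_for_each_map_elem(&counts, sum_cb, &wctx, 0);
        bpf_printk("sum of all values: %llu", wctx.sum);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
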
2216 static u64 htab_map_mem_usage(const struct bpf_map *map) in htab_map_mem_usage() argument
2218 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_mem_usage()
2219 u32 value_size = round_up(htab->map.value_size, 8); in htab_map_mem_usage()
2226 usage += sizeof(struct bucket) * htab->n_buckets; in htab_map_mem_usage()
2229 num_entries = map->max_entries; in htab_map_mem_usage()
2233 usage += htab->elem_size * num_entries; in htab_map_mem_usage()
2242 num_entries = htab->use_percpu_counter ? in htab_map_mem_usage()
2243 percpu_counter_sum(&htab->pcount) : in htab_map_mem_usage()
2244 atomic_read(&htab->count); in htab_map_mem_usage()
2245 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries; in htab_map_mem_usage()
2299 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_percpu_map_lookup_elem() argument
2301 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_elem()
2304 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_percpu_map_lookup_elem()
2309 /* inline bpf_map_lookup_elem() call for per-CPU hashmap */
2310 static int htab_percpu_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in htab_percpu_map_gen_lookup() argument
2315 return -EOPNOTSUPP; in htab_percpu_map_gen_lookup()
2318 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_percpu_map_gen_lookup()
2322 offsetof(struct htab_elem, key) + roundup(map->key_size, 8)); in htab_percpu_map_gen_lookup()
2326 return insn - insn_buf; in htab_percpu_map_gen_lookup()
2329 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_percpu_map_lookup_percpu_elem() argument
2333 if (cpu >= nr_cpu_ids) in htab_percpu_map_lookup_percpu_elem()
2336 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_percpu_elem()
2338 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_percpu_map_lookup_percpu_elem()
2343 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_percpu_map_lookup_elem() argument
2345 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_elem()
2348 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_elem()
2349 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_lru_percpu_map_lookup_elem()
2355 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_lru_percpu_map_lookup_percpu_elem() argument
2359 if (cpu >= nr_cpu_ids) in htab_lru_percpu_map_lookup_percpu_elem()
2362 l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_percpu_elem()
2364 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_percpu_elem()
2365 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_lru_percpu_map_lookup_percpu_elem()
2371 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_hash_copy() argument
2375 int ret = -ENOENT; in bpf_percpu_hash_copy()
2376 int cpu, off = 0; in bpf_percpu_hash_copy() local
2379 /* per_cpu areas are zero-filled and bpf programs can only in bpf_percpu_hash_copy()
2383 size = round_up(map->value_size, 8); in bpf_percpu_hash_copy()
2385 l = __htab_map_lookup_elem(map, key); in bpf_percpu_hash_copy()
2388 /* We do not mark LRU map element here in order to not mess up in bpf_percpu_hash_copy()
2389 * eviction heuristics when user space does a map walk. in bpf_percpu_hash_copy()
2391 pptr = htab_elem_get_ptr(l, map->key_size); in bpf_percpu_hash_copy()
2392 for_each_possible_cpu(cpu) { in bpf_percpu_hash_copy()
2393 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); in bpf_percpu_hash_copy()
2394 check_and_init_map_value(map, value + off); in bpf_percpu_hash_copy()
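
bpf_percpu_hash_copy() services a userspace lookup on a per-CPU hash map: every possible CPU's value is copied into one flat buffer, each slot rounded up to 8 bytes, and LRU elements are deliberately not marked referenced. The caller therefore has to supply round_up(value_size, 8) * num_possible_cpus() bytes; a libbpf sketch (u32 keys and u64 values assumed):

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Read one key of a BPF_MAP_TYPE_PERCPU_HASH map with u32 keys / u64 values. */
    int read_percpu_value(int map_fd, __u32 key)
    {
        int ncpus = libbpf_num_possible_cpus();
        __u64 *vals;
        int err;

        if (ncpus < 0)
            return ncpus;

        /* One 8-byte slot per possible CPU, matching the kernel copy loop. */
        vals = calloc(ncpus, sizeof(__u64));
        if (!vals)
            return -ENOMEM;

        err = bpf_map_lookup_elem(map_fd, &key, vals);
        if (!err) {
            __u64 sum = 0;

            for (int cpu = 0; cpu < ncpus; cpu++)
                sum += vals[cpu];
            printf("key %u: sum across %d cpus = %llu\n",
                   key, ncpus, (unsigned long long)sum);
        }
        free(vals);
        return err;
    }
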
2403 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_hash_update() argument
2406 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update()
2411 ret = __htab_lru_percpu_map_update_elem(map, key, value, in bpf_percpu_hash_update()
2414 ret = __htab_percpu_map_update_elem(map, key, value, map_flags, in bpf_percpu_hash_update()
2421 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, in htab_percpu_map_seq_show_elem() argument
2426 int cpu; in htab_percpu_map_seq_show_elem() local
2430 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_seq_show_elem()
2436 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_percpu_map_seq_show_elem()
2438 pptr = htab_elem_get_ptr(l, map->key_size); in htab_percpu_map_seq_show_elem()
2439 for_each_possible_cpu(cpu) { in htab_percpu_map_seq_show_elem()
2440 seq_printf(m, "\tcpu%d: ", cpu); in htab_percpu_map_seq_show_elem()
2441 btf_type_seq_show(map->btf, map->btf_value_type_id, in htab_percpu_map_seq_show_elem()
2442 per_cpu_ptr(pptr, cpu), m); in htab_percpu_map_seq_show_elem()
2493 if (attr->value_size != sizeof(u32)) in fd_htab_map_alloc_check()
2494 return -EINVAL; in fd_htab_map_alloc_check()
2498 static void fd_htab_map_free(struct bpf_map *map) in fd_htab_map_free() argument
2500 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free()
2506 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2510 void *ptr = fd_htab_map_get_ptr(map, l); in fd_htab_map_free()
2512 map->ops->map_fd_put_ptr(map, ptr, false); in fd_htab_map_free()
2516 htab_map_free(map); in fd_htab_map_free()
2520 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_htab_map_lookup_elem() argument
2525 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_htab_map_lookup_elem()
2526 return -ENOTSUPP; in bpf_fd_htab_map_lookup_elem()
2529 ptr = htab_map_lookup_elem(map, key); in bpf_fd_htab_map_lookup_elem()
2531 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr)); in bpf_fd_htab_map_lookup_elem()
2533 ret = -ENOENT; in bpf_fd_htab_map_lookup_elem()
2540 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_htab_map_update_elem() argument
2547 ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_htab_map_update_elem()
2552 * htab map, and the following rcu_read_lock() is only used to avoid in bpf_fd_htab_map_update_elem()
2556 ret = htab_map_update_elem(map, key, &ptr, map_flags); in bpf_fd_htab_map_update_elem()
2559 map->ops->map_fd_put_ptr(map, ptr, false); in bpf_fd_htab_map_update_elem()
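
bpf_fd_htab_map_update_elem() and bpf_fd_htab_map_lookup_elem() give BPF_MAP_TYPE_HASH_OF_MAPS its syscall semantics: an update takes an inner map file descriptor as the value, while a syscall-side lookup returns the inner map ID (see the batch code above, and fd_htab_map_alloc_check(), which pins value_size to sizeof(u32)). A hedged userspace sketch:

    #include <bpf/bpf.h>
    #include <linux/bpf.h>
    #include <unistd.h>

    int demo_hash_of_maps(void)
    {
        LIBBPF_OPTS(bpf_map_create_opts, opts);
        int inner_fd, outer_fd, err;
        __u32 key = 1, inner_id = 0;

        /* Inner map: a plain array used as the template and first value. */
        inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                                  sizeof(__u32), sizeof(__u64), 16, NULL);
        if (inner_fd < 0)
            return inner_fd;

        /* Outer hash-of-maps: value_size must be sizeof(u32). */
        opts.inner_map_fd = inner_fd;
        outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
                                  sizeof(__u32), sizeof(__u32), 64, &opts);
        if (outer_fd < 0) {
            close(inner_fd);
            return outer_fd;
        }

        /* Update stores the inner map referenced by its fd ... */
        err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
        /* ... but a syscall-side lookup hands back the inner map *ID*. */
        if (!err)
            err = bpf_map_lookup_elem(outer_fd, &key, &inner_id);
        if (!err) {
            int fd_again = bpf_map_get_fd_by_id(inner_id);

            if (fd_again >= 0)
                close(fd_again);
        }
        close(inner_fd);
        close(outer_fd);
        return err;
    }
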
2566 struct bpf_map *map, *inner_map_meta; in htab_of_map_alloc() local
2568 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); in htab_of_map_alloc()
2572 map = htab_map_alloc(attr); in htab_of_map_alloc()
2573 if (IS_ERR(map)) { in htab_of_map_alloc()
2575 return map; in htab_of_map_alloc()
2578 map->inner_map_meta = inner_map_meta; in htab_of_map_alloc()
2580 return map; in htab_of_map_alloc()
2583 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) in htab_of_map_lookup_elem() argument
2585 struct bpf_map **inner_map = htab_map_lookup_elem(map, key); in htab_of_map_lookup_elem()
2593 static int htab_of_map_gen_lookup(struct bpf_map *map, in htab_of_map_gen_lookup() argument
2600 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_of_map_gen_lookup()
2605 round_up(map->key_size, 8)); in htab_of_map_gen_lookup()
2608 return insn - insn_buf; in htab_of_map_gen_lookup()
2611 static void htab_of_map_free(struct bpf_map *map) in htab_of_map_free() argument
2613 bpf_map_meta_free(map->inner_map_meta); in htab_of_map_free()
2614 fd_htab_map_free(map); in htab_of_map_free()