Lines matching "cpu", "-", and "map" (kernel/bpf/hashtab.c)

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
47 * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
51 * by pinning the task to the current CPU and incrementing the recursion
52 * protection across the map operation.
73 * it is only safe to use raw spinlock for preallocated hash map on a RT kernel,
75 * after hash map was fully converted to use bpf_mem_alloc, there will be
76 * non-synchronous memory allocation for non-preallocated hash map, so it is
85 #define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
88 struct bpf_map map; member
98 /* number of elements in non-preallocated hashtable are kept
124 /* pointer to per-cpu pointer */
134 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
141 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
142 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
143 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
144 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
145 &htab->lockdep_key); in htab_init_buckets()
156 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
160 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
161 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
164 return -EBUSY; in htab_lock_bucket()
167 raw_spin_lock(&b->raw_lock); in htab_lock_bucket()
177 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
178 raw_spin_unlock(&b->raw_lock); in htab_unlock_bucket()
179 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
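The scattered fragments above belong to the htab_lock_bucket()/htab_unlock_bucket() pair that keeps a BPF program from deadlocking against itself: each lock "stripe" has a per-CPU counter, and a second entry on the same CPU backs off with -EBUSY instead of spinning on the raw lock it already holds. A condensed sketch of that pattern, reconstructed from these lines (the saved-IRQ-flags and preemption handling of the real functions is left out, so read it as illustrative rather than the verbatim source):

static int htab_lock_bucket(const struct bpf_htab *htab, struct bucket *b, u32 hash)
{
        /* Map the bucket hash onto one of HASHTAB_MAP_LOCK_COUNT per-CPU counters. */
        hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

        /* A result other than 1 means a program on this CPU is already
         * inside this stripe (e.g. a tracing program fired while the
         * bucket lock was held): back off instead of deadlocking.
         */
        if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
                __this_cpu_dec(*(htab->map_locked[hash]));
                return -EBUSY;
        }

        raw_spin_lock(&b->raw_lock);
        return 0;
}

static void htab_unlock_bucket(const struct bpf_htab *htab, struct bucket *b, u32 hash)
{
        hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
        raw_spin_unlock(&b->raw_lock);
        __this_cpu_dec(*(htab->map_locked[hash]));
}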
188 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
189 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
194 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
195 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
201 *(void __percpu **)(l->key + key_size) = pptr; in htab_elem_set_ptr()
206 return *(void __percpu **)(l->key + key_size); in htab_elem_get_ptr()
209 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) in fd_htab_map_get_ptr() argument
211 return *(void **)(l->key + roundup(map->key_size, 8)); in fd_htab_map_get_ptr()
216 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
226 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
229 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers()
238 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers()
245 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
248 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
257 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in htab_free_prealloced_fields()
258 int cpu; in htab_free_prealloced_fields() local
260 for_each_possible_cpu(cpu) { in htab_free_prealloced_fields()
261 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in htab_free_prealloced_fields()
265 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
279 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
283 htab->map.key_size); in htab_free_elems()
288 bpf_map_area_free(htab->elems); in htab_free_elems()
293 * order is always lru_lock -> bucket_lock and this only happens in
305 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
309 bpf_map_inc_elem_count(&htab->map); in prealloc_lru_pop()
311 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
320 u32 num_entries = htab->map.max_entries; in prealloc_init()
321 int err = -ENOMEM, i; in prealloc_init()
326 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
327 htab->map.numa_node); in prealloc_init()
328 if (!htab->elems) in prealloc_init()
329 return -ENOMEM; in prealloc_init()
335 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
338 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
342 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
349 err = bpf_lru_init(&htab->lru, in prealloc_init()
350 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
351 offsetof(struct htab_elem, hash) - in prealloc_init()
356 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
362 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
364 htab->elem_size, num_entries); in prealloc_init()
366 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
367 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
368 htab->elem_size, num_entries); in prealloc_init()
382 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
384 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
391 int cpu; in alloc_extra_elems() local
393 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
396 return -ENOMEM; in alloc_extra_elems()
398 for_each_possible_cpu(cpu) { in alloc_extra_elems()
399 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
404 *per_cpu_ptr(pptr, cpu) = l_new; in alloc_extra_elems()
406 htab->extra_elems = pptr; in alloc_extra_elems()
413 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc_check()
414 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
415 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc_check()
416 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc_check()
417 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc_check()
419 * the map's value itself is percpu. percpu_lru has in htab_map_alloc_check()
420 * nothing to do with the map's value. in htab_map_alloc_check()
422 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc_check()
423 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc_check()
424 bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); in htab_map_alloc_check()
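percpu_lru is purely a map_flags property chosen at creation time, while "per-CPU values" is a map_type property. A minimal user-space sketch of requesting per-CPU LRU lists on an otherwise ordinary LRU hash, assuming libbpf is available (map name and sizes are arbitrary examples, not anything mandated by the kernel):

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>

int main(void)
{
    /* BPF_F_NO_COMMON_LRU: each CPU keeps its own LRU list (percpu_lru).
     * The values stay shared, because the type is BPF_MAP_TYPE_LRU_HASH,
     * not BPF_MAP_TYPE_LRU_PERCPU_HASH.
     */
    LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_COMMON_LRU);
    int fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "lru_demo",
                            sizeof(__u32), sizeof(__u64), 4096, &opts);

    if (fd < 0) {
        perror("bpf_map_create");
        return 1;
    }
    printf("created LRU hash map, fd=%d\n", fd);
    return 0;
}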
432 return -EPERM; in htab_map_alloc_check()
434 if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || in htab_map_alloc_check()
435 !bpf_map_flags_access_ok(attr->map_flags)) in htab_map_alloc_check()
436 return -EINVAL; in htab_map_alloc_check()
439 return -EINVAL; in htab_map_alloc_check()
442 return -ENOTSUPP; in htab_map_alloc_check()
445 return -EINVAL; in htab_map_alloc_check()
448 * value_size == 0 may be allowed in the future to use map as a set in htab_map_alloc_check()
450 if (attr->max_entries == 0 || attr->key_size == 0 || in htab_map_alloc_check()
451 attr->value_size == 0) in htab_map_alloc_check()
452 return -EINVAL; in htab_map_alloc_check()
454 if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE - in htab_map_alloc_check()
459 * kmalloc-able later in htab_map_update_elem() in htab_map_alloc_check()
461 return -E2BIG; in htab_map_alloc_check()
468 bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_map_alloc()
469 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
470 bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || in htab_map_alloc()
471 attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); in htab_map_alloc()
472 /* percpu_lru means each cpu has its own LRU list. in htab_map_alloc()
474 * the map's value itself is percpu. percpu_lru has in htab_map_alloc()
475 * nothing to do with the map's value. in htab_map_alloc()
477 bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); in htab_map_alloc()
478 bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); in htab_map_alloc()
484 return ERR_PTR(-ENOMEM); in htab_map_alloc()
486 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
488 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
491 /* ensure each CPU's lru list has >=1 elements. in htab_map_alloc()
495 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
497 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
498 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
503 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
505 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
506 round_up(htab->map.key_size, 8); in htab_map_alloc()
508 htab->elem_size += sizeof(void *); in htab_map_alloc()
510 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
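These lines fix the per-element memory layout: the struct htab_elem header, the key rounded up to 8 bytes, then either a pointer into per-CPU storage or the 8-byte-rounded value itself. A small illustrative calculation of that layout (the header size used here is a placeholder, since sizeof(struct htab_elem) depends on the kernel configuration):

#include <stdio.h>

#define ROUND_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

/* Mirrors the elem_size computation above; 'hdr' stands in for
 * sizeof(struct htab_elem). Assumes a 64-bit build (8-byte pointers).
 */
static unsigned int elem_size(unsigned int hdr, unsigned int key_size,
                              unsigned int value_size, int percpu)
{
    unsigned int size = hdr + ROUND_UP(key_size, 8);

    if (percpu)
        size += sizeof(void *);          /* pointer to the per-CPU value area */
    else
        size += ROUND_UP(value_size, 8); /* value stored inline */
    return size;
}

int main(void)
{
    /* e.g. a 48-byte header, 6-byte key, 10-byte value */
    printf("plain:  %u\n", elem_size(48, 6, 10, 0)); /* 48 + 8 + 16 = 72 */
    printf("percpu: %u\n", elem_size(48, 6, 10, 1)); /* 48 + 8 + 8  = 64 */
    return 0;
}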
512 err = -E2BIG; in htab_map_alloc()
514 if (htab->n_buckets == 0 || in htab_map_alloc()
515 htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
518 err = bpf_map_init_elem_count(&htab->map); in htab_map_alloc()
522 err = -ENOMEM; in htab_map_alloc()
523 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
525 htab->map.numa_node); in htab_map_alloc()
526 if (!htab->buckets) in htab_map_alloc()
530 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
534 if (!htab->map_locked[i]) in htab_map_alloc()
538 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
539 htab->hashrnd = 0; in htab_map_alloc()
541 htab->hashrnd = get_random_u32(); in htab_map_alloc()
547 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus() in htab_map_alloc()
549 * hash map size is 10k, which means that a system with 64 cpus will fill in htab_map_alloc()
551 * define our own batch count as 32 then 10k hash map can be filled up to 80%: in htab_map_alloc()
552 * 10k - 8k > 32 _batch_ * 64 _cpus_ in htab_map_alloc()
553 * and __percpu_counter_compare() will still be fast. At that point hash map in htab_map_alloc()
554 * collisions will dominate its performance anyway. Assume that hash map filled in htab_map_alloc()
559 if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH) in htab_map_alloc()
560 htab->use_percpu_counter = true; in htab_map_alloc()
562 if (htab->use_percpu_counter) { in htab_map_alloc()
563 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
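The heuristic spelled out in the comment boils down to the single comparison on line 559: a per-CPU counter is only worth it when the map is large relative to num_online_cpus() * PERCPU_COUNTER_BATCH, so that __percpu_counter_compare() stays on its fast path until the map is roughly half full. A stand-alone sketch of that arithmetic (the batch value of 32 is taken from the worked example in the comment, not queried from the kernel):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_BATCH 32   /* illustrative; the kernel derives the real batch value */

static bool use_percpu_counter(unsigned int max_entries, unsigned int online_cpus)
{
    return max_entries / 2 > (unsigned long long)online_cpus * DEMO_BATCH;
}

int main(void)
{
    /* 10k-entry map on 64 CPUs: 5000 > 2048, so the per-CPU counter is
     * used and only degrades once the map is about half full, where
     * hash collisions dominate anyway.
     */
    printf("10000 entries / 64 cpus -> %d\n", use_percpu_counter(10000, 64));
    /* tiny map on the same box: not worth it, plain atomic_t is used */
    printf("  128 entries / 64 cpus -> %d\n", use_percpu_counter(128, 64));
    return 0;
}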
582 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
586 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
587 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
593 return &htab->map; in htab_map_alloc()
598 if (htab->use_percpu_counter) in htab_map_alloc()
599 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
601 free_percpu(htab->map_locked[i]); in htab_map_alloc()
602 bpf_map_area_free(htab->buckets); in htab_map_alloc()
603 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
604 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
606 bpf_map_free_elem_count(&htab->map); in htab_map_alloc()
608 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
622 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
627 return &__select_bucket(htab, hash)->head; in select_bucket()
638 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_elem_raw()
657 if (l->hash == hash && !memcmp(&l->key, key, key_size)) in lookup_nulls_elem_raw()
660 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) in lookup_nulls_elem_raw()
671 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) in __htab_map_lookup_elem() argument
673 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem()
681 key_size = map->key_size; in __htab_map_lookup_elem()
683 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
687 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
692 static void *htab_map_lookup_elem(struct bpf_map *map, void *key) in htab_map_lookup_elem() argument
694 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_map_lookup_elem()
697 return l->key + round_up(map->key_size, 8); in htab_map_lookup_elem()
706 * map->ops->map_lookup_elem
713 static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in htab_map_gen_lookup() argument
719 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_map_gen_lookup()
724 round_up(map->key_size, 8)); in htab_map_gen_lookup()
725 return insn - insn_buf; in htab_map_gen_lookup()
728 static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, in __htab_lru_map_lookup_elem() argument
731 struct htab_elem *l = __htab_map_lookup_elem(map, key); in __htab_lru_map_lookup_elem()
735 bpf_lru_node_set_ref(&l->lru_node); in __htab_lru_map_lookup_elem()
736 return l->key + round_up(map->key_size, 8); in __htab_lru_map_lookup_elem()
742 static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem() argument
744 return __htab_lru_map_lookup_elem(map, key, true); in htab_lru_map_lookup_elem()
747 static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) in htab_lru_map_lookup_elem_sys() argument
749 return __htab_lru_map_lookup_elem(map, key, false); in htab_lru_map_lookup_elem_sys()
752 static int htab_lru_map_gen_lookup(struct bpf_map *map, in htab_lru_map_gen_lookup() argument
760 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_lru_map_gen_lookup()
773 round_up(map->key_size, 8)); in htab_lru_map_gen_lookup()
774 return insn - insn_buf; in htab_lru_map_gen_lookup()
781 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in check_and_free_fields()
782 int cpu; in check_and_free_fields() local
784 for_each_possible_cpu(cpu) in check_and_free_fields()
785 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in check_and_free_fields()
787 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
789 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
807 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
808 head = &b->head; in htab_lru_map_delete_node()
810 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
816 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_node()
818 bpf_map_dec_elem_count(&htab->map); in htab_lru_map_delete_node()
822 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
828 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in htab_map_get_next_key() argument
830 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key()
838 key_size = map->key_size; in htab_map_get_next_key()
843 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
848 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
854 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), in htab_map_get_next_key()
858 /* if next elem in this hash list is non-zero, just return it */ in htab_map_get_next_key()
859 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
864 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
869 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
877 memcpy(next_key, next_l->key, key_size); in htab_map_get_next_key()
883 return -ENOENT; in htab_map_get_next_key()
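htab_map_get_next_key() is what backs user-space iteration over the map: the first call passes no previous key, every later call passes the key just returned, and -ENOENT ends the walk. A minimal libbpf sketch, assuming 'fd' is an existing hash map with __u32 keys:

#include <bpf/bpf.h>
#include <stdio.h>

/* Print every key of a hash map whose keys are __u32. */
static void dump_keys(int fd)
{
    __u32 key, next_key;
    void *prev = NULL;  /* NULL: "give me the first key" */

    while (bpf_map_get_next_key(fd, prev, &next_key) == 0) {
        printf("key: %u\n", next_key);
        key = next_key;
        prev = &key;
    }
    /* a non-zero return ends the walk; ENOENT means the last key was reached */
}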
889 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
890 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
891 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
896 struct bpf_map *map = &htab->map; in htab_put_fd_value() local
899 if (map->ops->map_fd_put_ptr) { in htab_put_fd_value()
900 ptr = fd_htab_map_get_ptr(map, l); in htab_put_fd_value()
901 map->ops->map_fd_put_ptr(map, ptr, true); in htab_put_fd_value()
907 if (htab->use_percpu_counter) in is_map_full()
908 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
910 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
915 bpf_map_inc_elem_count(&htab->map); in inc_elem_count()
917 if (htab->use_percpu_counter) in inc_elem_count()
918 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
920 atomic_inc(&htab->count); in inc_elem_count()
925 bpf_map_dec_elem_count(&htab->map); in dec_elem_count()
927 if (htab->use_percpu_counter) in dec_elem_count()
928 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
930 atomic_dec(&htab->count); in dec_elem_count()
939 bpf_map_dec_elem_count(&htab->map); in free_htab_elem()
941 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
953 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); in pcpu_copy_value()
955 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
956 int off = 0, cpu; in pcpu_copy_value() local
958 for_each_possible_cpu(cpu) { in pcpu_copy_value()
959 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); in pcpu_copy_value()
968 /* When not setting the initial value on all cpus, zero-fill element in pcpu_init_value()
975 int cpu; in pcpu_init_value() local
977 for_each_possible_cpu(cpu) { in pcpu_init_value()
978 if (cpu == current_cpu) in pcpu_init_value()
979 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); in pcpu_init_value()
981 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); in pcpu_init_value()
990 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
999 u32 size = htab->map.value_size; in alloc_htab_elem()
1007 * use per-cpu extra elems to avoid freelist_pop/push in alloc_htab_elem()
1009 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
1016 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
1018 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1020 bpf_map_inc_elem_count(&htab->map); in alloc_htab_elem()
1025 /* when map is full and update() is replacing in alloc_htab_elem()
1030 return ERR_PTR(-E2BIG); in alloc_htab_elem()
1032 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1034 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1039 memcpy(l_new->key, key, key_size); in alloc_htab_elem()
1044 /* alloc_percpu zero-fills */ in alloc_htab_elem()
1045 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1047 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1048 l_new = ERR_PTR(-ENOMEM); in alloc_htab_elem()
1051 l_new->ptr_to_pptr = pptr; in alloc_htab_elem()
1061 memcpy(l_new->key + round_up(key_size, 8), value, size); in alloc_htab_elem()
1063 copy_map_value(&htab->map, in alloc_htab_elem()
1064 l_new->key + round_up(key_size, 8), in alloc_htab_elem()
1068 l_new->hash = hash; in alloc_htab_elem()
1080 return -EEXIST; in check_flags()
1084 return -ENOENT; in check_flags()
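check_flags() is where the update flags get their meaning: BPF_NOEXIST fails with -EEXIST if the element is already there, BPF_EXIST fails with -ENOENT if it is not, and BPF_ANY accepts both cases. A user-space sketch of the three modes, assuming 'fd' is a hash map with __u32 keys and __u64 values:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static void update_modes(int fd)
{
    __u32 key = 1;
    __u64 val = 100;
    int err;

    /* create or overwrite unconditionally */
    err = bpf_map_update_elem(fd, &key, &val, BPF_ANY);

    /* BPF_NOEXIST: fails once the key is already present (-EEXIST) */
    err = bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST);

    /* BPF_EXIST: only updates an existing element (-ENOENT otherwise) */
    val = 200;
    err = bpf_map_update_elem(fd, &key, &val, BPF_EXIST);
    (void)err;
}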
1090 static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_map_update_elem() argument
1093 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem()
1103 return -EINVAL; in htab_map_update_elem()
1108 key_size = map->key_size; in htab_map_update_elem()
1110 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1113 head = &b->head; in htab_map_update_elem()
1116 if (unlikely(!btf_record_has_field(map->record, BPF_SPIN_LOCK))) in htab_map_update_elem()
1117 return -EINVAL; in htab_map_update_elem()
1120 htab->n_buckets); in htab_map_update_elem()
1126 copy_map_value_locked(map, in htab_map_update_elem()
1127 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1154 copy_map_value_locked(map, in htab_map_update_elem()
1155 l_old->key + round_up(key_size, 8), in htab_map_update_elem()
1164 /* all pre-allocated elements are in use or memory exhausted */ in htab_map_update_elem()
1172 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_map_update_elem()
1174 hlist_nulls_del_rcu(&l_old->hash_node); in htab_map_update_elem()
1189 bpf_map_dec_elem_count(&htab->map); in htab_lru_push_free()
1190 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1193 static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, in htab_lru_map_update_elem() argument
1196 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem()
1206 return -EINVAL; in htab_lru_map_update_elem()
1211 key_size = map->key_size; in htab_lru_map_update_elem()
1213 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1216 head = &b->head; in htab_lru_map_update_elem()
1225 return -ENOMEM; in htab_lru_map_update_elem()
1226 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1227 l_new->key + round_up(map->key_size, 8), value); in htab_lru_map_update_elem()
1242 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in htab_lru_map_update_elem()
1244 bpf_lru_node_set_ref(&l_new->lru_node); in htab_lru_map_update_elem()
1245 hlist_nulls_del_rcu(&l_old->hash_node); in htab_lru_map_update_elem()
1261 static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_percpu_map_update_elem() argument
1265 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem()
1275 return -EINVAL; in __htab_percpu_map_update_elem()
1280 key_size = map->key_size; in __htab_percpu_map_update_elem()
1282 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1285 head = &b->head; in __htab_percpu_map_update_elem()
1298 /* per-cpu hash map can update value in-place */ in __htab_percpu_map_update_elem()
1308 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_percpu_map_update_elem()
1316 static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in __htab_lru_percpu_map_update_elem() argument
1320 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem()
1330 return -EINVAL; in __htab_lru_percpu_map_update_elem()
1335 key_size = map->key_size; in __htab_lru_percpu_map_update_elem()
1337 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1340 head = &b->head; in __htab_lru_percpu_map_update_elem()
1350 return -ENOMEM; in __htab_lru_percpu_map_update_elem()
1364 bpf_lru_node_set_ref(&l_old->lru_node); in __htab_lru_percpu_map_update_elem()
1366 /* per-cpu hash map can update value in-place */ in __htab_lru_percpu_map_update_elem()
1372 hlist_nulls_add_head_rcu(&l_new->hash_node, head); in __htab_lru_percpu_map_update_elem()
1380 bpf_map_dec_elem_count(&htab->map); in __htab_lru_percpu_map_update_elem()
1381 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1386 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_percpu_map_update_elem() argument
1389 return __htab_percpu_map_update_elem(map, key, value, map_flags, false); in htab_percpu_map_update_elem()
1392 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, in htab_lru_percpu_map_update_elem() argument
1395 return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, in htab_lru_percpu_map_update_elem()
1400 static long htab_map_delete_elem(struct bpf_map *map, void *key) in htab_map_delete_elem() argument
1402 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem()
1413 key_size = map->key_size; in htab_map_delete_elem()
1415 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1417 head = &b->head; in htab_map_delete_elem()
1426 hlist_nulls_del_rcu(&l->hash_node); in htab_map_delete_elem()
1429 ret = -ENOENT; in htab_map_delete_elem()
1436 static long htab_lru_map_delete_elem(struct bpf_map *map, void *key) in htab_lru_map_delete_elem() argument
1438 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem()
1449 key_size = map->key_size; in htab_lru_map_delete_elem()
1451 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1453 head = &b->head; in htab_lru_map_delete_elem()
1462 hlist_nulls_del_rcu(&l->hash_node); in htab_lru_map_delete_elem()
1464 ret = -ENOENT; in htab_lru_map_delete_elem()
1480 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1486 hlist_nulls_del_rcu(&l->hash_node); in delete_all_elements()
1498 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers()
1505 bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers()
1512 static void htab_map_free_timers(struct bpf_map *map) in htab_map_free_timers() argument
1514 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers()
1517 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_map_free_timers()
1525 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1526 static void htab_map_free(struct bpf_map *map) in htab_map_free() argument
1528 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free()
1533 * There is no need to synchronize_rcu() here to protect map elements. in htab_map_free()
1547 bpf_map_free_elem_count(map); in htab_map_free()
1548 free_percpu(htab->extra_elems); in htab_map_free()
1549 bpf_map_area_free(htab->buckets); in htab_map_free()
1550 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1551 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1552 if (htab->use_percpu_counter) in htab_map_free()
1553 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1555 free_percpu(htab->map_locked[i]); in htab_map_free()
1556 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1560 static void htab_map_seq_show_elem(struct bpf_map *map, void *key, in htab_map_seq_show_elem() argument
1567 value = htab_map_lookup_elem(map, key); in htab_map_seq_show_elem()
1573 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_map_seq_show_elem()
1575 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in htab_map_seq_show_elem()
1581 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in __htab_map_lookup_and_delete_elem() argument
1585 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem()
1593 key_size = map->key_size; in __htab_map_lookup_and_delete_elem()
1595 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1597 head = &b->head; in __htab_map_lookup_and_delete_elem()
1605 ret = -ENOENT; in __htab_map_lookup_and_delete_elem()
1608 u32 roundup_value_size = round_up(map->value_size, 8); in __htab_map_lookup_and_delete_elem()
1610 int off = 0, cpu; in __htab_map_lookup_and_delete_elem() local
1613 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_elem()
1614 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_elem()
1615 check_and_init_map_value(&htab->map, value + off); in __htab_map_lookup_and_delete_elem()
1619 u32 roundup_key_size = round_up(map->key_size, 8); in __htab_map_lookup_and_delete_elem()
1622 copy_map_value_locked(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1626 copy_map_value(map, value, l->key + in __htab_map_lookup_and_delete_elem()
1629 check_and_init_map_value(map, value); in __htab_map_lookup_and_delete_elem()
1632 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_elem()
1645 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_map_lookup_and_delete_elem() argument
1648 return __htab_map_lookup_and_delete_elem(map, key, value, false, false, in htab_map_lookup_and_delete_elem()
1652 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_elem() argument
1656 return __htab_map_lookup_and_delete_elem(map, key, value, false, true, in htab_percpu_map_lookup_and_delete_elem()
1660 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, in htab_lru_map_lookup_and_delete_elem() argument
1663 return __htab_map_lookup_and_delete_elem(map, key, value, true, false, in htab_lru_map_lookup_and_delete_elem()
1667 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_elem() argument
1671 return __htab_map_lookup_and_delete_elem(map, key, value, true, true, in htab_lru_percpu_map_lookup_and_delete_elem()
1676 __htab_map_lookup_and_delete_batch(struct bpf_map *map, in __htab_map_lookup_and_delete_batch() argument
1682 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch()
1685 void __user *uvalues = u64_to_user_ptr(attr->batch.values); in __htab_map_lookup_and_delete_batch()
1686 void __user *ukeys = u64_to_user_ptr(attr->batch.keys); in __htab_map_lookup_and_delete_batch()
1687 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); in __htab_map_lookup_and_delete_batch()
1699 elem_map_flags = attr->batch.elem_flags; in __htab_map_lookup_and_delete_batch()
1701 ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))) in __htab_map_lookup_and_delete_batch()
1702 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1704 map_flags = attr->batch.flags; in __htab_map_lookup_and_delete_batch()
1706 return -EINVAL; in __htab_map_lookup_and_delete_batch()
1708 max_count = attr->batch.count; in __htab_map_lookup_and_delete_batch()
1712 if (put_user(0, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1713 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1717 return -EFAULT; in __htab_map_lookup_and_delete_batch()
1719 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1720 return -ENOENT; in __htab_map_lookup_and_delete_batch()
1722 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1723 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1724 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1741 ret = -ENOMEM; in __htab_map_lookup_and_delete_batch()
1751 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1752 head = &b->head; in __htab_map_lookup_and_delete_batch()
1772 if (bucket_cnt > (max_count - total)) { in __htab_map_lookup_and_delete_batch()
1774 ret = -ENOSPC; in __htab_map_lookup_and_delete_batch()
1802 memcpy(dst_key, l->key, key_size); in __htab_map_lookup_and_delete_batch()
1805 int off = 0, cpu; in __htab_map_lookup_and_delete_batch() local
1808 pptr = htab_elem_get_ptr(l, map->key_size); in __htab_map_lookup_and_delete_batch()
1809 for_each_possible_cpu(cpu) { in __htab_map_lookup_and_delete_batch()
1810 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_batch()
1811 check_and_init_map_value(&htab->map, dst_val + off); in __htab_map_lookup_and_delete_batch()
1815 value = l->key + roundup_key_size; in __htab_map_lookup_and_delete_batch()
1816 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in __htab_map_lookup_and_delete_batch()
1819 /* Actual value is the id of the inner map */ in __htab_map_lookup_and_delete_batch()
1820 map_id = map->ops->map_fd_sys_lookup_elem(*inner_map); in __htab_map_lookup_and_delete_batch()
1825 copy_map_value_locked(map, dst_val, value, in __htab_map_lookup_and_delete_batch()
1828 copy_map_value(map, dst_val, value); in __htab_map_lookup_and_delete_batch()
1830 check_and_init_map_value(map, dst_val); in __htab_map_lookup_and_delete_batch()
1833 hlist_nulls_del_rcu(&l->hash_node); in __htab_map_lookup_and_delete_batch()
1841 l->batch_flink = node_to_free; in __htab_map_lookup_and_delete_batch()
1856 node_to_free = node_to_free->batch_flink; in __htab_map_lookup_and_delete_batch()
1864 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1875 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
1881 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
1882 ret = -ENOENT; in __htab_map_lookup_and_delete_batch()
1888 if (ret == -EFAULT) in __htab_map_lookup_and_delete_batch()
1892 ubatch = u64_to_user_ptr(attr->batch.out_batch); in __htab_map_lookup_and_delete_batch()
1894 put_user(total, &uattr->batch.count)) in __htab_map_lookup_and_delete_batch()
1895 ret = -EFAULT; in __htab_map_lookup_and_delete_batch()
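This batch path is reached from user space through BPF_MAP_LOOKUP_BATCH and BPF_MAP_LOOKUP_AND_DELETE_BATCH: in_batch/out_batch carry an opaque bucket cursor, count is both the buffer size going in and the number of elements copied out, and -ENOENT signals that the last bucket has been visited. A libbpf sketch that drains a map in chunks, assuming 'fd' is a plain hash map with __u32 keys and __u64 values:

#include <bpf/bpf.h>
#include <stdio.h>

#define CHUNK 64

static void drain_map(int fd)
{
    __u32 in_batch, out_batch, count;
    __u32 keys[CHUNK];
    __u64 vals[CHUNK];
    void *in = NULL;    /* NULL cursor: start at the first bucket */
    int err;

    do {
        count = CHUNK;
        err = bpf_map_lookup_and_delete_batch(fd, in, &out_batch,
                                              keys, vals, &count, NULL);
        /* 'count' holds what was copied, even on the final partial chunk */
        for (__u32 i = 0; i < count; i++)
            printf("%u -> %llu\n", keys[i], (unsigned long long)vals[i]);
        in_batch = out_batch;
        in = &in_batch;
    } while (!err);
    /* a return of -ENOENT (or errno ENOENT) means every bucket was visited */
}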
1904 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_percpu_map_lookup_batch() argument
1907 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_percpu_map_lookup_batch()
1912 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_percpu_map_lookup_and_delete_batch() argument
1916 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_percpu_map_lookup_and_delete_batch()
1921 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_map_lookup_batch() argument
1924 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_map_lookup_batch()
1929 htab_map_lookup_and_delete_batch(struct bpf_map *map, in htab_map_lookup_and_delete_batch() argument
1933 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_map_lookup_and_delete_batch()
1938 htab_lru_percpu_map_lookup_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_batch() argument
1942 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_percpu_map_lookup_batch()
1947 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_percpu_map_lookup_and_delete_batch() argument
1951 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_percpu_map_lookup_and_delete_batch()
1956 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, in htab_lru_map_lookup_batch() argument
1959 return __htab_map_lookup_and_delete_batch(map, attr, uattr, false, in htab_lru_map_lookup_batch()
1964 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, in htab_lru_map_lookup_and_delete_batch() argument
1968 return __htab_map_lookup_and_delete_batch(map, attr, uattr, true, in htab_lru_map_lookup_and_delete_batch()
1973 struct bpf_map *map; member
1975 void *percpu_value_buf; // non-zero means percpu hash
1984 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next()
1985 u32 skip_elems = info->skip_elems; in bpf_hash_map_seq_find_next()
1986 u32 bucket_id = info->bucket_id; in bpf_hash_map_seq_find_next()
1993 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
2001 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); in bpf_hash_map_seq_find_next()
2007 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
2012 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
2013 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2017 head = &b->head; in bpf_hash_map_seq_find_next()
2020 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2021 info->skip_elems = count; in bpf_hash_map_seq_find_next()
2031 info->bucket_id = i; in bpf_hash_map_seq_find_next()
2032 info->skip_elems = 0; in bpf_hash_map_seq_find_next()
2038 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_start()
2052 struct bpf_iter_seq_hash_map_info *info = seq->private; in bpf_hash_map_seq_next()
2055 ++info->skip_elems; in bpf_hash_map_seq_next()
2061 struct bpf_iter_seq_hash_map_info *info = seq->private; in __bpf_hash_map_seq_show()
2064 struct bpf_map *map = info->map; in __bpf_hash_map_seq_show() local
2066 int ret = 0, off = 0, cpu; in __bpf_hash_map_seq_show() local
2074 ctx.map = info->map; in __bpf_hash_map_seq_show()
2076 roundup_key_size = round_up(map->key_size, 8); in __bpf_hash_map_seq_show()
2077 ctx.key = elem->key; in __bpf_hash_map_seq_show()
2078 if (!info->percpu_value_buf) { in __bpf_hash_map_seq_show()
2079 ctx.value = elem->key + roundup_key_size; in __bpf_hash_map_seq_show()
2081 roundup_value_size = round_up(map->value_size, 8); in __bpf_hash_map_seq_show()
2082 pptr = htab_elem_get_ptr(elem, map->key_size); in __bpf_hash_map_seq_show()
2083 for_each_possible_cpu(cpu) { in __bpf_hash_map_seq_show()
2084 copy_map_value_long(map, info->percpu_value_buf + off, in __bpf_hash_map_seq_show()
2085 per_cpu_ptr(pptr, cpu)); in __bpf_hash_map_seq_show()
2086 check_and_init_map_value(map, info->percpu_value_buf + off); in __bpf_hash_map_seq_show()
2089 ctx.value = info->percpu_value_buf; in __bpf_hash_map_seq_show()
2115 struct bpf_map *map = aux->map; in bpf_iter_init_hash_map() local
2119 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_iter_init_hash_map()
2120 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_iter_init_hash_map()
2121 buf_size = round_up(map->value_size, 8) * num_possible_cpus(); in bpf_iter_init_hash_map()
2124 return -ENOMEM; in bpf_iter_init_hash_map()
2126 seq_info->percpu_value_buf = value_buf; in bpf_iter_init_hash_map()
2129 bpf_map_inc_with_uref(map); in bpf_iter_init_hash_map()
2130 seq_info->map = map; in bpf_iter_init_hash_map()
2131 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2139 bpf_map_put_with_uref(seq_info->map); in bpf_iter_fini_hash_map()
2140 kfree(seq_info->percpu_value_buf); in bpf_iter_fini_hash_map()
2157 static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn, in bpf_for_each_hash_elem() argument
2160 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem()
2173 return -EINVAL; in bpf_for_each_hash_elem()
2177 roundup_key_size = round_up(map->key_size, 8); in bpf_for_each_hash_elem()
2183 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2184 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2186 head = &b->head; in bpf_for_each_hash_elem()
2188 key = elem->key; in bpf_for_each_hash_elem()
2190 /* current cpu value for percpu map */ in bpf_for_each_hash_elem()
2191 pptr = htab_elem_get_ptr(elem, map->key_size); in bpf_for_each_hash_elem()
2194 val = elem->key + roundup_key_size; in bpf_for_each_hash_elem()
2197 ret = callback_fn((u64)(long)map, (u64)(long)key, in bpf_for_each_hash_elem()
2199 /* return value: 0 - continue, 1 - stop and return */ in bpf_for_each_hash_elem()
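bpf_for_each_hash_elem() implements the bpf_for_each_map_elem() helper for hash maps, invoking the callback once per element with the key, the value (the current CPU's copy for per-CPU maps), and a caller context; returning 1 from the callback stops the walk. A hedged BPF-side sketch, assuming a plain hash map named 'counters' defined in the same object (names and the attach point are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 1024);
    __type(key, __u32);
    __type(value, __u64);
} counters SEC(".maps");

struct walk_ctx {
    __u64 sum;
};

/* Called once per element; return 0 to continue, 1 to stop early. */
static __u64 sum_cb(struct bpf_map *map, __u32 *key, __u64 *val,
                    struct walk_ctx *ctx)
{
    ctx->sum += *val;
    return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int walk_counters(void *raw_ctx)
{
    struct walk_ctx ctx = { .sum = 0 };

    bpf_for_each_map_elem(&counters, sum_cb, &ctx, 0);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";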
2213 static u64 htab_map_mem_usage(const struct bpf_map *map) in htab_map_mem_usage() argument
2215 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_mem_usage()
2216 u32 value_size = round_up(htab->map.value_size, 8); in htab_map_mem_usage()
2223 usage += sizeof(struct bucket) * htab->n_buckets; in htab_map_mem_usage()
2226 num_entries = map->max_entries; in htab_map_mem_usage()
2230 usage += htab->elem_size * num_entries; in htab_map_mem_usage()
2239 num_entries = htab->use_percpu_counter ? in htab_map_mem_usage()
2240 percpu_counter_sum(&htab->pcount) : in htab_map_mem_usage()
2241 atomic_read(&htab->count); in htab_map_mem_usage()
2242 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries; in htab_map_mem_usage()
2296 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_percpu_map_lookup_elem() argument
2298 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_elem()
2301 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_percpu_map_lookup_elem()
2306 static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_percpu_map_lookup_percpu_elem() argument
2310 if (cpu >= nr_cpu_ids) in htab_percpu_map_lookup_percpu_elem()
2313 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_lookup_percpu_elem()
2315 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_percpu_map_lookup_percpu_elem()
2320 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) in htab_lru_percpu_map_lookup_elem() argument
2322 struct htab_elem *l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_elem()
2325 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_elem()
2326 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); in htab_lru_percpu_map_lookup_elem()
2332 static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in htab_lru_percpu_map_lookup_percpu_elem() argument
2336 if (cpu >= nr_cpu_ids) in htab_lru_percpu_map_lookup_percpu_elem()
2339 l = __htab_map_lookup_elem(map, key); in htab_lru_percpu_map_lookup_percpu_elem()
2341 bpf_lru_node_set_ref(&l->lru_node); in htab_lru_percpu_map_lookup_percpu_elem()
2342 return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu); in htab_lru_percpu_map_lookup_percpu_elem()
2348 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_hash_copy() argument
2352 int ret = -ENOENT; in bpf_percpu_hash_copy()
2353 int cpu, off = 0; in bpf_percpu_hash_copy() local
2356 /* per_cpu areas are zero-filled and bpf programs can only in bpf_percpu_hash_copy()
2360 size = round_up(map->value_size, 8); in bpf_percpu_hash_copy()
2362 l = __htab_map_lookup_elem(map, key); in bpf_percpu_hash_copy()
2365 /* We do not mark LRU map element here in order to not mess up in bpf_percpu_hash_copy()
2366 * eviction heuristics when user space does a map walk. in bpf_percpu_hash_copy()
2368 pptr = htab_elem_get_ptr(l, map->key_size); in bpf_percpu_hash_copy()
2369 for_each_possible_cpu(cpu) { in bpf_percpu_hash_copy()
2370 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); in bpf_percpu_hash_copy()
2371 check_and_init_map_value(map, value + off); in bpf_percpu_hash_copy()
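bpf_percpu_hash_copy() packs one value per possible CPU into the buffer supplied by user space, each slot rounded up to 8 bytes, without touching LRU state. The matching user-space side therefore has to size its buffer from the possible-CPU count, not the online count. A libbpf sketch, assuming 'fd' is a BPF_MAP_TYPE_PERCPU_HASH map with __u32 keys and __u64 values:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdio.h>
#include <stdlib.h>

static int dump_percpu_value(int fd, __u32 key)
{
    int ncpus = libbpf_num_possible_cpus();
    __u64 *values;
    int err;

    if (ncpus < 0)
        return ncpus;

    values = calloc(ncpus, sizeof(*values)); /* __u64 slots are already 8-byte aligned */
    if (!values)
        return -1;

    err = bpf_map_lookup_elem(fd, &key, values);
    if (!err)
        for (int cpu = 0; cpu < ncpus; cpu++)
            printf("cpu%d: %llu\n", cpu, (unsigned long long)values[cpu]);

    free(values);
    return err;
}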
2380 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_hash_update() argument
2383 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update()
2388 ret = __htab_lru_percpu_map_update_elem(map, key, value, in bpf_percpu_hash_update()
2391 ret = __htab_percpu_map_update_elem(map, key, value, map_flags, in bpf_percpu_hash_update()
2398 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, in htab_percpu_map_seq_show_elem() argument
2403 int cpu; in htab_percpu_map_seq_show_elem() local
2407 l = __htab_map_lookup_elem(map, key); in htab_percpu_map_seq_show_elem()
2413 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in htab_percpu_map_seq_show_elem()
2415 pptr = htab_elem_get_ptr(l, map->key_size); in htab_percpu_map_seq_show_elem()
2416 for_each_possible_cpu(cpu) { in htab_percpu_map_seq_show_elem()
2417 seq_printf(m, "\tcpu%d: ", cpu); in htab_percpu_map_seq_show_elem()
2418 btf_type_seq_show(map->btf, map->btf_value_type_id, in htab_percpu_map_seq_show_elem()
2419 per_cpu_ptr(pptr, cpu), m); in htab_percpu_map_seq_show_elem()
2469 if (attr->value_size != sizeof(u32)) in fd_htab_map_alloc_check()
2470 return -EINVAL; in fd_htab_map_alloc_check()
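fd-based hash maps (in practice, hash-of-maps) declare a 4-byte value because user space writes an inner map's fd on update and reads back its id on lookup, as the BPF_MAP_TYPE_HASH_OF_MAPS branch in the batch code above also shows. A user-space sketch, assuming libbpf (map names and sizes are illustrative):

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Create a hash-of-maps whose inner maps are one-slot arrays. */
static int create_hash_of_maps(void)
{
    __u32 key = 0, inner_id = 0;
    int inner_fd, outer_fd;

    inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                              sizeof(__u32), sizeof(__u64), 1, NULL);
    if (inner_fd < 0)
        return inner_fd;

    LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
    outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
                              sizeof(__u32), sizeof(__u32) /* value: u32 */,
                              16, &opts);
    if (outer_fd < 0)
        return outer_fd;

    /* update takes the inner map's fd ... */
    bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
    /* ... but lookup from user space returns the inner map's id */
    bpf_map_lookup_elem(outer_fd, &key, &inner_id);

    return outer_fd;
}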
2474 static void fd_htab_map_free(struct bpf_map *map) in fd_htab_map_free() argument
2476 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free()
2482 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2486 void *ptr = fd_htab_map_get_ptr(map, l); in fd_htab_map_free()
2488 map->ops->map_fd_put_ptr(map, ptr, false); in fd_htab_map_free()
2492 htab_map_free(map); in fd_htab_map_free()
2496 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_htab_map_lookup_elem() argument
2501 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_htab_map_lookup_elem()
2502 return -ENOTSUPP; in bpf_fd_htab_map_lookup_elem()
2505 ptr = htab_map_lookup_elem(map, key); in bpf_fd_htab_map_lookup_elem()
2507 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr)); in bpf_fd_htab_map_lookup_elem()
2509 ret = -ENOENT; in bpf_fd_htab_map_lookup_elem()
2516 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_htab_map_update_elem() argument
2523 ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_htab_map_update_elem()
2528 * htab map, and the following rcu_read_lock() is only used to avoid in bpf_fd_htab_map_update_elem()
2532 ret = htab_map_update_elem(map, key, &ptr, map_flags); in bpf_fd_htab_map_update_elem()
2535 map->ops->map_fd_put_ptr(map, ptr, false); in bpf_fd_htab_map_update_elem()
2542 struct bpf_map *map, *inner_map_meta; in htab_of_map_alloc() local
2544 inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); in htab_of_map_alloc()
2548 map = htab_map_alloc(attr); in htab_of_map_alloc()
2549 if (IS_ERR(map)) { in htab_of_map_alloc()
2551 return map; in htab_of_map_alloc()
2554 map->inner_map_meta = inner_map_meta; in htab_of_map_alloc()
2556 return map; in htab_of_map_alloc()
2559 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) in htab_of_map_lookup_elem() argument
2561 struct bpf_map **inner_map = htab_map_lookup_elem(map, key); in htab_of_map_lookup_elem()
2569 static int htab_of_map_gen_lookup(struct bpf_map *map, in htab_of_map_gen_lookup() argument
2576 (void *(*)(struct bpf_map *map, void *key))NULL)); in htab_of_map_gen_lookup()
2581 round_up(map->key_size, 8)); in htab_of_map_gen_lookup()
2584 return insn - insn_buf; in htab_of_map_gen_lookup()
2587 static void htab_of_map_free(struct bpf_map *map) in htab_of_map_free() argument
2589 bpf_map_meta_free(map->inner_map_meta); in htab_of_map_free()
2590 fd_htab_map_free(map); in htab_of_map_free()