Lines matching the full identifier "array" in kernel/bpf/arraymap.c, the Linux BPF array map implementation. Each hit is listed with its source line number, the matching code, and the enclosing function.
22 static void bpf_array_free_percpu(struct bpf_array *array) in bpf_array_free_percpu() argument
26 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
27 free_percpu(array->pptrs[i]); in bpf_array_free_percpu()
32 static int bpf_array_alloc_percpu(struct bpf_array *array) in bpf_array_alloc_percpu() argument
37 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, in bpf_array_alloc_percpu()
41 bpf_array_free_percpu(array); in bpf_array_alloc_percpu()
44 array->pptrs[i] = ptr; in bpf_array_alloc_percpu()
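
The two helpers above form an alloc/unwind pair: bpf_array_alloc_percpu() grabs one per-CPU region per array slot, and on a mid-loop failure it calls bpf_array_free_percpu(), which can safely walk all max_entries slots because the pptrs[] backing store starts out zeroed and free_percpu(NULL) is a no-op. A minimal userspace sketch of the same pattern, with calloc()/free() standing in for the per-CPU allocator (the toy_* names are illustrative, not kernel API):

#include <stdlib.h>

struct toy_array {
	unsigned int max_entries;
	void **pptrs;			/* zero-initialized, like bpf_map_area_alloc() memory */
};

static void toy_free_percpu(struct toy_array *array)
{
	for (unsigned int i = 0; i < array->max_entries; i++)
		free(array->pptrs[i]);	/* free(NULL) is a no-op, like free_percpu() */
}

static int toy_alloc_percpu(struct toy_array *array, size_t elem_size)
{
	for (unsigned int i = 0; i < array->max_entries; i++) {
		void *ptr = calloc(1, elem_size);

		if (!ptr) {
			toy_free_percpu(array);	/* unwind the slots filled so far */
			return -1;		/* -ENOMEM in the kernel */
		}
		array->pptrs[i] = ptr;
	}
	return 0;
}
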
87 struct bpf_array *array; in array_map_alloc() local
103 /* round up array size to nearest power of 2, in array_map_alloc()
112 array_size = sizeof(*array); in array_map_alloc()
117 * ensure array->value is exactly page-aligned in array_map_alloc()
135 array = data + PAGE_ALIGN(sizeof(struct bpf_array)) in array_map_alloc()
138 array = bpf_map_area_alloc(array_size, numa_node); in array_map_alloc()
140 if (!array) in array_map_alloc()
142 array->index_mask = index_mask; in array_map_alloc()
143 array->map.bypass_spec_v1 = bypass_spec_v1; in array_map_alloc()
146 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
147 array->elem_size = elem_size; in array_map_alloc()
149 if (percpu && bpf_array_alloc_percpu(array)) { in array_map_alloc()
150 bpf_map_area_free(array); in array_map_alloc()
154 return &array->map; in array_map_alloc()
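
array_map_alloc() computes index_mask by rounding max_entries up to the next power of two and subtracting one; unless the loader may bypass the Spectre v1 mitigation, it also allocates mask + 1 elements, so a masked index can never step outside the allocation even under misspeculation. A standalone demo of that arithmetic (roundup_pow_of_two32() is a local stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two32(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t max_entries = 100;	/* as if from attr->max_entries */
	uint32_t index_mask = roundup_pow_of_two32(max_entries) - 1;

	/* prints: mask=0x7f, allocate 128 slots */
	printf("mask=0x%x, allocate %u slots\n", index_mask, index_mask + 1);
	return 0;
}
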
157 static void *array_map_elem_ptr(struct bpf_array* array, u32 index) in array_map_elem_ptr() argument
159 return array->value + (u64)array->elem_size * index; in array_map_elem_ptr()
165 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem() local
168 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
171 return array->value + (u64)array->elem_size * (index & array->index_mask); in array_map_lookup_elem()
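
The lookup above uses a double guard: the explicit bounds check rejects bad keys architecturally, while the AND with index_mask also bounds the address computation on the speculative path. A userspace analogue (struct layout simplified; names are illustrative):

#include <stdint.h>

struct toy_array {
	uint32_t max_entries, index_mask, elem_size;
	char value[];			/* element storage, as in struct bpf_array */
};

static void *toy_lookup(struct toy_array *a, uint32_t index)
{
	if (index >= a->max_entries)	/* architectural check */
		return NULL;
	/* the mask keeps even a misspeculated load inside the array */
	return a->value + (uint64_t)a->elem_size * (index & a->index_mask);
}
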
177 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_addr() local
184 *imm = (unsigned long)array->value; in array_map_direct_value_addr()
191 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_meta() local
192 u64 base = (unsigned long)array->value; in array_map_direct_value_meta()
193 u64 range = array->elem_size; in array_map_direct_value_meta()
207 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_gen_lookup() local
209 u32 elem_size = array->elem_size; in array_map_gen_lookup()
221 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_map_gen_lookup()
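
array_map_gen_lookup() lets the verifier inline this lookup into the program instead of emitting a helper call; line 221 above is the masking step landing in the generated code. A hedged, excerpt-style sketch of the emitted sequence (simplified to the power-of-two elem_size case with the mitigation enabled; the BPF_* insn macros come from include/linux/filter.h, and map_ptr/index/ret name the registers as in the kernel source):

*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);		   /* ret = *(u32 *)key    */
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); /* out of range? miss   */
*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); /* clamp index (line 221) */
*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));  /* index -> byte offset */
*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);		   /* ret = &value[index]  */
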
240 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_elem() local
243 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_elem()
246 return this_cpu_ptr(array->pptrs[index & array->index_mask]); in percpu_array_map_lookup_elem()
251 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_percpu_elem() local
257 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_percpu_elem()
260 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); in percpu_array_map_lookup_percpu_elem()
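
percpu_array_map_lookup_elem() hands a BPF program the current CPU's copy via this_cpu_ptr(), while percpu_array_map_lookup_percpu_elem() backs the bpf_map_lookup_percpu_elem() helper and fetches an explicit CPU's copy with per_cpu_ptr(). A BPF-side usage sketch (assumes libbpf headers; map, section, and function names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int peek_cpu0(void *ctx)
{
	__u32 key = 0;
	__u64 *mine = bpf_map_lookup_elem(&counters, &key);	      /* this CPU's slot */
	__u64 *cpu0 = bpf_map_lookup_percpu_elem(&counters, &key, 0); /* CPU 0's slot   */

	if (mine)
		(*mine)++;
	if (cpu0)
		bpf_printk("cpu0 slot=%llu", *cpu0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
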
265 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_copy() local
271 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_copy()
278 size = array->elem_size; in bpf_percpu_array_copy()
280 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_copy()
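
bpf_percpu_array_copy() is the syscall-side read: one lookup on a per-CPU array returns every possible CPU's copy, each value padded to 8 bytes. The matching userspace pattern with libbpf (map_fd acquisition and error handling trimmed; an 8-byte value keeps the stride free of extra padding):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdint.h>
#include <stdio.h>

static void dump_slot(int map_fd, uint32_t key)
{
	int ncpus = libbpf_num_possible_cpus();
	uint64_t values[ncpus];		/* one value per possible CPU */

	if (bpf_map_lookup_elem(map_fd, &key, values))
		return;
	for (int cpu = 0; cpu < ncpus; cpu++)
		printf("cpu%d: %llu\n", cpu, (unsigned long long)values[cpu]);
}
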
293 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_get_next_key() local
297 if (index >= array->map.max_entries) { in array_map_get_next_key()
302 if (index == array->map.max_entries - 1) in array_map_get_next_key()
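
array_map_get_next_key() defines the iteration order userspace sees: a NULL or out-of-range key yields 0, the last index returns -ENOENT, anything else returns key + 1. Walking all keys from userspace with libbpf therefore looks like this (map_fd acquisition elided):

#include <bpf/bpf.h>
#include <stdint.h>

static void walk_keys(int map_fd)
{
	uint32_t key, next;
	int err;

	/* NULL prev key -> first key, which is 0 for arrays */
	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
	     err = bpf_map_get_next_key(map_fd, &key, &next)) {
		key = next;
		/* ... look up / process `key` here ... */
	}
}
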
313 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_update_elem() local
321 if (unlikely(index >= array->map.max_entries)) in array_map_update_elem()
333 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_update_elem()
334 val = this_cpu_ptr(array->pptrs[index & array->index_mask]); in array_map_update_elem()
336 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
338 val = array->value + in array_map_update_elem()
339 (u64)array->elem_size * (index & array->index_mask); in array_map_update_elem()
344 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
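
array_map_update_elem() writes either the per-CPU copy (this_cpu_ptr) or the flat slot depending on map type, then lets bpf_obj_free_fields() release any special fields the overwritten value held. One consequence worth showing: every array slot is preallocated, so from userspace BPF_NOEXIST can never succeed (it returns -EEXIST) and only an overwriting update makes sense (sketch; map_fd acquisition elided):

#include <bpf/bpf.h>
#include <stdint.h>

static int set_slot(int map_fd, uint32_t key, uint64_t val)
{
	/* BPF_NOEXIST fails with -EEXIST on arrays: the slot always exists */
	return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
}
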
352 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_update() local
362 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_update()
376 size = array->elem_size; in bpf_percpu_array_update()
378 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_update()
381 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); in bpf_percpu_array_update()
394 static void *array_map_vmalloc_addr(struct bpf_array *array) in array_map_vmalloc_addr() argument
396 return (void *)round_down((unsigned long)array, PAGE_SIZE); in array_map_vmalloc_addr()
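
array_map_vmalloc_addr() relies on the BPF_F_MMAPABLE layout set up in array_map_alloc() (lines 117 and 135 above): the struct is placed so array->value lands exactly on a page boundary, which leaves the struct itself inside the first page of the vmalloc area, so rounding the struct pointer down to a page recovers the allocation base. A standalone demo of that placement trick (aligned_alloc() stands in for vmalloc):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define round_down(x, a)	((uintptr_t)(x) & ~((uintptr_t)(a) - 1))

struct hdr {
	uint64_t meta;
	char value[];		/* payload, like array->value */
};

int main(void)
{
	void *data = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
	/* place the header so hdr->value starts exactly on the next page */
	struct hdr *h = (struct hdr *)((char *)data + PAGE_SIZE
				       - offsetof(struct hdr, value));

	assert(((uintptr_t)h->value & (PAGE_SIZE - 1)) == 0);
	assert((void *)round_down(h, PAGE_SIZE) == data);	/* base recovered */
	free(data);
	return 0;
}
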
401 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free_timers() local
408 for (i = 0; i < array->map.max_entries; i++) in array_map_free_timers()
409 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers()
415 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free() local
419 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_free()
420 for (i = 0; i < array->map.max_entries; i++) { in array_map_free()
421 void __percpu *pptr = array->pptrs[i & array->index_mask]; in array_map_free()
430 for (i = 0; i < array->map.max_entries; i++) in array_map_free()
431 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); in array_map_free()
435 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_free()
436 bpf_array_free_percpu(array); in array_map_free()
438 if (array->map.map_flags & BPF_F_MMAPABLE) in array_map_free()
439 bpf_map_area_free(array_map_vmalloc_addr(array)); in array_map_free()
441 bpf_map_area_free(array); in array_map_free()
468 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_seq_show_elem() local
476 pptr = array->pptrs[index & array->index_mask]; in percpu_array_map_seq_show_elem()
511 /* bpf array can only take a u32 key. This check makes sure in array_map_check_btf()
522 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mmap() local
523 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; in array_map_mmap()
529 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) in array_map_mmap()
532 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), in array_map_mmap()
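
array_map_mmap() is what makes BPF_F_MMAPABLE arrays usable without a syscall per access: offset 0 of the mapping corresponds to array->value, and the kernel refuses mappings that would extend past the page-aligned value area. The userspace side, sketched with libbpf (error handling trimmed; the map name is illustrative):

#include <bpf/bpf.h>
#include <stdint.h>
#include <sys/mman.h>

static uint64_t *map_array_values(int *out_fd, uint32_t nelems)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_demo",
				sizeof(uint32_t), sizeof(uint64_t),
				nelems, &opts);
	/* offset 0 lands on array->value, which the alloc made page-aligned */
	uint64_t *vals = mmap(NULL, nelems * sizeof(uint64_t),
			      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	*out_fd = fd;
	return vals;		/* vals[i] aliases element i of the map */
}
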
555 struct bpf_array *array; in bpf_array_map_seq_start() local
563 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_start()
564 index = info->index & array->index_mask; in bpf_array_map_seq_start()
566 return array->pptrs[index]; in bpf_array_map_seq_start()
567 return array_map_elem_ptr(array, index); in bpf_array_map_seq_start()
574 struct bpf_array *array; in bpf_array_map_seq_next() local
582 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_next()
583 index = info->index & array->index_mask; in bpf_array_map_seq_next()
585 return array->pptrs[index]; in bpf_array_map_seq_next()
586 return array_map_elem_ptr(array, index); in bpf_array_map_seq_next()
594 struct bpf_array *array = container_of(map, struct bpf_array, map); in __bpf_array_map_seq_show() local
615 size = array->elem_size; in __bpf_array_map_seq_show()
645 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_iter_init_array_map() local
650 buf_size = array->elem_size * num_possible_cpus(); in bpf_iter_init_array_map()
693 struct bpf_array *array; in bpf_for_each_array_elem() local
702 array = container_of(map, struct bpf_array, map); in bpf_for_each_array_elem()
707 val = this_cpu_ptr(array->pptrs[i]); in bpf_for_each_array_elem()
709 val = array_map_elem_ptr(array, i); in bpf_for_each_array_elem()
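
bpf_for_each_array_elem() is the kernel side of the bpf_for_each_map_elem() helper: it walks every index (taking the per-CPU copy via this_cpu_ptr() for per-CPU arrays) and invokes the program-supplied callback per element. A BPF-side usage sketch (assumes libbpf headers; names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map;			/* opaque to the program */

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} vals SEC(".maps");

static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;		/* nonzero would stop the walk early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_all(void *ctx)
{
	__u64 sum = 0;

	bpf_for_each_map_elem(&vals, sum_cb, &sum, 0);
	bpf_printk("sum=%llu", sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
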
726 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mem_usage() local
728 u32 elem_size = array->elem_size; in array_map_mem_usage()
730 u64 usage = sizeof(*array); in array_map_mem_usage()
806 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_free() local
810 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free()
811 BUG_ON(array->ptrs[i] != NULL); in fd_array_map_free()
813 bpf_map_area_free(array); in fd_array_map_free()
845 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_update_elem() local
852 if (index >= array->map.max_entries) in bpf_fd_array_map_update_elem()
861 mutex_lock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
862 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
864 mutex_unlock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
866 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
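
bpf_fd_array_map_update_elem() is shared by the fd-backed array flavours (prog arrays, perf event arrays, array-of-maps): the "value" userspace supplies is a file descriptor, which the kernel translates into an object pointer and xchg()s into the slot; prog arrays additionally take aux->poke_mutex so tail-call sites can be text-poked consistently. From userspace, installing a tail-call target is just an ordinary update with a prog fd as the value (sketch; both fds obtained elsewhere):

#include <bpf/bpf.h>
#include <stdint.h>

static int install_tail_call(int prog_array_fd, uint32_t slot, int prog_fd)
{
	/* the previously installed program, if any, is swapped out atomically */
	return bpf_map_update_elem(prog_array_fd, &slot, &prog_fd, BPF_ANY);
}
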
876 struct bpf_array *array = container_of(map, struct bpf_array, map); in __fd_array_map_delete_elem() local
880 if (index >= array->map.max_entries) in __fd_array_map_delete_elem()
884 mutex_lock(&array->aux->poke_mutex); in __fd_array_map_delete_elem()
885 old_ptr = xchg(array->ptrs + index, NULL); in __fd_array_map_delete_elem()
887 mutex_unlock(&array->aux->poke_mutex); in __fd_array_map_delete_elem()
889 old_ptr = xchg(array->ptrs + index, NULL); in __fd_array_map_delete_elem()
935 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_clear() local
938 for (i = 0; i < array->map.max_entries; i++) in bpf_fd_array_map_clear()
1219 struct bpf_array *array = container_of(map, struct bpf_array, map); in perf_event_fd_array_release() local
1227 for (i = 0; i < array->map.max_entries; i++) { in perf_event_fd_array_release()
1228 ee = READ_ONCE(array->ptrs[i]); in perf_event_fd_array_release()
1336 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_of_map_gen_lookup() local
1337 u32 elem_size = array->elem_size; in array_of_map_gen_lookup()
1347 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_of_map_gen_lookup()