Lines Matching +full:static +full:- +full:trace +full:- +full:id (all matches below are from kernel/bpf/stackmap.c)

1 // SPDX-License-Identifier: GPL-2.0-only
34 static inline bool stack_map_use_build_id(struct bpf_map *map) in stack_map_use_build_id()
36 return (map->map_flags & BPF_F_STACK_BUILD_ID); in stack_map_use_build_id()
39 static inline int stack_map_data_size(struct bpf_map *map) in stack_map_data_size()
45 static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) in prealloc_elems_and_freelist()
48 (u64)smap->map.value_size; in prealloc_elems_and_freelist()
51 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, in prealloc_elems_and_freelist()
52 smap->map.numa_node); in prealloc_elems_and_freelist()
53 if (!smap->elems) in prealloc_elems_and_freelist()
54 return -ENOMEM; in prealloc_elems_and_freelist()
56 err = pcpu_freelist_init(&smap->freelist); in prealloc_elems_and_freelist()
60 pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size, in prealloc_elems_and_freelist()
61 smap->map.max_entries); in prealloc_elems_and_freelist()
65 bpf_map_area_free(smap->elems); in prealloc_elems_and_freelist()
70 static struct bpf_map *stack_map_alloc(union bpf_attr *attr) in stack_map_alloc()
72 u32 value_size = attr->value_size; in stack_map_alloc()
77 if (attr->map_flags & ~STACK_CREATE_FLAG_MASK) in stack_map_alloc()
78 return ERR_PTR(-EINVAL); in stack_map_alloc()
81 if (attr->max_entries == 0 || attr->key_size != 4 || in stack_map_alloc()
83 return ERR_PTR(-EINVAL); in stack_map_alloc()
86 if (attr->map_flags & BPF_F_STACK_BUILD_ID) { in stack_map_alloc()
90 return ERR_PTR(-EINVAL); in stack_map_alloc()
92 return ERR_PTR(-EINVAL); in stack_map_alloc()
95 * into UB on 32-bit arches, so check that first in stack_map_alloc()
97 if (attr->max_entries > 1UL << 31) in stack_map_alloc()
98 return ERR_PTR(-E2BIG); in stack_map_alloc()
100 n_buckets = roundup_pow_of_two(attr->max_entries); in stack_map_alloc()
105 return ERR_PTR(-ENOMEM); in stack_map_alloc()
107 bpf_map_init_from_attr(&smap->map, attr); in stack_map_alloc()
108 smap->n_buckets = n_buckets; in stack_map_alloc()
118 return &smap->map; in stack_map_alloc()
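
stack_map_alloc() requires key_size == 4 and a value_size that is a whole number of per-frame elements (8-byte IPs, or sizeof(struct bpf_stack_build_id) for build-ID maps). A minimal sketch of how such a map is usually declared on the BPF side, assuming libbpf's SEC(".maps") convention; the name stackmap and the 10000-entry sizing are illustrative:

    #include <linux/bpf.h>
    #include <linux/perf_event.h>           /* PERF_MAX_STACK_DEPTH (127) */
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(key_size, sizeof(__u32));    /* stack ids are 4-byte keys */
        __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
        __uint(max_entries, 10000);
    } stackmap SEC(".maps");

A build-ID variant would add __uint(map_flags, BPF_F_STACK_BUILD_ID) and size the value as a multiple of sizeof(struct bpf_stack_build_id) instead of 8-byte IPs.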
127 static int fetch_build_id(struct vm_area_struct *vma, unsigned char *build_id, bool may_fault) in fetch_build_id()
136 * - either adjusted in place to a file offset, if build ID fetching
137 * succeeds; in this case id_offs[i].build_id is set to correct build ID,
139 * - or IP will be kept intact, if build ID fetching failed; in this case
143 static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, in stack_map_get_build_id_offset()
156 if (!user || !current || !current->mm || irq_work_busy || in stack_map_get_build_id_offset()
157 !mmap_read_trylock(current->mm)) { in stack_map_get_build_id_offset()
158 /* cannot access current->mm, fall back to ips */ in stack_map_get_build_id_offset()
174 vma = find_vma(current->mm, ip); in stack_map_get_build_id_offset()
182 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start; in stack_map_get_build_id_offset()
187 bpf_mmap_unlock_mm(work, current->mm); in stack_map_get_build_id_offset()
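
For context, each frame that stack_map_get_build_id_offset() resolves is stored as the UAPI struct bpf_stack_build_id; the status field tells user space whether the file offset or the raw IP is the valid member. Excerpted from include/uapi/linux/bpf.h:

    enum bpf_stack_build_id_status {
        BPF_STACK_BUILD_ID_EMPTY = 0,   /* end-of-trace marker for user space */
        BPF_STACK_BUILD_ID_VALID = 1,   /* build_id and offset are valid */
        BPF_STACK_BUILD_ID_IP    = 2,   /* fetch failed, fall back to ip */
    };

    #define BPF_BUILD_ID_SIZE 20

    struct bpf_stack_build_id {
        __s32           status;
        unsigned char   build_id[BPF_BUILD_ID_SIZE];
        union {
            __u64       offset;
            __u64       ip;
        };
    };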
190 static struct perf_callchain_entry *
202 entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip, in get_callchain_entry_for_task()
206 * perf_callchain_entry uses u64 array. For 32-bit systems, it is in get_callchain_entry_for_task()
210 unsigned long *from = (unsigned long *) entry->ip; in get_callchain_entry_for_task()
211 u64 *to = entry->ip; in get_callchain_entry_for_task()
215 for (i = entry->nr - 1; i >= 0; i--) in get_callchain_entry_for_task()
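
The loop above widens the saved entries in place: on 32-bit kernels, stack_trace_save_tsk() packs 4-byte unsigned longs into the front of the 8-byte entry->ip array, and copying from the last index downward never overwrites a slot that has not been read yet. A standalone, purely illustrative user-space sketch of the same ordering trick (compile with -fno-strict-aliasing, as the kernel does):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t buf[4];
        uint32_t *packed = (uint32_t *)buf;     /* plays the role of "from" */
        int i, n = 4;

        for (i = 0; i < n; i++)
            packed[i] = 0x1000 + i;             /* pretend these are saved IPs */

        for (i = n - 1; i >= 0; i--)            /* tail first, as in the kernel loop */
            buf[i] = packed[i];

        for (i = 0; i < n; i++)
            printf("%#llx\n", (unsigned long long)buf[i]);
        return 0;
    }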
227 static long __bpf_get_stackid(struct bpf_map *map, in __bpf_get_stackid()
228 struct perf_callchain_entry *trace, u64 flags) in __bpf_get_stackid() argument
233 u32 hash, id, trace_nr, trace_len, i; in __bpf_get_stackid() local
238 if (trace->nr <= skip) in __bpf_get_stackid()
239 /* skipping more than usable stack trace */ in __bpf_get_stackid()
240 return -EFAULT; in __bpf_get_stackid()
242 trace_nr = trace->nr - skip; in __bpf_get_stackid()
244 ips = trace->ip + skip; in __bpf_get_stackid()
246 id = hash & (smap->n_buckets - 1); in __bpf_get_stackid()
247 bucket = READ_ONCE(smap->buckets[id]); in __bpf_get_stackid()
249 hash_matches = bucket && bucket->hash == hash; in __bpf_get_stackid()
252 return id; in __bpf_get_stackid()
259 pcpu_freelist_pop(&smap->freelist); in __bpf_get_stackid()
261 return -ENOMEM; in __bpf_get_stackid()
262 new_bucket->nr = trace_nr; in __bpf_get_stackid()
263 id_offs = (struct bpf_stack_build_id *)new_bucket->data; in __bpf_get_stackid()
268 if (hash_matches && bucket->nr == trace_nr && in __bpf_get_stackid()
269 memcmp(bucket->data, new_bucket->data, trace_len) == 0) { in __bpf_get_stackid()
270 pcpu_freelist_push(&smap->freelist, &new_bucket->fnode); in __bpf_get_stackid()
271 return id; in __bpf_get_stackid()
274 pcpu_freelist_push(&smap->freelist, &new_bucket->fnode); in __bpf_get_stackid()
275 return -EEXIST; in __bpf_get_stackid()
278 if (hash_matches && bucket->nr == trace_nr && in __bpf_get_stackid()
279 memcmp(bucket->data, ips, trace_len) == 0) in __bpf_get_stackid()
280 return id; in __bpf_get_stackid()
282 return -EEXIST; in __bpf_get_stackid()
285 pcpu_freelist_pop(&smap->freelist); in __bpf_get_stackid()
287 return -ENOMEM; in __bpf_get_stackid()
288 memcpy(new_bucket->data, ips, trace_len); in __bpf_get_stackid()
291 new_bucket->hash = hash; in __bpf_get_stackid()
292 new_bucket->nr = trace_nr; in __bpf_get_stackid()
294 old_bucket = xchg(&smap->buckets[id], new_bucket); in __bpf_get_stackid()
296 pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); in __bpf_get_stackid()
297 return id; in __bpf_get_stackid()
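
The skip count, user/kernel selection, and duplicate handling seen in __bpf_get_stackid() are all driven by the caller's flags word. The relevant bits, from include/uapi/linux/bpf.h:

    #define BPF_F_SKIP_FIELD_MASK   0xffULL         /* low 8 bits: frames to skip */
    #define BPF_F_USER_STACK        (1ULL << 8)     /* capture user stack instead of kernel */
    /* bpf_get_stackid() only */
    #define BPF_F_FAST_STACK_CMP    (1ULL << 9)     /* dedup by hash only, skip memcmp */
    #define BPF_F_REUSE_STACKID     (1ULL << 10)    /* overwrite a colliding bucket instead of -EEXIST */
    /* bpf_get_stack() only */
    #define BPF_F_USER_BUILD_ID     (1ULL << 11)    /* return build id + offset records */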
303 u32 max_depth = map->value_size / stack_map_data_size(map); in BPF_CALL_3()
306 struct perf_callchain_entry *trace; in BPF_CALL_3() local
311 return -EINVAL; in BPF_CALL_3()
317 trace = get_perf_callchain(regs, 0, kernel, user, max_depth, in BPF_CALL_3()
320 if (unlikely(!trace)) in BPF_CALL_3()
321 /* couldn't fetch the stack trace */ in BPF_CALL_3()
322 return -EFAULT; in BPF_CALL_3()
324 return __bpf_get_stackid(map, trace, flags); in BPF_CALL_3()
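
A minimal sketch of a sampling program that feeds this helper, assuming libbpf headers and the hypothetical stackmap declaration from the earlier sketch (not repeated here); a real profiler would typically also count hits per returned id in a companion hash map:

    #include <linux/bpf.h>
    #include <linux/bpf_perf_event.h>
    #include <bpf/bpf_helpers.h>

    SEC("perf_event")
    int do_sample(struct bpf_perf_event_data *ctx)
    {
        /* low 8 bits of the flags would be a skip count; none needed here */
        long id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);

        if (id < 0)         /* -EFAULT, -ENOMEM or -EEXIST, as seen above */
            return 0;
        /* id is the bucket index user space can later look up in stackmap */
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";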
336 static __u64 count_kernel_ip(struct perf_callchain_entry *trace) in count_kernel_ip() argument
340 while (nr_kernel < trace->nr) { in count_kernel_ip()
341 if (trace->ip[nr_kernel] == PERF_CONTEXT_USER) in count_kernel_ip()
351 struct perf_event *event = ctx->event; in BPF_CALL_3()
352 struct perf_callchain_entry *trace; in BPF_CALL_3() local
358 if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) in BPF_CALL_3()
359 return bpf_get_stackid((unsigned long)(ctx->regs), in BPF_CALL_3()
364 return -EINVAL; in BPF_CALL_3()
369 trace = ctx->data->callchain; in BPF_CALL_3()
370 if (unlikely(!trace)) in BPF_CALL_3()
371 return -EFAULT; in BPF_CALL_3()
373 nr_kernel = count_kernel_ip(trace); in BPF_CALL_3()
376 __u64 nr = trace->nr; in BPF_CALL_3()
378 trace->nr = nr_kernel; in BPF_CALL_3()
379 ret = __bpf_get_stackid(map, trace, flags); in BPF_CALL_3()
382 trace->nr = nr; in BPF_CALL_3()
388 return -EFAULT; in BPF_CALL_3()
391 ret = __bpf_get_stackid(map, trace, flags); in BPF_CALL_3()
405 static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, in __bpf_get_stack()
414 struct perf_callchain_entry *trace; in __bpf_get_stack() local
416 int err = -EINVAL; in __bpf_get_stack()
437 err = -EOPNOTSUPP; in __bpf_get_stack()
450 trace = trace_in; in __bpf_get_stack()
452 trace = get_callchain_entry_for_task(task, max_depth); in __bpf_get_stack()
454 trace = get_perf_callchain(regs, 0, kernel, user, max_depth, in __bpf_get_stack()
457 if (unlikely(!trace) || trace->nr < skip) { in __bpf_get_stack()
463 trace_nr = trace->nr - skip; in __bpf_get_stack()
467 ips = trace->ip + skip; in __bpf_get_stack()
478 /* trace/ips should not be dereferenced after this point */ in __bpf_get_stack()
486 memset(buf + copy_len, 0, size - copy_len); in __bpf_get_stack()
490 err = -EFAULT; in __bpf_get_stack()
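
__bpf_get_stack() backs bpf_get_stack(), which copies raw frame IPs (or build-ID records) straight into a caller-supplied buffer instead of a map bucket. A hedged sketch of a tracing program using it; the attach point and buffer size are illustrative:

    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_FRAMES 32

    SEC("kprobe/do_nanosleep")              /* illustrative attach point */
    int capture_stack(struct pt_regs *ctx)
    {
        __u64 ips[MAX_FRAMES];
        /* size must be a multiple of 8; returns bytes copied or a negative errno */
        long n = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);

        if (n < 0)
            return 0;
        /* the first n bytes are frames; the unused tail of the buffer is
         * zero-filled by the helper (see the memset in __bpf_get_stack above) */
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";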
528 static long __bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, in __bpf_get_task_stack()
532 long res = -EINVAL; in __bpf_get_task_stack()
535 return -EFAULT; in __bpf_get_task_stack()
582 struct pt_regs *regs = (struct pt_regs *)(ctx->regs); in BPF_CALL_4()
583 struct perf_event *event = ctx->event; in BPF_CALL_4()
584 struct perf_callchain_entry *trace; in BPF_CALL_4() local
586 int err = -EINVAL; in BPF_CALL_4()
589 if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) in BPF_CALL_4()
599 err = -EFAULT; in BPF_CALL_4()
600 trace = ctx->data->callchain; in BPF_CALL_4()
601 if (unlikely(!trace)) in BPF_CALL_4()
604 nr_kernel = count_kernel_ip(trace); in BPF_CALL_4()
607 __u64 nr = trace->nr; in BPF_CALL_4()
609 trace->nr = nr_kernel; in BPF_CALL_4()
610 err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
613 trace->nr = nr; in BPF_CALL_4()
622 err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
643 static void *stack_map_lookup_elem(struct bpf_map *map, void *key) in stack_map_lookup_elem()
645 return ERR_PTR(-EOPNOTSUPP); in stack_map_lookup_elem()
653 u32 id = *(u32 *)key, trace_len; in bpf_stackmap_copy() local
655 if (unlikely(id >= smap->n_buckets)) in bpf_stackmap_copy()
656 return -ENOENT; in bpf_stackmap_copy()
658 bucket = xchg(&smap->buckets[id], NULL); in bpf_stackmap_copy()
660 return -ENOENT; in bpf_stackmap_copy()
662 trace_len = bucket->nr * stack_map_data_size(map); in bpf_stackmap_copy()
663 memcpy(value, bucket->data, trace_len); in bpf_stackmap_copy()
664 memset(value + trace_len, 0, map->value_size - trace_len); in bpf_stackmap_copy()
666 old_bucket = xchg(&smap->buckets[id], bucket); in bpf_stackmap_copy()
668 pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); in bpf_stackmap_copy()
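
bpf_stackmap_copy() services map lookups from the bpf(2) syscall and, as the memset above shows, zero-fills the tail of the value, so user space can stop at the first zero IP. A hypothetical libbpf-based reader (names are illustrative):

    #include <stdio.h>
    #include <bpf/bpf.h>

    #define MAX_FRAMES 127                  /* matches PERF_MAX_STACK_DEPTH */

    static void print_stack(int stack_map_fd, __u32 stack_id)
    {
        __u64 ips[MAX_FRAMES] = {};

        if (bpf_map_lookup_elem(stack_map_fd, &stack_id, ips))
            return;                         /* no such id: out of range or recycled */

        for (int i = 0; i < MAX_FRAMES && ips[i]; i++)
            printf("  %#llx\n", (unsigned long long)ips[i]);
    }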
672 static int stack_map_get_next_key(struct bpf_map *map, void *key, in stack_map_get_next_key()
677 u32 id; in stack_map_get_next_key() local
682 id = 0; in stack_map_get_next_key()
684 id = *(u32 *)key; in stack_map_get_next_key()
685 if (id >= smap->n_buckets || !smap->buckets[id]) in stack_map_get_next_key()
686 id = 0; in stack_map_get_next_key()
688 id++; in stack_map_get_next_key()
691 while (id < smap->n_buckets && !smap->buckets[id]) in stack_map_get_next_key()
692 id++; in stack_map_get_next_key()
694 if (id >= smap->n_buckets) in stack_map_get_next_key()
695 return -ENOENT; in stack_map_get_next_key()
697 *(u32 *)next_key = id; in stack_map_get_next_key()
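
Since a NULL key makes the kernel start scanning from bucket 0, user space can walk every populated id with bpf_map_get_next_key(); deleting ids it has consumed returns their buckets to the freelist (see stack_map_delete_elem() below). A hypothetical iteration loop:

    #include <bpf/bpf.h>

    static void walk_stack_ids(int stack_map_fd)
    {
        __u32 id, next;
        /* NULL first key => kernel returns the first occupied bucket */
        int err = bpf_map_get_next_key(stack_map_fd, NULL, &next);

        while (!err) {
            id = next;
            /* consume the trace, e.g. with the print_stack() sketch above */
            bpf_map_delete_elem(stack_map_fd, &id);     /* optional: recycle the bucket */
            err = bpf_map_get_next_key(stack_map_fd, &id, &next);
        }
    }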
701 static long stack_map_update_elem(struct bpf_map *map, void *key, void *value, in stack_map_update_elem()
704 return -EINVAL; in stack_map_update_elem()
708 static long stack_map_delete_elem(struct bpf_map *map, void *key) in stack_map_delete_elem()
712 u32 id = *(u32 *)key; in stack_map_delete_elem() local
714 if (unlikely(id >= smap->n_buckets)) in stack_map_delete_elem()
715 return -E2BIG; in stack_map_delete_elem()
717 old_bucket = xchg(&smap->buckets[id], NULL); in stack_map_delete_elem()
719 pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); in stack_map_delete_elem()
722 return -ENOENT; in stack_map_delete_elem()
726 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
727 static void stack_map_free(struct bpf_map *map) in stack_map_free()
731 bpf_map_area_free(smap->elems); in stack_map_free()
732 pcpu_freelist_destroy(&smap->freelist); in stack_map_free()
737 static u64 stack_map_mem_usage(const struct bpf_map *map) in stack_map_mem_usage()
740 u64 value_size = map->value_size; in stack_map_mem_usage()
741 u64 n_buckets = smap->n_buckets; in stack_map_mem_usage()
742 u64 entries = map->max_entries; in stack_map_mem_usage()
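
The remainder of this function (not captured by the match) sums the map structure, the n_buckets bucket-pointer table, and the preallocated element area from prealloc_elems_and_freelist() above, i.e. max_entries * (sizeof(struct stack_map_bucket) + value_size). As a rough worked example with illustrative sizes: max_entries = 10000 and value_size = 127 * 8 = 1016 bytes preallocate on the order of 10000 * 1016 ≈ 10 MB of element storage (plus bucket headers and pointers) before any stack trace is ever recorded.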