// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/hashmap.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;
static bool has_slab_iter;
static struct hashmap slab_hash;

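/* slab_hash maps a slab cache ID to its data; the IDs serve directly as hash values */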
static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
{
        return key;
}

static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
{
        return key1 == key2;
}

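/*
 * Check if the kernel supports the kmem_cache BPF iterator by looking up
 * its BTF type.  If it is missing, disable the slab_cache_iter program so
 * that the skeleton can still be loaded on older kernels.
 */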
static void check_slab_cache_iter(struct lock_contention *con)
{
        struct btf *btf = btf__load_vmlinux_btf();
        s32 ret;

        hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);

        if (btf == NULL) {
                pr_debug("BTF loading failed: %s\n", strerror(errno));
                return;
        }

        ret = btf__find_by_name_kind(btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
        if (ret < 0) {
                bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
                pr_debug("slab cache iterator is not available: %d\n", ret);
                goto out;
        }

        has_slab_iter = true;

        bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
out:
        btf__free(btf);
}

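/*
 * Trigger the (already attached) slab cache iterator and save each cache's
 * ID and data in slab_hash for name lookup at report time.
 */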
static void run_slab_cache_iter(void)
{
        int fd;
        char buf[256];
        long key, *prev_key;

        if (!has_slab_iter)
                return;

        fd = bpf_iter_create(bpf_link__fd(skel->links.slab_cache_iter));
        if (fd < 0) {
                pr_debug("cannot create slab cache iter: %d\n", fd);
                return;
        }

        /* This will run the BPF program */
        while (read(fd, buf, sizeof(buf)) > 0)
                continue;

        close(fd);

        /* Read the slab cache map and build a hash with IDs */
        fd = bpf_map__fd(skel->maps.slab_caches);
        prev_key = NULL;
        while (!bpf_map_get_next_key(fd, prev_key, &key)) {
                struct slab_cache_data *data;

                data = malloc(sizeof(*data));
                if (data == NULL)
                        break;

                if (bpf_map_lookup_elem(fd, &key, data) < 0) {
                        /* do not leak the entry allocated above */
                        free(data);
                        break;
                }

                hashmap__add(&slab_hash, data->id, data);
                prev_key = &key;
        }
}

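/* free the per-cache entries allocated in run_slab_cache_iter() */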
static void exit_slab_cache_iter(void)
{
        struct hashmap_entry *cur;
        unsigned bkt;

        hashmap__for_each_entry(&slab_hash, cur, bkt)
                free(cur->pvalue);

        hashmap__clear(&slab_hash);
}

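/*
 * Open the skeleton, size the BPF maps for the requested aggregation mode
 * and filters, load and attach the programs, and populate the filter maps.
 * Returns 0 on success, -1 on failure.
 */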
int lock_contention_prepare(struct lock_contention *con)
{
        int i, fd;
        int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1, nslabs = 1;
        struct evlist *evlist = con->evlist;
        struct target *target = con->target;

        skel = lock_contention_bpf__open();
        if (!skel) {
                pr_err("Failed to open lock-contention BPF skeleton\n");
                return -1;
        }

        bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
        bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
        bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

        if (con->aggr_mode == LOCK_AGGR_TASK)
                bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
        else
                bpf_map__set_max_entries(skel->maps.task_data, 1);

        if (con->save_callstack) {
                bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
                if (con->owner) {
                        bpf_map__set_value_size(skel->maps.stack_buf, con->max_stack * sizeof(u64));
                        bpf_map__set_key_size(skel->maps.owner_stacks,
                                              con->max_stack * sizeof(u64));
                        bpf_map__set_max_entries(skel->maps.owner_stacks, con->map_nr_entries);
                        bpf_map__set_max_entries(skel->maps.owner_data, con->map_nr_entries);
                        bpf_map__set_max_entries(skel->maps.owner_stat, con->map_nr_entries);
                        skel->rodata->max_stack = con->max_stack;
                }
        } else {
                bpf_map__set_max_entries(skel->maps.stacks, 1);
        }

        if (target__has_cpu(target)) {
                skel->rodata->has_cpu = 1;
                ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
        }
        if (target__has_task(target)) {
                skel->rodata->has_task = 1;
                ntasks = perf_thread_map__nr(evlist->core.threads);
        }
        if (con->filters->nr_types) {
                skel->rodata->has_type = 1;
                ntypes = con->filters->nr_types;
        }
        if (con->filters->nr_cgrps) {
                skel->rodata->has_cgroup = 1;
                ncgrps = con->filters->nr_cgrps;
        }

        /* resolve lock name filters to addr */
        if (con->filters->nr_syms) {
                struct symbol *sym;
                struct map *kmap;
                unsigned long *addrs;

                for (i = 0; i < con->filters->nr_syms; i++) {
                        sym = machine__find_kernel_symbol_by_name(con->machine,
                                                                  con->filters->syms[i],
                                                                  &kmap);
                        if (sym == NULL) {
                                pr_warning("ignore unknown symbol: %s\n",
                                           con->filters->syms[i]);
                                continue;
                        }

                        addrs = realloc(con->filters->addrs,
                                        (con->filters->nr_addrs + 1) * sizeof(*addrs));
                        if (addrs == NULL) {
                                pr_warning("memory allocation failure\n");
                                continue;
                        }

                        addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
                        con->filters->addrs = addrs;
                }
                naddrs = con->filters->nr_addrs;
                skel->rodata->has_addr = 1;
        }

        bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
        bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
        bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
        bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
        bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

        skel->rodata->stack_skip = con->stack_skip;
        skel->rodata->aggr_mode = con->aggr_mode;
        skel->rodata->needs_callstack = con->save_callstack;
        skel->rodata->lock_owner = con->owner;

        if (con->aggr_mode == LOCK_AGGR_CGROUP || con->filters->nr_cgrps) {
                if (cgroup_is_v2("perf_event"))
                        skel->rodata->use_cgroup_v2 = 1;
        }

        check_slab_cache_iter(con);

        if (con->filters->nr_slabs && has_slab_iter) {
                skel->rodata->has_slab = 1;
                nslabs = con->filters->nr_slabs;
        }

        bpf_map__set_max_entries(skel->maps.slab_filter, nslabs);

        if (lock_contention_bpf__load(skel) < 0) {
                pr_err("Failed to load lock-contention BPF skeleton\n");
                return -1;
        }

        if (target__has_cpu(target)) {
                u32 cpu;
                u8 val = 1;

                fd = bpf_map__fd(skel->maps.cpu_filter);

                for (i = 0; i < ncpus; i++) {
                        cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
                        bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
                }
        }

        if (target__has_task(target)) {
                u32 pid;
                u8 val = 1;

                fd = bpf_map__fd(skel->maps.task_filter);

                for (i = 0; i < ntasks; i++) {
                        pid = perf_thread_map__pid(evlist->core.threads, i);
                        bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
                }
        }

        if (target__none(target) && evlist->workload.pid > 0) {
                u32 pid = evlist->workload.pid;
                u8 val = 1;

                fd = bpf_map__fd(skel->maps.task_filter);
                bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
        }

        if (con->filters->nr_types) {
                u8 val = 1;

                fd = bpf_map__fd(skel->maps.type_filter);

                for (i = 0; i < con->filters->nr_types; i++)
                        bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
        }

        if (con->filters->nr_addrs) {
                u8 val = 1;

                fd = bpf_map__fd(skel->maps.addr_filter);

                for (i = 0; i < con->filters->nr_addrs; i++)
                        bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
        }

        if (con->filters->nr_cgrps) {
                u8 val = 1;

                fd = bpf_map__fd(skel->maps.cgroup_filter);

                for (i = 0; i < con->filters->nr_cgrps; i++)
                        bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
        }

        if (con->aggr_mode == LOCK_AGGR_CGROUP)
                read_all_cgroups(&con->cgroups);

        bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

        lock_contention_bpf__attach(skel);

        /* run the slab iterator after attaching */
        run_slab_cache_iter();

        if (con->filters->nr_slabs) {
                u8 val = 1;
                int cache_fd;
                long key, *prev_key;

                fd = bpf_map__fd(skel->maps.slab_filter);

                /* Scan the slab cache map and mark the caches whose name matches a filter */
                cache_fd = bpf_map__fd(skel->maps.slab_caches);
                prev_key = NULL;
                while (!bpf_map_get_next_key(cache_fd, prev_key, &key)) {
                        struct slab_cache_data data;

                        if (bpf_map_lookup_elem(cache_fd, &key, &data) < 0)
                                break;

                        for (i = 0; i < con->filters->nr_slabs; i++) {
                                if (!strcmp(con->filters->slabs[i], data.name)) {
                                        bpf_map_update_elem(fd, &key, &val, BPF_ANY);
                                        break;
                                }
                        }
                        prev_key = &key;
                }
        }

        return 0;
}

/*
 * Run the BPF program directly using BPF_PROG_TEST_RUN to record the end
 * timestamp (in ktime) so that account_end_timestamp() can compute deltas
 * for still-pending entries.
 */
static void mark_end_timestamp(void)
{
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                .flags = BPF_F_TEST_RUN_ON_CPU,
        );
        int prog_fd = bpf_program__fd(skel->progs.end_timestamp);

        bpf_prog_test_run_opts(prog_fd, &opts);
}

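/*
 * Fold a still-pending tstamp entry (no contention_end seen) into an
 * existing entry in the lock stat map, using end_ts as the end time.
 */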
static void update_lock_stat(int map_fd, int pid, u64 end_ts,
                             enum lock_aggr_mode aggr_mode,
                             struct tstamp_data *ts_data)
{
        u64 delta;
        struct contention_key stat_key = {};
        struct contention_data stat_data;

        if (ts_data->timestamp >= end_ts)
                return;

        delta = end_ts - ts_data->timestamp;

        switch (aggr_mode) {
        case LOCK_AGGR_CALLER:
                stat_key.stack_id = ts_data->stack_id;
                break;
        case LOCK_AGGR_TASK:
                stat_key.pid = pid;
                break;
        case LOCK_AGGR_ADDR:
                stat_key.lock_addr_or_cgroup = ts_data->lock;
                break;
        case LOCK_AGGR_CGROUP:
                /* TODO */
                return;
        default:
                return;
        }

        if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
                return;

        stat_data.total_time += delta;
        stat_data.count++;

        if (delta > stat_data.max_time)
                stat_data.max_time = delta;
        if (delta < stat_data.min_time)
                stat_data.min_time = delta;

        bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
}

/*
 * Account entries in the tstamp map (which didn't see the corresponding
 * lock:contention_end tracepoint) using end_ts.
 */
static void account_end_timestamp(struct lock_contention *con)
{
        int ts_fd, stat_fd;
        int *prev_key, key;
        u64 end_ts = skel->bss->end_ts;
        int total_cpus;
        enum lock_aggr_mode aggr_mode = con->aggr_mode;
        struct tstamp_data ts_data, *cpu_data;

        /* Iterate per-task tstamp map (key = TID) */
        ts_fd = bpf_map__fd(skel->maps.tstamp);
        stat_fd = bpf_map__fd(skel->maps.lock_stat);

        prev_key = NULL;
        while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
                if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) {
                        int pid = key;

                        if (aggr_mode == LOCK_AGGR_TASK && con->owner)
                                pid = ts_data.flags;

                        update_lock_stat(stat_fd, pid, end_ts, aggr_mode,
                                         &ts_data);
                }

                prev_key = &key;
        }

        /* Now check the per-cpu tstamp map, which has no TID. */
        if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP)
                return;

        total_cpus = cpu__max_cpu().cpu;
        ts_fd = bpf_map__fd(skel->maps.tstamp_cpu);

        cpu_data = calloc(total_cpus, sizeof(*cpu_data));
        if (cpu_data == NULL)
                return;

        prev_key = NULL;
        while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
                if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0)
                        goto next;

                for (int i = 0; i < total_cpus; i++) {
                        if (cpu_data[i].lock == 0)
                                continue;

                        update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
                                         &cpu_data[i]);
                }

next:
                prev_key = &key;
        }
        free(cpu_data);
}

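/* start/stop flip the BPF-side 'enabled' switch; stop also marks the end time */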
int lock_contention_start(void)
{
        skel->bss->enabled = 1;
        return 0;
}

int lock_contention_stop(void)
{
        skel->bss->enabled = 0;
        mark_end_timestamp();
        return 0;
}

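/*
 * Resolve a display name for the given key according to the aggregation
 * mode: task comm, lock symbol or slab cache name, cgroup name, or the
 * first caller symbol that is not a lock-internal function.
 */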
static const char *lock_contention_get_name(struct lock_contention *con,
                                            struct contention_key *key,
                                            u64 *stack_trace, u32 flags)
{
        int idx = 0;
        u64 addr;
        static char name_buf[KSYM_NAME_LEN];
        struct symbol *sym;
        struct map *kmap;
        struct machine *machine = con->machine;

        if (con->aggr_mode == LOCK_AGGR_TASK) {
                struct contention_task_data task;
                int pid = key->pid;
                int task_fd = bpf_map__fd(skel->maps.task_data);

                /* do not update idle comm which contains CPU number */
                if (pid) {
                        struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);

                        if (t != NULL &&
                            !bpf_map_lookup_elem(task_fd, &pid, &task) &&
                            thread__set_comm(t, task.comm, /*timestamp=*/0)) {
                                snprintf(name_buf, sizeof(name_buf), "%s", task.comm);
                                return name_buf;
                        }
                }
                return "";
        }

        if (con->aggr_mode == LOCK_AGGR_ADDR) {
                int lock_fd = bpf_map__fd(skel->maps.lock_syms);
                struct slab_cache_data *slab_data;

                /* per-process locks set upper bits of the flags */
                if (flags & LCD_F_MMAP_LOCK)
                        return "mmap_lock";
                if (flags & LCD_F_SIGHAND_LOCK)
                        return "siglock";

                /* global locks with symbols */
                sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
                if (sym)
                        return sym->name;

                /* try semi-global locks collected separately */
                if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
                        if (flags == LOCK_CLASS_RQLOCK)
                                return "rq_lock";
                }

                /* look up slab_hash for dynamic locks in a slab object */
                if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
                        snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
                        return name_buf;
                }

                return "";
        }

        if (con->aggr_mode == LOCK_AGGR_CGROUP) {
                u64 cgrp_id = key->lock_addr_or_cgroup;
                struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

                if (cgrp)
                        return cgrp->name;

                snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
                return name_buf;
        }

        /* LOCK_AGGR_CALLER: skip lock internal functions */
        while (machine__is_lock_function(machine, stack_trace[idx]) &&
               idx < con->max_stack - 1)
                idx++;

        addr = stack_trace[idx];
        sym = machine__find_kernel_symbol(machine, addr, &kmap);

        if (sym) {
                unsigned long offset;

                offset = map__map_ip(kmap, addr) - sym->start;

                if (offset == 0)
                        return sym->name;

                snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
        } else {
                snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
        }

        return name_buf;
}

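/*
 * Pop one entry from the owner stack maps and convert it to a lock_stat.
 * Returns NULL when the maps are empty or on allocation failure.
 */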
struct lock_stat *pop_owner_stack_trace(struct lock_contention *con)
{
        int stacks_fd, stat_fd;
        u64 *stack_trace = NULL;
        s32 stack_id;
        struct contention_key ckey = {};
        struct contention_data cdata = {};
        size_t stack_size = con->max_stack * sizeof(*stack_trace);
        struct lock_stat *st = NULL;

        stacks_fd = bpf_map__fd(skel->maps.owner_stacks);
        stat_fd = bpf_map__fd(skel->maps.owner_stat);
        /* bpf_map__fd() returns a negative errno on failure */
        if (stacks_fd < 0 || stat_fd < 0)
                goto out_err;

        stack_trace = zalloc(stack_size);
        if (stack_trace == NULL)
                goto out_err;

        if (bpf_map_get_next_key(stacks_fd, NULL, stack_trace))
                goto out_err;

        bpf_map_lookup_elem(stacks_fd, stack_trace, &stack_id);
        ckey.stack_id = stack_id;
        bpf_map_lookup_elem(stat_fd, &ckey, &cdata);

        st = zalloc(sizeof(struct lock_stat));
        if (!st)
                goto out_err;

        st->name = strdup(stack_trace[0] ? lock_contention_get_name(con, NULL, stack_trace, 0) :
                          "unknown");
        if (!st->name)
                goto out_err;

        st->flags = cdata.flags;
        st->nr_contended = cdata.count;
        st->wait_time_total = cdata.total_time;
        st->wait_time_max = cdata.max_time;
        st->wait_time_min = cdata.min_time;
        st->callstack = stack_trace;

        if (cdata.count)
                st->avg_wait_time = cdata.total_time / cdata.count;

        bpf_map_delete_elem(stacks_fd, stack_trace);
        bpf_map_delete_elem(stat_fd, &ckey);

        return st;

out_err:
        free(stack_trace);
        free(st);

        return NULL;
}

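/*
 * Drain the BPF lock stat map into the userspace lock_stat table,
 * merging duplicate keys and applying the callstack filter.
 */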
int lock_contention_read(struct lock_contention *con)
{
        int fd, stack, err = 0;
        struct contention_key *prev_key, key = {};
        struct contention_data data = {};
        struct lock_stat *st = NULL;
        struct machine *machine = con->machine;
        u64 *stack_trace;
        size_t stack_size = con->max_stack * sizeof(*stack_trace);

        fd = bpf_map__fd(skel->maps.lock_stat);
        stack = bpf_map__fd(skel->maps.stacks);

        con->fails.task = skel->bss->task_fail;
        con->fails.stack = skel->bss->stack_fail;
        con->fails.time = skel->bss->time_fail;
        con->fails.data = skel->bss->data_fail;

        stack_trace = zalloc(stack_size);
        if (stack_trace == NULL)
                return -1;

        account_end_timestamp(con);

        if (con->aggr_mode == LOCK_AGGR_TASK) {
                struct thread *idle = machine__findnew_thread(machine,
                                                              /*pid=*/0,
                                                              /*tid=*/0);
                thread__set_comm(idle, "swapper", /*timestamp=*/0);
        }

        if (con->aggr_mode == LOCK_AGGR_ADDR) {
                DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                        .flags = BPF_F_TEST_RUN_ON_CPU,
                );
                int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

                bpf_prog_test_run_opts(prog_fd, &opts);
        }

        /* make sure it loads the kernel map */
        maps__load_first(machine->kmaps);

        prev_key = NULL;
        while (!bpf_map_get_next_key(fd, prev_key, &key)) {
                s64 ls_key;
                const char *name;

                /* to handle errors in the loop body */
                err = -1;

                bpf_map_lookup_elem(fd, &key, &data);
                if (con->save_callstack) {
                        bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

                        if (!match_callstack_filter(machine, stack_trace, con->max_stack)) {
                                con->nr_filtered += data.count;
                                goto next;
                        }
                }

                switch (con->aggr_mode) {
                case LOCK_AGGR_CALLER:
                        ls_key = key.stack_id;
                        break;
                case LOCK_AGGR_TASK:
                        ls_key = key.pid;
                        break;
                case LOCK_AGGR_ADDR:
                case LOCK_AGGR_CGROUP:
                        ls_key = key.lock_addr_or_cgroup;
                        break;
                default:
                        goto next;
                }

                st = lock_stat_find(ls_key);
                if (st != NULL) {
                        st->wait_time_total += data.total_time;
                        if (st->wait_time_max < data.max_time)
                                st->wait_time_max = data.max_time;
                        if (st->wait_time_min > data.min_time)
                                st->wait_time_min = data.min_time;

                        st->nr_contended += data.count;
                        if (st->nr_contended)
                                st->avg_wait_time = st->wait_time_total / st->nr_contended;
                        goto next;
                }

                name = lock_contention_get_name(con, &key, stack_trace, data.flags);
                st = lock_stat_findnew(ls_key, name, data.flags);
                if (st == NULL)
                        break;

                st->nr_contended = data.count;
                st->wait_time_total = data.total_time;
                st->wait_time_max = data.max_time;
                st->wait_time_min = data.min_time;

                if (data.count)
                        st->avg_wait_time = data.total_time / data.count;

                if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
                        st->callstack = memdup(stack_trace, stack_size);
                        if (st->callstack == NULL)
                                break;
                }

next:
                prev_key = &key;

                /* we're fine now, reset the error */
                err = 0;
        }

        free(stack_trace);

        return err;
}

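/* disable and destroy the BPF skeleton, and release cgroup and slab state */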
int lock_contention_finish(struct lock_contention *con)
{
        if (skel) {
                skel->bss->enabled = 0;
                lock_contention_bpf__destroy(skel);
        }

        while (!RB_EMPTY_ROOT(&con->cgroups)) {
                struct rb_node *node = rb_first(&con->cgroups);
                struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

                rb_erase(node, &con->cgroups);
                cgroup__put(cgrp);
        }

        exit_slab_cache_iter();

        return 0;
}