// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
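
/*
 * Per-column width bookkeeping: hists->col_len[] tracks the widest
 * string seen so far for each output column so that the stdio/TUI
 * browsers can align their output.
 */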
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
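
/*
 * Grow the column widths to fit every field of @h: symbol, comm, dso,
 * branch and memory info, srcline, cgroup and so on.  Fixed-width
 * columns get their static minimum widths here as well.
 */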
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(map__dso(h->ms.map));
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
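
/*
 * Quantize a sample timestamp down to the start of its slot so that
 * samples falling into the same symbol_conf.time_quantum interval are
 * aggregated into the same entry.
 */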
static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;
	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
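
/*
 * Age an entry (and, in hierarchy mode, its children) by scaling its
 * period by 7/8, as 'perf top' does to fade out entries that stop
 * getting samples.  Returns true once the period reaches zero, meaning
 * the entry can be deleted.
 */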
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);
		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */
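
/*
 * Turn the shallow copy of @template into a self-contained entry: take
 * references on the maps and thread, and duplicate the branch info,
 * raw data and srcline that were only borrowed from the sample.
 */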
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	he->ms.maps = maps__get(he->ms.maps);
	he->ms.map = map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
		he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(symbol_conf.res_sample,
					 sizeof(struct res_sample));
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	he->thread = thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map_symbol__exit(&he->mem_info->iaddr.ms);
		map_symbol__exit(&he->mem_info->daddr.ms);
	}
err:
	map_symbol__exit(&he->ms);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};
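
/*
 * Allocate a new entry via the template's hist_entry_ops (falling back
 * to the default zalloc()/free() pair), reserving room for a
 * callchain_root right after the entry when callchains are in use.
 */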
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
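
/*
 * Look up @entry in the entries_in rbtree using the configured sort
 * keys: aggregate the period into an existing entry on a match, or
 * insert a freshly allocated copy otherwise.
 */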
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       const struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);
		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			kvm_info__zput(entry->kvm_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
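
/*
 * Return a value in [0, high) without modulo bias: random() results
 * below the threshold would make the final modulo favor small values,
 * so they are rejected and a new number is drawn.
 */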
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;
	for (;;) {
		unsigned r = random();
		if (r >= thresh)
			return r % high;
	}
}
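
/*
 * Remember this sample's time/cpu/tid for later inspection: fill the
 * res_samples array until all symbol_conf.res_sample slots are used,
 * then overwrite a randomly chosen slot so that a spread of samples is
 * retained.
 */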
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}
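
/*
 * Build an on-stack template entry from the resolved address location
 * and the sample, then hand it to hists__findnew_entry() for
 * aggregation.  All of the hists__add_entry*() variants funnel
 * through here.
 */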
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct kvm_info *ki,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps	= al->maps,
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.code_page_size = sample->code_page_size,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.kvm_info = ki,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
		.simd_flags = sample->simd_flags,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and in he_stat__add_period().
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	maps__put(al->maps);
	al->maps = maps__get(bi[i].to.ms.maps);
	map__put(al->map);
	al->map = map__get(bi[i].to.ms.map);
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
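
/*
 * Cumulative (children) mode: in addition to the entry for the sample
 * itself, add one entry per callchain parent so that callers
 * accumulate the periods of their callees.  The he_cache allocated
 * below deduplicates entries within a single callchain so that cycles
 * and recursion cannot push an entry past 100% overhead.
 */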
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;
	struct callchain_cursor *cursor = get_tls_callchain_cursor();

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_commit(cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * cumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(get_tls_callchain_cursor());

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(get_tls_callchain_cursor());
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
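
/*
 * Fast path for the duplicate check in
 * iter_add_next_cumulative_entry(): when the sym sort key is in use,
 * a symbol (or raw IP) comparison is much cheaper than the full
 * sort-key comparison done by hist_entry__cmp().
 */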
static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
	bool fast = hists__has(he_tmp.hists, sym);

	if (tls_cursor == NULL)
		return -ENOMEM;

	callchain_cursor_snapshot(&cursor, tls_cursor);

	callchain_cursor_advance(tls_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * For most cases, there are no duplicate entries in the
		 * callchain.  The symbols are usually different.  Do a
		 * quick check for symbols first.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
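
/*
 * Drive one sample through the chosen hist_iter_ops state machine:
 * resolve its callchain, add the single entry, then keep calling
 * next_entry()/add_next_entry() until the iterator is exhausted,
 * invoking add_entry_cb for each entry created along the way.
 */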
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map_symbol__exit(&he->ms);

	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree_srcline(&he->branch_info->srcline_from);
		zfree_srcline(&he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map_symbol__exit(&he->mem_info->iaddr.ms);
		map_symbol__exit(&he->mem_info->daddr.ms);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	if (he->kvm_info)
		kvm_info__zput(he->kvm_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	zfree_srcline(&he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
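
/*
 * Insert @he into one level of the hierarchy rooted at @root, merging
 * its stats into an existing sibling when the sort keys of this level
 * match, and cloning the entry into that level otherwise.
 */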
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			struct callchain_cursor *cursor = get_tls_callchain_cursor();

			if (cursor == NULL)
				return -1;

			callchain_cursor_reset(cursor);
			if (callchain_merge(cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				struct callchain_cursor *cursor = get_tls_callchain_cursor();

				if (cursor != NULL) {
					callchain_cursor_reset(cursor);
					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
						ret = -1;
				} else {
					ret = 0;
				}
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}
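
/*
 * hists->entries_in is double buffered (entries_in_array[2]) so that
 * new samples can keep being inserted into one tree while another
 * thread (e.g. the 'perf top' display thread) collapses the other.
 * Swap the buffers under hists->lock and return the tree to collapse.
 */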
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
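
/*
 * Comparison for the output stage, using the ->sort callbacks of the
 * configured formats (typically ordering by period), as opposed to
 * hist_entry__cmp()/__collapse() which compare sort keys during
 * aggregation.
 */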
hist_entry__sort(struct hist_entry * a,struct hist_entry * b)1715 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1716 {
1717 struct hists *hists = a->hists;
1718 struct perf_hpp_fmt *fmt;
1719 int64_t cmp = 0;
1720
1721 hists__for_each_sort_list(hists, fmt) {
1722 if (perf_hpp__should_skip(fmt, a->hists))
1723 continue;
1724
1725 cmp = fmt->sort(fmt, a, b);
1726 if (cmp)
1727 break;
1728 }
1729
1730 return cmp;
1731 }
1732
hists__reset_filter_stats(struct hists * hists)1733 static void hists__reset_filter_stats(struct hists *hists)
1734 {
1735 hists->nr_non_filtered_entries = 0;
1736 hists->stats.total_non_filtered_period = 0;
1737 }
1738
hists__reset_stats(struct hists * hists)1739 void hists__reset_stats(struct hists *hists)
1740 {
1741 hists->nr_entries = 0;
1742 hists->stats.total_period = 0;
1743
1744 hists__reset_filter_stats(hists);
1745 }
1746
hists__inc_filter_stats(struct hists * hists,struct hist_entry * h)1747 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1748 {
1749 hists->nr_non_filtered_entries++;
1750 hists->stats.total_non_filtered_period += h->stat.period;
1751 }
1752
hists__inc_stats(struct hists * hists,struct hist_entry * h)1753 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1754 {
1755 if (!h->filtered)
1756 hists__inc_filter_stats(hists, h);
1757
1758 hists->nr_entries++;
1759 hists->stats.total_period += h->stat.period;
1760 }
1761
hierarchy_recalc_total_periods(struct hists * hists)1762 static void hierarchy_recalc_total_periods(struct hists *hists)
1763 {
1764 struct rb_node *node;
1765 struct hist_entry *he;
1766
1767 node = rb_first_cached(&hists->entries);
1768
1769 hists->stats.total_period = 0;
1770 hists->stats.total_non_filtered_period = 0;
1771
1772 /*
1773 * recalculate total period using top-level entries only
1774 * since lower level entries only see non-filtered entries
1775 * but upper level entries have sum of both entries.
1776 */
1777 while (node) {
1778 he = rb_entry(node, struct hist_entry, rb_node);
1779 node = rb_next(node);
1780
1781 hists->stats.total_period += he->stat.period;
1782 if (!he->filtered)
1783 hists->stats.total_non_filtered_period += he->stat.period;
1784 }
1785 }
1786
hierarchy_insert_output_entry(struct rb_root_cached * root,struct hist_entry * he)1787 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1788 struct hist_entry *he)
1789 {
1790 struct rb_node **p = &root->rb_root.rb_node;
1791 struct rb_node *parent = NULL;
1792 struct hist_entry *iter;
1793 struct perf_hpp_fmt *fmt;
1794 bool leftmost = true;
1795
1796 while (*p != NULL) {
1797 parent = *p;
1798 iter = rb_entry(parent, struct hist_entry, rb_node);
1799
1800 if (hist_entry__sort(he, iter) > 0)
1801 p = &parent->rb_left;
1802 else {
1803 p = &parent->rb_right;
1804 leftmost = false;
1805 }
1806 }
1807
1808 rb_link_node(&he->rb_node, parent, p);
1809 rb_insert_color_cached(&he->rb_node, root, leftmost);
1810
1811 /* update column width of dynamic entry */
1812 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1813 if (fmt->init)
1814 fmt->init(fmt, he);
1815 }
1816 }
1817
hists__hierarchy_output_resort(struct hists * hists,struct ui_progress * prog,struct rb_root_cached * root_in,struct rb_root_cached * root_out,u64 min_callchain_hits,bool use_callchain)1818 static void hists__hierarchy_output_resort(struct hists *hists,
1819 struct ui_progress *prog,
1820 struct rb_root_cached *root_in,
1821 struct rb_root_cached *root_out,
1822 u64 min_callchain_hits,
1823 bool use_callchain)
1824 {
1825 struct rb_node *node;
1826 struct hist_entry *he;
1827
1828 *root_out = RB_ROOT_CACHED;
1829 node = rb_first_cached(root_in);
1830
1831 while (node) {
1832 he = rb_entry(node, struct hist_entry, rb_node_in);
1833 node = rb_next(node);
1834
1835 hierarchy_insert_output_entry(root_out, he);
1836
1837 if (prog)
1838 ui_progress__update(prog, 1);
1839
1840 hists->nr_entries++;
1841 if (!he->filtered) {
1842 hists->nr_non_filtered_entries++;
1843 hists__calc_col_len(hists, he);
1844 }
1845
1846 if (!he->leaf) {
1847 hists__hierarchy_output_resort(hists, prog,
1848 &he->hroot_in,
1849 &he->hroot_out,
1850 min_callchain_hits,
1851 use_callchain);
1852 continue;
1853 }
1854
1855 if (!use_callchain)
1856 continue;
1857
1858 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1859 u64 total = he->stat.period;
1860
1861 if (symbol_conf.cumulate_callchain)
1862 total = he->stat_acc->period;
1863
1864 min_callchain_hits = total * (callchain_param.min_percent / 100);
1865 }
1866
1867 callchain_param.sort(&he->sorted_chain, he->callchain,
1868 min_callchain_hits, &callchain_param);
1869 }
1870 }
1871
static void __hists__insert_output_entry(struct rb_root_cached *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;
        bool leftmost = true;

        if (use_callchain) {
                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color_cached(&he->rb_node, entries, leftmost);

        /* update column width of dynamic entries */
        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
                if (fmt->init)
                        fmt->init(fmt, he);
        }
}

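/*
 * Rebuild hists->entries (the output tree) from the collapsed/input
 * tree: reset the per-hists stats and column widths, then reinsert
 * every entry in sort order.  The optional @cb callback lets callers
 * drop entries (by returning true) before they are inserted; @prog,
 * when non-NULL, drives a UI progress bar.
 */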
static void output_resort(struct hists *hists, struct ui_progress *prog,
                          bool use_callchain, hists__resort_cb_t cb,
                          void *cb_arg)
{
        struct rb_root_cached *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 callchain_total;
        u64 min_callchain_hits;

        callchain_total = hists->callchain_period;
        if (symbol_conf.filter_relative)
                callchain_total = hists->callchain_non_filtered_period;

        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        if (symbol_conf.report_hierarchy) {
                hists__hierarchy_output_resort(hists, prog,
                                               &hists->entries_collapsed,
                                               &hists->entries,
                                               min_callchain_hits,
                                               use_callchain);
                hierarchy_recalc_total_periods(hists);
                return;
        }

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first_cached(root);
        hists->entries = RB_ROOT_CACHED;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                if (cb && cb(n, cb_arg))
                        continue;

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
                             hists__resort_cb_t cb, void *cb_arg)
{
        bool use_callchain;

        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel__has_callchain(evsel);
        else
                use_callchain = symbol_conf.use_callchain;

        use_callchain |= symbol_conf.show_branchflag_count;

        output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
        return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
                             hists__resort_cb_t cb)
{
        output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

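/*
 * Hierarchy traversal helpers, used by the interactive UIs to walk the
 * output tree in display order: descend into an entry's hroot_out only
 * when it is unfolded (or when forced via HMD_FORCE_CHILD), and climb
 * back to the parent's next sibling once a subtree is exhausted.
 */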
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
        if (he->leaf || hmd == HMD_FORCE_SIBLING)
                return false;

        if (he->unfolded || hmd == HMD_FORCE_CHILD)
                return true;

        return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        while (can_goto_child(he, HMD_NORMAL)) {
                node = rb_last(&he->hroot_out.rb_root);
                he = rb_entry(node, struct hist_entry, rb_node);
        }
        return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        if (can_goto_child(he, hmd))
                node = rb_first_cached(&he->hroot_out);
        else
                node = rb_next(node);

        while (node == NULL) {
                he = he->parent_he;
                if (he == NULL)
                        break;

                node = rb_next(&he->rb_node);
        }
        return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        node = rb_prev(node);
        if (node)
                return rb_hierarchy_last(node);

        he = he->parent_he;
        if (he == NULL)
                return NULL;

        return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
        struct rb_node *node;
        struct hist_entry *child;
        float percent;

        if (he->leaf)
                return false;

        node = rb_first_cached(&he->hroot_out);
        child = rb_entry(node, struct hist_entry, rb_node);

        while (node && child->filtered) {
                node = rb_next(node);
                child = rb_entry(node, struct hist_entry, rb_node);
        }

        if (node)
                percent = hist_entry__get_percent_limit(child);
        else
                percent = 0;

        return node && percent >= limit;
}

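/*
 * Clear @filter's bit on @h (and, in hierarchy mode, on its parents,
 * adding the child's period back into each parent).  Only when an entry
 * has no remaining filter bits is it folded, counted in the
 * non-filtered stats and measured for column widths.
 */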
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);

        if (symbol_conf.report_hierarchy) {
                struct hist_entry *parent = h->parent_he;

                while (parent) {
                        he_stat__add_stat(&parent->stat, &h->stat);

                        parent->filtered &= ~(1 << filter);

                        if (parent->filtered)
                                goto next;

                        /* force fold unfiltered entry for simplicity */
                        parent->unfolded = false;
                        parent->has_no_entry = false;
                        parent->row_offset = 0;
                        parent->nr_rows = 0;
next:
                        parent = parent->parent_he;
                }
        }

        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->has_no_entry = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
{
        if ((hists->socket_filter > -1) &&
            (he->socket != hists->socket_filter)) {
                he->filtered |= (1 << HIST_FILTER__SOCKET);
                return true;
        }

        return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (filter(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, type);
        }
}

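/*
 * Reinsert @he into @root in sort order and recursively rebuild its
 * hroot_out the same way, since filtering may have changed the periods
 * the sort keys depend on.
 */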
static void resort_filtered_entry(struct rb_root_cached *root,
                                  struct hist_entry *he)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct rb_root_cached new_root = RB_ROOT_CACHED;
        struct rb_node *nd;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color_cached(&he->rb_node, root, leftmost);

        if (he->leaf || he->filtered)
                return;

        nd = rb_first_cached(&he->hroot_out);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase_cached(&h->rb_node, &he->hroot_out);

                resort_filtered_entry(&new_root, h);
        }

        he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
        struct rb_node *nd;
        struct rb_root_cached new_root = RB_ROOT_CACHED;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        nd = rb_first_cached(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
                int ret;

                ret = hist_entry__filter(h, type, arg);

                /*
                 * case 1. non-matching type
                 * zero out the period, set filter marker and move to child
                 */
                if (ret < 0) {
                        memset(&h->stat, 0, sizeof(h->stat));
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
                }
                /*
                 * case 2. matched type (filter out)
                 * set filter marker and move to next
                 */
                else if (ret == 1) {
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
                /*
                 * case 3. ok (not filtered)
                 * add period to hists and parents, erase the filter marker
                 * and move to next sibling
                 */
                else {
                        hists__remove_entry_filter(hists, h, type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
        }

        hierarchy_recalc_total_periods(hists);

        /*
         * Resort the output after applying a new filter, since a filter
         * at a lower hierarchy level can change periods at an upper level.
         */
        nd = rb_first_cached(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase_cached(&h->rb_node, &hists->entries);

                resort_filtered_entry(&new_root, h);
        }

        hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
                                        hists->thread_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__THREAD,
                                      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
                                        hists->dso_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__DSO,
                                      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
                                        hists->symbol_filter_str);
        else
                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
                                      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
                                        &hists->socket_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
                                      hists__filter_entry_by_socket);
}

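/*
 * nr_events[0] doubles as the running total across all PERF_RECORD_*
 * types (type 0 is unused as a record type), so both the aggregate
 * counter and the per-type counter are bumped here.
 */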
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

static void hists_stats__inc(struct hists_stats *stats)
{
        ++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
        hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        hists_stats__inc(&hists->stats);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
        hists->stats.nr_lost_samples += lost;
}

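/*
 * Add a zero-period clone of @pair to @hists (see hists__link() below):
 * the dummy keeps the sort keys of @pair but none of its counts, so a
 * matched comparison (e.g. perf diff) can show an entry that only the
 * other hists has.
 */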
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root_cached *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        bool leftmost = true;

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                if (symbol_conf.cumulate_callchain)
                        memset(he->stat_acc, 0, sizeof(he->stat));
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color_cached(&he->rb_node_in, root, leftmost);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
                                                    struct rb_root_cached *root,
                                                    struct hist_entry *pair)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct perf_hpp_fmt *fmt;
        bool leftmost = true;

        p = &root->rb_root.rb_node;
        while (*p != NULL) {
                int64_t cmp = 0;

                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, he, pair);
                        if (cmp)
                                break;
                }
                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(pair, true);
        if (he) {
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color_cached(&he->rb_node_in, root, leftmost);

                he->dummy = true;
                he->hists = hists;
                memset(&he->stat, 0, sizeof(he->stat));
                hists__inc_stats(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (hists__has(hists, need_collapse))
                n = hists->entries_collapsed.rb_root.rb_node;
        else
                n = hists->entries_in->rb_root.rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
                                                      struct hist_entry *he)
{
        struct rb_node *n = root->rb_root.rb_node;

        while (n) {
                struct hist_entry *iter;
                struct perf_hpp_fmt *fmt;
                int64_t cmp = 0;

                iter = rb_entry(n, struct hist_entry, rb_node_in);
                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
                                   struct rb_root_cached *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_hierarchy_entry(other_root, pos);

                if (pair) {
                        hist_entry__add_pair(pair, pos);
                        hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
                }
        }
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root_cached *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (symbol_conf.report_hierarchy) {
                /* hierarchy report always collapses entries */
                return hists__match_hierarchy(&leader->entries_collapsed,
                                              &other->entries_collapsed);
        }

        if (hists__has(leader, need_collapse))
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

static int hists__link_hierarchy(struct hists *leader_hists,
                                 struct hist_entry *parent,
                                 struct rb_root_cached *leader_root,
                                 struct rb_root_cached *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *leader;

        for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (hist_entry__has_pairs(pos)) {
                        bool found = false;

                        list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
                                if (leader->hists == leader_hists) {
                                        found = true;
                                        break;
                                }
                        }
                        if (!found)
                                return -1;
                } else {
                        leader = add_dummy_hierarchy_entry(leader_hists,
                                                           leader_root, pos);
                        if (leader == NULL)
                                return -1;

                        /* parent must point into the leader hierarchy, not into pos's */
                        leader->parent_he = parent;

                        hist_entry__add_pair(pos, leader);
                }

                if (!pos->leaf) {
                        if (hists__link_hierarchy(leader_hists, leader,
                                                  &leader->hroot_in,
                                                  &pos->hroot_in) < 0)
                                return -1;
                }
        }
        return 0;
}

/*
 * Look for entries in the other hists that are not present in the
 * leader; for each one found, add a dummy entry on the leader hists,
 * with period=0 and nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root_cached *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (symbol_conf.report_hierarchy) {
                /* hierarchy report always collapses entries */
                return hists__link_hierarchy(leader, NULL,
                                             &leader->entries_collapsed,
                                             &other->entries_collapsed);
        }

        if (hists__has(other, need_collapse))
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}

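/*
 * Undo hists__match()/hists__link(): detach every entry in @hists from
 * its pair list.
 */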
int hists__unlink(struct hists *hists)
{
        struct rb_root_cached *root;
        struct rb_node *nd;
        struct hist_entry *pos;

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                list_del_init(&pos->pairs.node);
        }

        return 0;
}

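/*
 * Walk the sample's branch stack and feed per-branch cycle counts to
 * the annotation code.  Callers (e.g. perf report/annotate) can pass
 * @total_cycles to also accumulate a grand total for percentage
 * calculations.
 */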
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
                          struct perf_sample *sample, bool nonany_branch_mode,
                          u64 *total_cycles)
{
        struct branch_info *bi;
        struct branch_entry *entries = perf_sample__branch_entries(sample);

        /* If we have branch cycles, always annotate them. */
        if (bs && bs->nr && entries[0].flags.cycles) {
                bi = sample__resolve_bstack(sample, al);
                if (bi) {
                        struct addr_map_symbol *prev = NULL;

                        /*
                         * Ignore errors, still want to process the
                         * other entries.
                         *
                         * For non standard branch modes always
                         * force no IPC (prev == NULL)
                         *
                         * Note that perf stores branches reversed from
                         * program order!
                         */
                        for (int i = bs->nr - 1; i >= 0; i--) {
                                addr_map_symbol__account_cycles(&bi[i].from,
                                        nonany_branch_mode ? NULL : prev,
                                        bi[i].flags.cycles);
                                prev = &bi[i].to;

                                if (total_cycles)
                                        *total_cycles += bi[i].flags.cycles;
                        }
                        for (unsigned int i = 0; i < bs->nr; i++) {
                                map_symbol__exit(&bi[i].to.ms);
                                map_symbol__exit(&bi[i].from.ms);
                        }
                        free(bi);
                }
        }
}

size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
                                 bool skip_empty)
{
        struct evsel *pos;
        size_t ret = 0;

        evlist__for_each_entry(evlist, pos) {
                struct hists *hists = evsel__hists(pos);

                if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
                        continue;

                ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
                if (hists->stats.nr_samples)
                        ret += fprintf(fp, "%16s events: %10d\n",
                                       "SAMPLE", hists->stats.nr_samples);
                if (hists->stats.nr_lost_samples)
                        ret += fprintf(fp, "%16s events: %10d\n",
                                       "LOST_SAMPLES", hists->stats.nr_lost_samples);
        }

        return ret;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}

int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
        char unit;
        int printed;
        const struct dso *dso = hists->dso_filter;
        struct thread *thread = hists->thread_filter;
        int socket_id = hists->socket_filter;
        unsigned long nr_samples = hists->stats.nr_samples;
        u64 nr_events = hists->stats.total_period;
        struct evsel *evsel = hists_to_evsel(hists);
        const char *ev_name = evsel__name(evsel);
        char buf[512], sample_freq_str[64] = "";
        size_t buflen = sizeof(buf);
        char ref[30] = " show reference callgraph, ";
        bool enable_ref = false;

        if (symbol_conf.filter_relative) {
                nr_samples = hists->stats.nr_non_filtered_samples;
                nr_events = hists->stats.total_non_filtered_period;
        }

        if (evsel__is_group_event(evsel)) {
                struct evsel *pos;

                evsel__group_desc(evsel, buf, buflen);
                ev_name = buf;

                for_each_group_member(pos, evsel) {
                        struct hists *pos_hists = evsel__hists(pos);

                        if (symbol_conf.filter_relative) {
                                nr_samples += pos_hists->stats.nr_non_filtered_samples;
                                nr_events += pos_hists->stats.total_non_filtered_period;
                        } else {
                                nr_samples += pos_hists->stats.nr_samples;
                                nr_events += pos_hists->stats.total_period;
                        }
                }
        }

        if (symbol_conf.show_ref_callgraph &&
            strstr(ev_name, "call-graph=no"))
                enable_ref = true;

        if (show_freq)
                scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

        if (hists->uid_filter_str)
                printed += scnprintf(bf + printed, size - printed,
                                     ", UID: %s", hists->uid_filter_str);
        if (thread) {
                if (hists__has(hists, thread)) {
                        printed += scnprintf(bf + printed, size - printed,
                                             ", Thread: %s(%d)",
                                             (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
                                             thread__tid(thread));
                } else {
                        printed += scnprintf(bf + printed, size - printed,
                                             ", Thread: %s",
                                             (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
                }
        }
        if (dso)
                printed += scnprintf(bf + printed, size - printed,
                                     ", DSO: %s", dso->short_name);
        if (socket_id > -1)
                printed += scnprintf(bf + printed, size - printed,
                                     ", Processor Socket: %d", socket_id);

        return printed;
}

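/*
 * Option parsing for the --percentage flag shared by the report-style
 * tools; only "relative" and "absolute" are accepted, e.g.:
 *
 *      perf report --percentage relative
 *
 * perf_hist_config() below wires the same parser to the
 * "hist.percentage" config variable.
 */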
int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else {
                pr_debug("Invalid percentage: %s\n", arg);
                return -1;
        }

        return 0;
}

int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT_CACHED;
        hists->entries = RB_ROOT_CACHED;
        mutex_init(&hists->lock);
        hists->socket_filter = -1;
        hists->hpp_list = hpp_list;
        INIT_LIST_HEAD(&hists->hpp_formats);
        return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
        struct rb_node *node;
        struct hist_entry *he;

        while (!RB_EMPTY_ROOT(&root->rb_root)) {
                node = rb_first_cached(root);
                rb_erase_cached(node, root);

                he = rb_entry(node, struct hist_entry, rb_node_in);
                hist_entry__delete(he);
        }
}

static void hists__delete_all_entries(struct hists *hists)
{
        hists__delete_entries(hists);
        hists__delete_remaining_entries(&hists->entries_in_array[0]);
        hists__delete_remaining_entries(&hists->entries_in_array[1]);
        hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);
        struct perf_hpp_fmt *fmt, *pos;
        struct perf_hpp_list_node *node, *tmp;

        hists__delete_all_entries(hists);

        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
                        list_del_init(&fmt->list);
                        free(fmt);
                }
                list_del_init(&node->list);
                free(node);
        }
}

static int hists_evsel__init(struct evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        __hists__init(hists, &perf_hpp_list);
        return 0;
}

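/*
 * Register the hists_evsel class with the evsel allocator so every new
 * evsel is sized to carry a struct hists and gets the init/exit hooks
 * above; intended to be called once at tool startup.
 */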
int hists__init(void)
{
        int err = evsel__object_config(sizeof(struct hists_evsel),
                                       hists_evsel__init, hists_evsel__exit);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
        INIT_LIST_HEAD(&list->fields);
        INIT_LIST_HEAD(&list->sorts);
}