// SPDX-License-Identifier: GPL-2.0
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/string.h>

#include "../../util/callchain.h"
#include "../../util/debug.h"
#include "../../util/event.h"
#include "../../util/hist.h"
#include "../../util/map.h"
#include "../../util/maps.h"
#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/block-info.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

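/*
 * Print the leading padding of a callchain line plus @left_margin extra
 * spaces, returning the number of characters written.
 */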
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	if (left_margin > USHRT_MAX)
		left_margin = USHRT_MAX;

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

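/*
 * Print one connector line of the callchain graph: a '|' for every depth
 * level still set in @depth_mask, blank space otherwise.
 */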
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

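/*
 * Print a single callchain entry at @depth.  The first entry of a node
 * (@period == 0) is also decorated with "--<value>--" showing the node's
 * hits relative to @total_samples.
 */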
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

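/*
 * Set up the fake "[...]" symbol used to represent callchain hits that end
 * up filtered out of the graph output.
 */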
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

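/*
 * Recursively print a callchain rb-tree in graph mode.  @depth_mask tracks
 * which ancestor levels still need their '|' connectors; in relative graph
 * mode, leftover (filtered) hits are summarized under the "[...]" entry
 * created by init_rem_hits().
 */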
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing its percentage
 * (100% in fractal mode, and the same percentage as the hist entry in graph
 * mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

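/*
 * Print the callchain graph attached to one hist entry.  A single root that
 * needs no percentage of its own is folded into the "---" header before
 * recursing into __callchain__fprintf_graph().
 */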
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol.  No need to print it, otherwise it
			 * would appear twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

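/*
 * Print one callchain in flat mode: walk up via the parent pointers so the
 * root is printed first, one symbol per line.
 */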
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

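/*
 * Print one callchain in folded mode: all symbols on a single line, joined
 * by symbol_conf.field_sep (";" by default), e.g. for flame graph tooling.
 */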
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

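/*
 * Dispatch on callchain_param.mode and print the callchains of @he in graph,
 * flat or folded form.
 */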
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

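/*
 * Format one hist entry into hpp->buf by running every column format in
 * @hpp_list, honoring symbol_conf.field_sep and per-column alignment.
 */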
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

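/*
 * Print one hist entry in hierarchy mode: indent by the entry's depth, print
 * the overhead columns, then this level's sort key column(s), and finally
 * the callchain if the entry is a leaf.
 */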
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", skip_spaces(buf));
	}
	printed += putc('\n', fp);

	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

static int hist_entry__block_fprintf(struct hist_entry *he,
				     char *bf, size_t size,
				     FILE *fp)
{
	struct block_hist *bh = container_of(he, struct block_hist, he);
	int ret = 0;

	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
		struct perf_hpp hpp = {
			.buf = bf,
			.size = size,
			.skip = false,
		};

		bh->block_idx = i;
		hist_entry__snprintf(he, &hpp);

		if (!hpp.skip)
			ret += fprintf(fp, "%s\n", bf);
	}

	return ret;
}

static int hist_entry__individual_block_fprintf(struct hist_entry *he,
						char *bf, size_t size,
						FILE *fp)
{
	int ret = 0;

	struct perf_hpp hpp = {
		.buf = bf,
		.size = size,
		.skip = false,
	};

	hist_entry__snprintf(he, &hpp);
	if (!hpp.skip)
		ret += fprintf(fp, "%s\n", bf);

	return ret;
}

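/*
 * Print one hist entry, dispatching to the hierarchy or block variants when
 * those report modes are enabled, and appending the callchain output unless
 * @ignore_callchains is set.
 */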
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool ignore_callchains)
{
	int ret;
	int callchain_ret = 0;
	struct perf_hpp hpp = {
		.buf = bf,
		.size = size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	if (symbol_conf.report_block)
		return hist_entry__block_fprintf(he, bf, size, fp);

	if (symbol_conf.report_individual_block)
		return hist_entry__individual_block_fprintf(he, bf, size, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (hist_entry__has_callchains(he) && !ignore_callchains)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	ret += callchain_ret;

	return ret;
}

static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	int width;

	if (sep != NULL || indent < 2)
		return 0;

	width = (indent - 2) * HIERARCHY_INDENT;

	return fprintf(fp, "%-*.*s", width, width, line);
}

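/*
 * Print the hierarchy-mode column headers: the overhead columns, the sort
 * keys joined with " / ", and a dotted underline.
 */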
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, " ", fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", strim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf = bf,
		.size = sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

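/*
 * Top-level stdio output for a struct hists: optionally print the headers,
 * then every unfiltered entry whose percentage is at least @min_pcnt,
 * stopping once @max_rows lines have been emitted when a limit is given.
 */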
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool ignore_callchains)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first_cached(&hists->entries); nd;
	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		if (symbol_conf.report_individual_block)
			percent = block_info__total_cycles_percent(h);
		else
			percent = hist_entry__get_percent_limit(h);

		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, " ", fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			maps__fprintf(thread__maps(h->thread), fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

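/*
 * Print the per-PERF_RECORD_* event counters, with each type's share of the
 * total (nr_events[0]) shown as a percentage.
 */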
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;
	u32 total = stats->nr_events[0];

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;
		if (symbol_conf.skip_empty && !stats->nr_events[i])
			continue;

		if (i && total) {
			ret += fprintf(fp, "%20s events: %10d (%4.1f%%)\n",
				       name, stats->nr_events[i],
				       100.0 * stats->nr_events[i] / total);
		} else {
			ret += fprintf(fp, "%20s events: %10d\n",
				       name, stats->nr_events[i]);
		}
	}

	return ret;
}