1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "mem-info.h"
27 #include "annotate.h"
28 #include "annotate-data.h"
29 #include "event.h"
30 #include "time-utils.h"
31 #include "cgroup.h"
32 #include "machine.h"
33 #include "trace-event.h"
34 #include <linux/kernel.h>
35 #include <linux/string.h>
36
37 #ifdef HAVE_LIBTRACEEVENT
38 #include <event-parse.h>
39 #endif
40
41 regex_t parent_regex;
42 const char default_parent_pattern[] = "^sys_|^do_page_fault";
43 const char *parent_pattern = default_parent_pattern;
44 const char *default_sort_order = "comm,dso,symbol";
45 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
46 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
47 const char default_top_sort_order[] = "dso,symbol";
48 const char default_diff_sort_order[] = "dso,symbol";
49 const char default_tracepoint_sort_order[] = "trace";
50 const char *sort_order;
51 const char *field_order;
52 regex_t ignore_callees_regex;
53 int have_ignore_callees = 0;
54 enum sort_mode sort__mode = SORT_MODE__NORMAL;
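/* Sort keys whose column header is supplied dynamically (e.g. by the architecture), and sort keys that are only available on certain architectures, respectively. */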
55 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
56 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
57
58 /*
59 * Some architectures have an Adjacent Cacheline Prefetch feature, which
60 * makes them behave as if the cacheline size were doubled. Enable this
61 * flag to check things at double-cacheline granularity.
62 */
63 bool chk_double_cl;
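/* Consulted by cl_address() when the dcacheline sort key folds data addresses into cache lines; see sort__dcacheline_cmp() below. */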
64
65 /*
66 * Replaces all occurrences of the character given to the:
67 *
68 * -t, --field-separator
69 *
70 * option, which selects a special separator character and disables padding
71 * with spaces: every occurrence of that separator in symbol names (and other
72 * output) is replaced with a '.', so the separator never appears inside a field.
73 */
74 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
75 {
76 int n;
77 va_list ap;
78
79 va_start(ap, fmt);
80 n = vsnprintf(bf, size, fmt, ap);
81 if (symbol_conf.field_sep && n > 0) {
82 char *sep = bf;
83
84 while (1) {
85 sep = strchr(sep, *symbol_conf.field_sep);
86 if (sep == NULL)
87 break;
88 *sep = '.';
89 }
90 }
91 va_end(ap);
92
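/* vsnprintf() returns the length it would have written; clamp the result to what actually fit in bf. */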
93 if (n >= (int)size)
94 return size - 1;
95 return n;
96 }
97
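/* Three-way compare for possibly-NULL keys: equal when both are NULL, otherwise the NULL side compares lower. */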
98 static int64_t cmp_null(const void *l, const void *r)
99 {
100 if (!l && !r)
101 return 0;
102 else if (!l)
103 return -1;
104 else
105 return 1;
106 }
107
108 /* --sort pid */
109
110 static int64_t
111 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
112 {
113 return thread__tid(right->thread) - thread__tid(left->thread);
114 }
115
116 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
117 size_t size, unsigned int width)
118 {
119 const char *comm = thread__comm_str(he->thread);
120
121 width = max(7U, width) - 8;
122 return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
123 width, width, comm ?: "");
124 }
125
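/* se_filter callbacks return -1 when the filter type is not theirs; otherwise a non-zero return means the entry does not match the active filter. */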
126 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
127 {
128 const struct thread *th = arg;
129
130 if (type != HIST_FILTER__THREAD)
131 return -1;
132
133 return th && !RC_CHK_EQUAL(he->thread, th);
134 }
135
136 struct sort_entry sort_thread = {
137 .se_header = " Pid:Command",
138 .se_cmp = sort__thread_cmp,
139 .se_snprintf = hist_entry__thread_snprintf,
140 .se_filter = hist_entry__thread_filter,
141 .se_width_idx = HISTC_THREAD,
142 };
143
144 /* --sort simd */
145
146 static int64_t
147 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
148 {
149 if (left->simd_flags.arch != right->simd_flags.arch)
150 return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
151
152 return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
153 }
154
155 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
156 {
157 u64 arch = simd_flags->arch;
158
159 if (arch & SIMD_OP_FLAGS_ARCH_SVE)
160 return "SVE";
161 else
162 return "n/a";
163 }
164
165 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
166 size_t size, unsigned int width __maybe_unused)
167 {
168 const char *name;
169
170 if (!he->simd_flags.arch)
171 return repsep_snprintf(bf, size, "");
172
173 name = hist_entry__get_simd_name(&he->simd_flags);
174
175 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
176 return repsep_snprintf(bf, size, "[e] %s", name);
177 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
178 return repsep_snprintf(bf, size, "[p] %s", name);
179
180 return repsep_snprintf(bf, size, "[.] %s", name);
181 }
182
183 struct sort_entry sort_simd = {
184 .se_header = "Simd ",
185 .se_cmp = sort__simd_cmp,
186 .se_snprintf = hist_entry__simd_snprintf,
187 .se_width_idx = HISTC_SIMD,
188 };
189
190 /* --sort comm */
191
192 /*
193 * We can't use pointer comparison in functions below,
194 * because it gives different results based on pointer
195 * values, which could break some sorting assumptions.
196 */
197 static int64_t
198 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
199 {
200 return strcmp(comm__str(right->comm), comm__str(left->comm));
201 }
202
203 static int64_t
204 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
205 {
206 return strcmp(comm__str(right->comm), comm__str(left->comm));
207 }
208
209 static int64_t
210 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
211 {
212 return strcmp(comm__str(right->comm), comm__str(left->comm));
213 }
214
215 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
216 size_t size, unsigned int width)
217 {
218 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
219 }
220
221 struct sort_entry sort_comm = {
222 .se_header = "Command",
223 .se_cmp = sort__comm_cmp,
224 .se_collapse = sort__comm_collapse,
225 .se_sort = sort__comm_sort,
226 .se_snprintf = hist_entry__comm_snprintf,
227 .se_filter = hist_entry__thread_filter,
228 .se_width_idx = HISTC_COMM,
229 };
230
231 /* --sort dso */
232
233 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
234 {
235 struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
236 struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
237 const char *dso_name_l, *dso_name_r;
238
239 if (!dso_l || !dso_r)
240 return cmp_null(dso_r, dso_l);
241
242 if (verbose > 0) {
243 dso_name_l = dso__long_name(dso_l);
244 dso_name_r = dso__long_name(dso_r);
245 } else {
246 dso_name_l = dso__short_name(dso_l);
247 dso_name_r = dso__short_name(dso_r);
248 }
249
250 return strcmp(dso_name_l, dso_name_r);
251 }
252
253 static int64_t
254 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
255 {
256 return _sort__dso_cmp(right->ms.map, left->ms.map);
257 }
258
259 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
260 size_t size, unsigned int width)
261 {
262 const struct dso *dso = map ? map__dso(map) : NULL;
263 const char *dso_name = "[unknown]";
264
265 if (dso)
266 dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
267
268 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
269 }
270
271 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
272 size_t size, unsigned int width)
273 {
274 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
275 }
276
277 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
278 {
279 const struct dso *dso = arg;
280
281 if (type != HIST_FILTER__DSO)
282 return -1;
283
284 return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
285 }
286
287 struct sort_entry sort_dso = {
288 .se_header = "Shared Object",
289 .se_cmp = sort__dso_cmp,
290 .se_snprintf = hist_entry__dso_snprintf,
291 .se_filter = hist_entry__dso_filter,
292 .se_width_idx = HISTC_DSO,
293 };
294
295 /* --sort symbol */
296
297 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
298 {
299 return (int64_t)(right_ip - left_ip);
300 }
301
302 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
303 {
304 if (!sym_l || !sym_r)
305 return cmp_null(sym_l, sym_r);
306
307 if (sym_l == sym_r)
308 return 0;
309
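/* Inlined copies are matched by name; copies whose address ranges overlap are treated as the same symbol. */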
310 if (sym_l->inlined || sym_r->inlined) {
311 int ret = strcmp(sym_l->name, sym_r->name);
312
313 if (ret)
314 return ret;
315 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
316 return 0;
317 }
318
319 if (sym_l->start != sym_r->start)
320 return (int64_t)(sym_r->start - sym_l->start);
321
322 return (int64_t)(sym_r->end - sym_l->end);
323 }
324
325 static int64_t
326 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
327 {
328 int64_t ret;
329
330 if (!left->ms.sym && !right->ms.sym)
331 return _sort__addr_cmp(left->ip, right->ip);
332
333 /*
334 * comparing symbol address alone is not enough since it's a
335 * relative address within a dso.
336 */
337 if (!hists__has(left->hists, dso)) {
338 ret = sort__dso_cmp(left, right);
339 if (ret != 0)
340 return ret;
341 }
342
343 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
344 }
345
346 static int64_t
347 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
348 {
349 if (!left->ms.sym || !right->ms.sym)
350 return cmp_null(left->ms.sym, right->ms.sym);
351
352 return strcmp(right->ms.sym->name, left->ms.sym->name);
353 }
354
355 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
356 u64 ip, char level, char *bf, size_t size,
357 unsigned int width)
358 {
359 struct symbol *sym = ms->sym;
360 struct map *map = ms->map;
361 size_t ret = 0;
362
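/* In verbose mode, also print the instruction address and the symtab origin character of the DSO before the symbol. */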
363 if (verbose > 0) {
364 struct dso *dso = map ? map__dso(map) : NULL;
365 char o = dso ? dso__symtab_origin(dso) : '!';
366 u64 rip = ip;
367
368 if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
369 rip = map__unmap_ip(map, ip);
370
371 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
372 BITS_PER_LONG / 4 + 2, rip, o);
373 }
374
375 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
376 if (sym && map) {
377 if (sym->type == STT_OBJECT) {
378 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
379 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
380 ip - map__unmap_ip(map, sym->start));
381 } else {
382 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
383 width - ret,
384 sym->name);
385 if (sym->inlined)
386 ret += repsep_snprintf(bf + ret, size - ret,
387 " (inlined)");
388 }
389 } else {
390 size_t len = BITS_PER_LONG / 4;
391 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
392 len, ip);
393 }
394
395 return ret;
396 }
397
398 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
399 {
400 return _hist_entry__sym_snprintf(&he->ms, he->ip,
401 he->level, bf, size, width);
402 }
403
404 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
405 {
406 const char *sym = arg;
407
408 if (type != HIST_FILTER__SYMBOL)
409 return -1;
410
411 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
412 }
413
414 struct sort_entry sort_sym = {
415 .se_header = "Symbol",
416 .se_cmp = sort__sym_cmp,
417 .se_sort = sort__sym_sort,
418 .se_snprintf = hist_entry__sym_snprintf,
419 .se_filter = hist_entry__sym_filter,
420 .se_width_idx = HISTC_SYMBOL,
421 };
422
423 /* --sort symoff */
424
425 static int64_t
426 sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
427 {
428 int64_t ret;
429
430 ret = sort__sym_cmp(left, right);
431 if (ret)
432 return ret;
433
434 return left->ip - right->ip;
435 }
436
437 static int64_t
438 sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
439 {
440 int64_t ret;
441
442 ret = sort__sym_sort(left, right);
443 if (ret)
444 return ret;
445
446 return left->ip - right->ip;
447 }
448
449 static int
450 hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
451 {
452 struct symbol *sym = he->ms.sym;
453
454 if (sym == NULL)
455 return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
456
457 return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
458 }
459
460 struct sort_entry sort_sym_offset = {
461 .se_header = "Symbol Offset",
462 .se_cmp = sort__symoff_cmp,
463 .se_sort = sort__symoff_sort,
464 .se_snprintf = hist_entry__symoff_snprintf,
465 .se_filter = hist_entry__sym_filter,
466 .se_width_idx = HISTC_SYMBOL_OFFSET,
467 };
468
469 /* --sort srcline */
470
471 char *hist_entry__srcline(struct hist_entry *he)
472 {
473 return map__srcline(he->ms.map, he->ip, he->ms.sym);
474 }
475
476 static int64_t
477 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
478 {
479 int64_t ret;
480
481 ret = _sort__addr_cmp(left->ip, right->ip);
482 if (ret)
483 return ret;
484
485 return sort__dso_cmp(left, right);
486 }
487
488 static int64_t
489 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
490 {
491 if (!left->srcline)
492 left->srcline = hist_entry__srcline(left);
493 if (!right->srcline)
494 right->srcline = hist_entry__srcline(right);
495
496 return strcmp(right->srcline, left->srcline);
497 }
498
499 static int64_t
500 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
501 {
502 return sort__srcline_collapse(left, right);
503 }
504
505 static void
506 sort__srcline_init(struct hist_entry *he)
507 {
508 if (!he->srcline)
509 he->srcline = hist_entry__srcline(he);
510 }
511
512 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
513 size_t size, unsigned int width)
514 {
515 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
516 }
517
518 struct sort_entry sort_srcline = {
519 .se_header = "Source:Line",
520 .se_cmp = sort__srcline_cmp,
521 .se_collapse = sort__srcline_collapse,
522 .se_sort = sort__srcline_sort,
523 .se_init = sort__srcline_init,
524 .se_snprintf = hist_entry__srcline_snprintf,
525 .se_width_idx = HISTC_SRCLINE,
526 };
527
528 /* --sort srcline_from */
529
530 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
531 {
532 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
533 }
534
535 static int64_t
536 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
537 {
538 return left->branch_info->from.addr - right->branch_info->from.addr;
539 }
540
541 static int64_t
542 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
543 {
544 if (!left->branch_info->srcline_from)
545 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
546
547 if (!right->branch_info->srcline_from)
548 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
549
550 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
551 }
552
553 static int64_t
554 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
555 {
556 return sort__srcline_from_collapse(left, right);
557 }
558
559 static void sort__srcline_from_init(struct hist_entry *he)
560 {
561 if (!he->branch_info->srcline_from)
562 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
563 }
564
565 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
566 size_t size, unsigned int width)
567 {
568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
569 }
570
571 struct sort_entry sort_srcline_from = {
572 .se_header = "From Source:Line",
573 .se_cmp = sort__srcline_from_cmp,
574 .se_collapse = sort__srcline_from_collapse,
575 .se_sort = sort__srcline_from_sort,
576 .se_init = sort__srcline_from_init,
577 .se_snprintf = hist_entry__srcline_from_snprintf,
578 .se_width_idx = HISTC_SRCLINE_FROM,
579 };
580
581 /* --sort srcline_to */
582
583 static int64_t
584 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
585 {
586 return left->branch_info->to.addr - right->branch_info->to.addr;
587 }
588
589 static int64_t
590 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
591 {
592 if (!left->branch_info->srcline_to)
593 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
594
595 if (!right->branch_info->srcline_to)
596 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
597
598 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
599 }
600
601 static int64_t
602 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
603 {
604 return sort__srcline_to_collapse(left, right);
605 }
606
607 static void sort__srcline_to_init(struct hist_entry *he)
608 {
609 if (!he->branch_info->srcline_to)
610 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
611 }
612
613 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
614 size_t size, unsigned int width)
615 {
616 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
617 }
618
619 struct sort_entry sort_srcline_to = {
620 .se_header = "To Source:Line",
621 .se_cmp = sort__srcline_to_cmp,
622 .se_collapse = sort__srcline_to_collapse,
623 .se_sort = sort__srcline_to_sort,
624 .se_init = sort__srcline_to_init,
625 .se_snprintf = hist_entry__srcline_to_snprintf,
626 .se_width_idx = HISTC_SRCLINE_TO,
627 };
628
629 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
630 size_t size, unsigned int width)
631 {
632
633 struct symbol *sym = he->ms.sym;
634 struct annotated_branch *branch;
635 double ipc = 0.0, coverage = 0.0;
636 char tmp[64];
637
638 if (!sym)
639 return repsep_snprintf(bf, size, "%-*s", width, "-");
640
641 branch = symbol__annotation(sym)->branch;
642
643 if (branch && branch->hit_cycles)
644 ipc = branch->hit_insn / ((double)branch->hit_cycles);
645
646 if (branch && branch->total_insn) {
647 coverage = branch->cover_insn * 100.0 /
648 ((double)branch->total_insn);
649 }
650
651 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
652 return repsep_snprintf(bf, size, "%-*s", width, tmp);
653 }
654
655 struct sort_entry sort_sym_ipc = {
656 .se_header = "IPC [IPC Coverage]",
657 .se_cmp = sort__sym_cmp,
658 .se_snprintf = hist_entry__sym_ipc_snprintf,
659 .se_width_idx = HISTC_SYMBOL_IPC,
660 };
661
662 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
663 __maybe_unused,
664 char *bf, size_t size,
665 unsigned int width)
666 {
667 char tmp[64];
668
669 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
670 return repsep_snprintf(bf, size, "%-*s", width, tmp);
671 }
672
673 struct sort_entry sort_sym_ipc_null = {
674 .se_header = "IPC [IPC Coverage]",
675 .se_cmp = sort__sym_cmp,
676 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
677 .se_width_idx = HISTC_SYMBOL_IPC,
678 };
679
680 /* --sort callchain_branch_predicted */
681
682 static int64_t
683 sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
684 struct hist_entry *right __maybe_unused)
685 {
686 return 0;
687 }
688
689 static int hist_entry__callchain_branch_predicted_snprintf(
690 struct hist_entry *he, char *bf, size_t size, unsigned int width)
691 {
692 u64 branch_count, predicted_count;
693 double percent = 0.0;
694 char str[32];
695
696 callchain_branch_counts(he->callchain, &branch_count,
697 &predicted_count, NULL, NULL);
698
699 if (branch_count)
700 percent = predicted_count * 100.0 / branch_count;
701
702 snprintf(str, sizeof(str), "%.1f%%", percent);
703 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
704 }
705
706 struct sort_entry sort_callchain_branch_predicted = {
707 .se_header = "Predicted",
708 .se_cmp = sort__callchain_branch_predicted_cmp,
709 .se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
710 .se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED,
711 };
712
713 /* --sort callchain_branch_abort */
714
715 static int64_t
716 sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
717 struct hist_entry *right __maybe_unused)
718 {
719 return 0;
720 }
721
722 static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
723 char *bf, size_t size,
724 unsigned int width)
725 {
726 u64 branch_count, abort_count;
727 char str[32];
728
729 callchain_branch_counts(he->callchain, &branch_count,
730 NULL, &abort_count, NULL);
731
732 snprintf(str, sizeof(str), "%" PRId64, abort_count);
733 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
734 }
735
736 struct sort_entry sort_callchain_branch_abort = {
737 .se_header = "Abort",
738 .se_cmp = sort__callchain_branch_abort_cmp,
739 .se_snprintf = hist_entry__callchain_branch_abort_snprintf,
740 .se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT,
741 };
742
743 /* --sort callchain_branch_cycles */
744
745 static int64_t
746 sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
747 struct hist_entry *right __maybe_unused)
748 {
749 return 0;
750 }
751
752 static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
753 char *bf, size_t size,
754 unsigned int width)
755 {
756 u64 branch_count, cycles_count, cycles = 0;
757 char str[32];
758
759 callchain_branch_counts(he->callchain, &branch_count,
760 NULL, NULL, &cycles_count);
761
762 if (branch_count)
763 cycles = cycles_count / branch_count;
764
765 snprintf(str, sizeof(str), "%" PRId64 "", cycles);
766 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
767 }
768
769 struct sort_entry sort_callchain_branch_cycles = {
770 .se_header = "Cycles",
771 .se_cmp = sort__callchain_branch_cycles_cmp,
772 .se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
773 .se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES,
774 };
775
776 /* --sort srcfile */
777
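/* Sentinel empty string returned when no source file can be resolved for an entry. */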
778 static char no_srcfile[1];
779
780 static char *hist_entry__get_srcfile(struct hist_entry *e)
781 {
782 char *sf, *p;
783 struct map *map = e->ms.map;
784
785 if (!map)
786 return no_srcfile;
787
788 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
789 e->ms.sym, false, true, true, e->ip);
790 if (sf == SRCLINE_UNKNOWN)
791 return no_srcfile;
792 p = strchr(sf, ':');
793 if (p && *sf) {
794 *p = 0;
795 return sf;
796 }
797 free(sf);
798 return no_srcfile;
799 }
800
801 static int64_t
802 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
803 {
804 return sort__srcline_cmp(left, right);
805 }
806
807 static int64_t
808 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
809 {
810 if (!left->srcfile)
811 left->srcfile = hist_entry__get_srcfile(left);
812 if (!right->srcfile)
813 right->srcfile = hist_entry__get_srcfile(right);
814
815 return strcmp(right->srcfile, left->srcfile);
816 }
817
818 static int64_t
819 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
820 {
821 return sort__srcfile_collapse(left, right);
822 }
823
824 static void sort__srcfile_init(struct hist_entry *he)
825 {
826 if (!he->srcfile)
827 he->srcfile = hist_entry__get_srcfile(he);
828 }
829
830 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
831 size_t size, unsigned int width)
832 {
833 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
834 }
835
836 struct sort_entry sort_srcfile = {
837 .se_header = "Source File",
838 .se_cmp = sort__srcfile_cmp,
839 .se_collapse = sort__srcfile_collapse,
840 .se_sort = sort__srcfile_sort,
841 .se_init = sort__srcfile_init,
842 .se_snprintf = hist_entry__srcfile_snprintf,
843 .se_width_idx = HISTC_SRCFILE,
844 };
845
846 /* --sort parent */
847
848 static int64_t
849 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
850 {
851 struct symbol *sym_l = left->parent;
852 struct symbol *sym_r = right->parent;
853
854 if (!sym_l || !sym_r)
855 return cmp_null(sym_l, sym_r);
856
857 return strcmp(sym_r->name, sym_l->name);
858 }
859
860 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
861 size_t size, unsigned int width)
862 {
863 return repsep_snprintf(bf, size, "%-*.*s", width, width,
864 he->parent ? he->parent->name : "[other]");
865 }
866
867 struct sort_entry sort_parent = {
868 .se_header = "Parent symbol",
869 .se_cmp = sort__parent_cmp,
870 .se_snprintf = hist_entry__parent_snprintf,
871 .se_width_idx = HISTC_PARENT,
872 };
873
874 /* --sort cpu */
875
876 static int64_t
877 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
878 {
879 return right->cpu - left->cpu;
880 }
881
882 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
883 size_t size, unsigned int width)
884 {
885 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
886 }
887
888 struct sort_entry sort_cpu = {
889 .se_header = "CPU",
890 .se_cmp = sort__cpu_cmp,
891 .se_snprintf = hist_entry__cpu_snprintf,
892 .se_width_idx = HISTC_CPU,
893 };
894
895 /* --sort parallelism */
896
897 static int64_t
898 sort__parallelism_cmp(struct hist_entry *left, struct hist_entry *right)
899 {
900 return right->parallelism - left->parallelism;
901 }
902
903 static int hist_entry__parallelism_filter(struct hist_entry *he, int type, const void *arg)
904 {
905 const unsigned long *parallelism_filter = arg;
906
907 if (type != HIST_FILTER__PARALLELISM)
908 return -1;
909
910 return test_bit(he->parallelism, parallelism_filter);
911 }
912
913 static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf,
914 size_t size, unsigned int width)
915 {
916 return repsep_snprintf(bf, size, "%*d", width, he->parallelism);
917 }
918
919 struct sort_entry sort_parallelism = {
920 .se_header = "Parallelism",
921 .se_cmp = sort__parallelism_cmp,
922 .se_filter = hist_entry__parallelism_filter,
923 .se_snprintf = hist_entry__parallelism_snprintf,
924 .se_width_idx = HISTC_PARALLELISM,
925 };
926
927 /* --sort cgroup_id */
928
929 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
930 {
931 return (int64_t)(right_dev - left_dev);
932 }
933
934 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
935 {
936 return (int64_t)(right_ino - left_ino);
937 }
938
939 static int64_t
940 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
941 {
942 int64_t ret;
943
944 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
945 if (ret != 0)
946 return ret;
947
948 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
949 left->cgroup_id.ino);
950 }
951
952 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
953 char *bf, size_t size,
954 unsigned int width __maybe_unused)
955 {
956 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
957 he->cgroup_id.ino);
958 }
959
960 struct sort_entry sort_cgroup_id = {
961 .se_header = "cgroup id (dev/inode)",
962 .se_cmp = sort__cgroup_id_cmp,
963 .se_snprintf = hist_entry__cgroup_id_snprintf,
964 .se_width_idx = HISTC_CGROUP_ID,
965 };
966
967 /* --sort cgroup */
968
969 static int64_t
970 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
971 {
972 return right->cgroup - left->cgroup;
973 }
974
975 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
976 char *bf, size_t size,
977 unsigned int width __maybe_unused)
978 {
979 const char *cgrp_name = "N/A";
980
981 if (he->cgroup) {
982 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
983 he->cgroup);
984 if (cgrp != NULL)
985 cgrp_name = cgrp->name;
986 else
987 cgrp_name = "unknown";
988 }
989
990 return repsep_snprintf(bf, size, "%s", cgrp_name);
991 }
992
993 struct sort_entry sort_cgroup = {
994 .se_header = "Cgroup",
995 .se_cmp = sort__cgroup_cmp,
996 .se_snprintf = hist_entry__cgroup_snprintf,
997 .se_width_idx = HISTC_CGROUP,
998 };
999
1000 /* --sort socket */
1001
1002 static int64_t
1003 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
1004 {
1005 return right->socket - left->socket;
1006 }
1007
1008 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
1009 size_t size, unsigned int width)
1010 {
1011 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
1012 }
1013
1014 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
1015 {
1016 int sk = *(const int *)arg;
1017
1018 if (type != HIST_FILTER__SOCKET)
1019 return -1;
1020
1021 return sk >= 0 && he->socket != sk;
1022 }
1023
1024 struct sort_entry sort_socket = {
1025 .se_header = "Socket",
1026 .se_cmp = sort__socket_cmp,
1027 .se_snprintf = hist_entry__socket_snprintf,
1028 .se_filter = hist_entry__socket_filter,
1029 .se_width_idx = HISTC_SOCKET,
1030 };
1031
1032 /* --sort time */
1033
1034 static int64_t
1035 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
1036 {
1037 return right->time - left->time;
1038 }
1039
1040 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
1041 size_t size, unsigned int width)
1042 {
1043 char he_time[32];
1044
1045 if (symbol_conf.nanosecs)
1046 timestamp__scnprintf_nsec(he->time, he_time,
1047 sizeof(he_time));
1048 else
1049 timestamp__scnprintf_usec(he->time, he_time,
1050 sizeof(he_time));
1051
1052 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
1053 }
1054
1055 struct sort_entry sort_time = {
1056 .se_header = "Time",
1057 .se_cmp = sort__time_cmp,
1058 .se_snprintf = hist_entry__time_snprintf,
1059 .se_width_idx = HISTC_TIME,
1060 };
1061
1062 /* --sort trace */
1063
1064 #ifdef HAVE_LIBTRACEEVENT
1065 static char *get_trace_output(struct hist_entry *he)
1066 {
1067 struct trace_seq seq;
1068 struct evsel *evsel;
1069 struct tep_record rec = {
1070 .data = he->raw_data,
1071 .size = he->raw_size,
1072 };
1073 struct tep_event *tp_format;
1074
1075 evsel = hists_to_evsel(he->hists);
1076
1077 trace_seq_init(&seq);
1078 tp_format = evsel__tp_format(evsel);
1079 if (tp_format) {
1080 if (symbol_conf.raw_trace)
1081 tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
1082 else
1083 tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
1084 }
1085
1086 /*
1087 * Trim the buffer: it starts at 4KB and we're not going to
1088 * add anything more to it.
1089 */
1090 return realloc(seq.buffer, seq.len + 1);
1091 }
1092
1093 static int64_t
1094 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
1095 {
1096 struct evsel *evsel;
1097
1098 evsel = hists_to_evsel(left->hists);
1099 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1100 return 0;
1101
1102 if (left->trace_output == NULL)
1103 left->trace_output = get_trace_output(left);
1104 if (right->trace_output == NULL)
1105 right->trace_output = get_trace_output(right);
1106
1107 return strcmp(right->trace_output, left->trace_output);
1108 }
1109
1110 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
1111 size_t size, unsigned int width)
1112 {
1113 struct evsel *evsel;
1114
1115 evsel = hists_to_evsel(he->hists);
1116 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1117 return scnprintf(bf, size, "%-.*s", width, "N/A");
1118
1119 if (he->trace_output == NULL)
1120 he->trace_output = get_trace_output(he);
1121 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
1122 }
1123
1124 struct sort_entry sort_trace = {
1125 .se_header = "Trace output",
1126 .se_cmp = sort__trace_cmp,
1127 .se_snprintf = hist_entry__trace_snprintf,
1128 .se_width_idx = HISTC_TRACE,
1129 };
1130 #endif /* HAVE_LIBTRACEEVENT */
1131
1132 /* sort keys for branch stacks */
1133
1134 static int64_t
1135 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1136 {
1137 if (!left->branch_info || !right->branch_info)
1138 return cmp_null(left->branch_info, right->branch_info);
1139
1140 return _sort__dso_cmp(left->branch_info->from.ms.map,
1141 right->branch_info->from.ms.map);
1142 }
1143
1144 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1145 size_t size, unsigned int width)
1146 {
1147 if (he->branch_info)
1148 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1149 bf, size, width);
1150 else
1151 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1152 }
1153
1154 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
1155 const void *arg)
1156 {
1157 const struct dso *dso = arg;
1158
1159 if (type != HIST_FILTER__DSO)
1160 return -1;
1161
1162 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
1163 map__dso(he->branch_info->from.ms.map) != dso);
1164 }
1165
1166 static int64_t
1167 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1168 {
1169 if (!left->branch_info || !right->branch_info)
1170 return cmp_null(left->branch_info, right->branch_info);
1171
1172 return _sort__dso_cmp(left->branch_info->to.ms.map,
1173 right->branch_info->to.ms.map);
1174 }
1175
1176 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1177 size_t size, unsigned int width)
1178 {
1179 if (he->branch_info)
1180 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1181 bf, size, width);
1182 else
1183 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1184 }
1185
1186 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1187 const void *arg)
1188 {
1189 const struct dso *dso = arg;
1190
1191 if (type != HIST_FILTER__DSO)
1192 return -1;
1193
1194 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1195 map__dso(he->branch_info->to.ms.map) != dso);
1196 }
1197
1198 static int64_t
1199 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1200 {
1201 struct addr_map_symbol *from_l, *from_r;
1202
1203 if (!left->branch_info || !right->branch_info)
1204 return cmp_null(left->branch_info, right->branch_info);
1205
1206 from_l = &left->branch_info->from;
1207 from_r = &right->branch_info->from;
1208
1209 if (!from_l->ms.sym && !from_r->ms.sym)
1210 return _sort__addr_cmp(from_l->addr, from_r->addr);
1211
1212 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1213 }
1214
1215 static int64_t
1216 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1217 {
1218 struct addr_map_symbol *to_l, *to_r;
1219
1220 if (!left->branch_info || !right->branch_info)
1221 return cmp_null(left->branch_info, right->branch_info);
1222
1223 to_l = &left->branch_info->to;
1224 to_r = &right->branch_info->to;
1225
1226 if (!to_l->ms.sym && !to_r->ms.sym)
1227 return _sort__addr_cmp(to_l->addr, to_r->addr);
1228
1229 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1230 }
1231
1232 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1233 size_t size, unsigned int width)
1234 {
1235 if (he->branch_info) {
1236 struct addr_map_symbol *from = &he->branch_info->from;
1237
1238 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1239 from->al_level, bf, size, width);
1240 }
1241
1242 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1243 }
1244
1245 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1246 size_t size, unsigned int width)
1247 {
1248 if (he->branch_info) {
1249 struct addr_map_symbol *to = &he->branch_info->to;
1250
1251 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1252 to->al_level, bf, size, width);
1253 }
1254
1255 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1256 }
1257
1258 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1259 const void *arg)
1260 {
1261 const char *sym = arg;
1262
1263 if (type != HIST_FILTER__SYMBOL)
1264 return -1;
1265
1266 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1267 strstr(he->branch_info->from.ms.sym->name, sym));
1268 }
1269
1270 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1271 const void *arg)
1272 {
1273 const char *sym = arg;
1274
1275 if (type != HIST_FILTER__SYMBOL)
1276 return -1;
1277
1278 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1279 strstr(he->branch_info->to.ms.sym->name, sym));
1280 }
1281
1282 struct sort_entry sort_dso_from = {
1283 .se_header = "Source Shared Object",
1284 .se_cmp = sort__dso_from_cmp,
1285 .se_snprintf = hist_entry__dso_from_snprintf,
1286 .se_filter = hist_entry__dso_from_filter,
1287 .se_width_idx = HISTC_DSO_FROM,
1288 };
1289
1290 struct sort_entry sort_dso_to = {
1291 .se_header = "Target Shared Object",
1292 .se_cmp = sort__dso_to_cmp,
1293 .se_snprintf = hist_entry__dso_to_snprintf,
1294 .se_filter = hist_entry__dso_to_filter,
1295 .se_width_idx = HISTC_DSO_TO,
1296 };
1297
1298 struct sort_entry sort_sym_from = {
1299 .se_header = "Source Symbol",
1300 .se_cmp = sort__sym_from_cmp,
1301 .se_snprintf = hist_entry__sym_from_snprintf,
1302 .se_filter = hist_entry__sym_from_filter,
1303 .se_width_idx = HISTC_SYMBOL_FROM,
1304 };
1305
1306 struct sort_entry sort_sym_to = {
1307 .se_header = "Target Symbol",
1308 .se_cmp = sort__sym_to_cmp,
1309 .se_snprintf = hist_entry__sym_to_snprintf,
1310 .se_filter = hist_entry__sym_to_filter,
1311 .se_width_idx = HISTC_SYMBOL_TO,
1312 };
1313
1314 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1315 u64 ip, char level, char *bf, size_t size,
1316 unsigned int width)
1317 {
1318 struct symbol *sym = ms->sym;
1319 struct map *map = ms->map;
1320 size_t ret = 0, offs;
1321
1322 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1323 if (sym && map) {
1324 if (sym->type == STT_OBJECT) {
1325 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1326 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1327 ip - map__unmap_ip(map, sym->start));
1328 } else {
1329 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1330 width - ret,
1331 sym->name);
1332 offs = ip - sym->start;
1333 if (offs)
1334 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1335 }
1336 } else {
1337 size_t len = BITS_PER_LONG / 4;
1338 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1339 len, ip);
1340 }
1341
1342 return ret;
1343 }
1344
1345 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1346 size_t size, unsigned int width)
1347 {
1348 if (he->branch_info) {
1349 struct addr_map_symbol *from = &he->branch_info->from;
1350
1351 return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1352 he->level, bf, size, width);
1353 }
1354
1355 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1356 }
1357
1358 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1359 size_t size, unsigned int width)
1360 {
1361 if (he->branch_info) {
1362 struct addr_map_symbol *to = &he->branch_info->to;
1363
1364 return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1365 he->level, bf, size, width);
1366 }
1367
1368 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1369 }
1370
1371 static int64_t
1372 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1373 {
1374 struct addr_map_symbol *from_l;
1375 struct addr_map_symbol *from_r;
1376 int64_t ret;
1377
1378 if (!left->branch_info || !right->branch_info)
1379 return cmp_null(left->branch_info, right->branch_info);
1380
1381 from_l = &left->branch_info->from;
1382 from_r = &right->branch_info->from;
1383
1384 /*
1385 * comparing symbol address alone is not enough since it's a
1386 * relative address within a dso.
1387 */
1388 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1389 if (ret != 0)
1390 return ret;
1391
1392 return _sort__addr_cmp(from_l->addr, from_r->addr);
1393 }
1394
1395 static int64_t
1396 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1397 {
1398 struct addr_map_symbol *to_l;
1399 struct addr_map_symbol *to_r;
1400 int64_t ret;
1401
1402 if (!left->branch_info || !right->branch_info)
1403 return cmp_null(left->branch_info, right->branch_info);
1404
1405 to_l = &left->branch_info->to;
1406 to_r = &right->branch_info->to;
1407
1408 /*
1409 * comparing symbol address alone is not enough since it's a
1410 * relative address within a dso.
1411 */
1412 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1413 if (ret != 0)
1414 return ret;
1415
1416 return _sort__addr_cmp(to_l->addr, to_r->addr);
1417 }
1418
1419 struct sort_entry sort_addr_from = {
1420 .se_header = "Source Address",
1421 .se_cmp = sort__addr_from_cmp,
1422 .se_snprintf = hist_entry__addr_from_snprintf,
1423 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
1424 .se_width_idx = HISTC_ADDR_FROM,
1425 };
1426
1427 struct sort_entry sort_addr_to = {
1428 .se_header = "Target Address",
1429 .se_cmp = sort__addr_to_cmp,
1430 .se_snprintf = hist_entry__addr_to_snprintf,
1431 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
1432 .se_width_idx = HISTC_ADDR_TO,
1433 };
1434
1435
1436 static int64_t
1437 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1438 {
1439 unsigned char mp, p;
1440
1441 if (!left->branch_info || !right->branch_info)
1442 return cmp_null(left->branch_info, right->branch_info);
1443
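/* Only whether the prediction flags differ matters here; the result carries no ordering beyond equal/not-equal. */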
1444 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1445 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1446 return mp || p;
1447 }
1448
1449 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1450 size_t size, unsigned int width){
1451 static const char *out = "N/A";
1452
1453 if (he->branch_info) {
1454 if (he->branch_info->flags.predicted)
1455 out = "N";
1456 else if (he->branch_info->flags.mispred)
1457 out = "Y";
1458 }
1459
1460 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1461 }
1462
1463 static int64_t
1464 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1465 {
1466 if (!left->branch_info || !right->branch_info)
1467 return cmp_null(left->branch_info, right->branch_info);
1468
1469 return left->branch_info->flags.cycles -
1470 right->branch_info->flags.cycles;
1471 }
1472
1473 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1474 size_t size, unsigned int width)
1475 {
1476 if (!he->branch_info)
1477 return scnprintf(bf, size, "%-.*s", width, "N/A");
1478 if (he->branch_info->flags.cycles == 0)
1479 return repsep_snprintf(bf, size, "%-*s", width, "-");
1480 return repsep_snprintf(bf, size, "%-*hd", width,
1481 he->branch_info->flags.cycles);
1482 }
1483
1484 struct sort_entry sort_cycles = {
1485 .se_header = "Basic Block Cycles",
1486 .se_cmp = sort__cycles_cmp,
1487 .se_snprintf = hist_entry__cycles_snprintf,
1488 .se_width_idx = HISTC_CYCLES,
1489 };
1490
1491 /* --sort daddr_sym */
1492 int64_t
1493 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1494 {
1495 uint64_t l = 0, r = 0;
1496
1497 if (left->mem_info)
1498 l = mem_info__daddr(left->mem_info)->addr;
1499 if (right->mem_info)
1500 r = mem_info__daddr(right->mem_info)->addr;
1501
1502 return (int64_t)(r - l);
1503 }
1504
1505 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1506 size_t size, unsigned int width)
1507 {
1508 uint64_t addr = 0;
1509 struct map_symbol *ms = NULL;
1510
1511 if (he->mem_info) {
1512 addr = mem_info__daddr(he->mem_info)->addr;
1513 ms = &mem_info__daddr(he->mem_info)->ms;
1514 }
1515 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1516 }
1517
1518 int64_t
1519 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1520 {
1521 uint64_t l = 0, r = 0;
1522
1523 if (left->mem_info)
1524 l = mem_info__iaddr(left->mem_info)->addr;
1525 if (right->mem_info)
1526 r = mem_info__iaddr(right->mem_info)->addr;
1527
1528 return (int64_t)(r - l);
1529 }
1530
1531 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1532 size_t size, unsigned int width)
1533 {
1534 uint64_t addr = 0;
1535 struct map_symbol *ms = NULL;
1536
1537 if (he->mem_info) {
1538 addr = mem_info__iaddr(he->mem_info)->addr;
1539 ms = &mem_info__iaddr(he->mem_info)->ms;
1540 }
1541 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1542 }
1543
1544 static int64_t
1545 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1546 {
1547 struct map *map_l = NULL;
1548 struct map *map_r = NULL;
1549
1550 if (left->mem_info)
1551 map_l = mem_info__daddr(left->mem_info)->ms.map;
1552 if (right->mem_info)
1553 map_r = mem_info__daddr(right->mem_info)->ms.map;
1554
1555 return _sort__dso_cmp(map_l, map_r);
1556 }
1557
1558 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1559 size_t size, unsigned int width)
1560 {
1561 struct map *map = NULL;
1562
1563 if (he->mem_info)
1564 map = mem_info__daddr(he->mem_info)->ms.map;
1565
1566 return _hist_entry__dso_snprintf(map, bf, size, width);
1567 }
1568
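/* The memory data-source comparators below fall back to the corresponding PERF_MEM_*_NA encoding when an entry carries no mem_info. */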
1569 static int64_t
1570 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1571 {
1572 union perf_mem_data_src data_src_l;
1573 union perf_mem_data_src data_src_r;
1574
1575 if (left->mem_info)
1576 data_src_l = *mem_info__data_src(left->mem_info);
1577 else
1578 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1579
1580 if (right->mem_info)
1581 data_src_r = *mem_info__data_src(right->mem_info);
1582 else
1583 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1584
1585 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1586 }
1587
1588 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1589 size_t size, unsigned int width)
1590 {
1591 char out[10];
1592
1593 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1594 return repsep_snprintf(bf, size, "%.*s", width, out);
1595 }
1596
1597 static int64_t
1598 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1599 {
1600 union perf_mem_data_src data_src_l;
1601 union perf_mem_data_src data_src_r;
1602
1603 if (left->mem_info)
1604 data_src_l = *mem_info__data_src(left->mem_info);
1605 else
1606 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1607
1608 if (right->mem_info)
1609 data_src_r = *mem_info__data_src(right->mem_info);
1610 else
1611 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1612
1613 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1614 }
1615
1616 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1617 size_t size, unsigned int width)
1618 {
1619 char out[64];
1620
1621 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1622 return repsep_snprintf(bf, size, "%-*s", width, out);
1623 }
1624
1625 static int64_t
1626 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1627 {
1628 union perf_mem_data_src data_src_l;
1629 union perf_mem_data_src data_src_r;
1630
1631 if (left->mem_info)
1632 data_src_l = *mem_info__data_src(left->mem_info);
1633 else
1634 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1635
1636 if (right->mem_info)
1637 data_src_r = *mem_info__data_src(right->mem_info);
1638 else
1639 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1640
1641 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1642 }
1643
1644 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1645 size_t size, unsigned int width)
1646 {
1647 char out[64];
1648
1649 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1650 return repsep_snprintf(bf, size, "%-*s", width, out);
1651 }
1652
1653 static int64_t
1654 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1655 {
1656 union perf_mem_data_src data_src_l;
1657 union perf_mem_data_src data_src_r;
1658
1659 if (left->mem_info)
1660 data_src_l = *mem_info__data_src(left->mem_info);
1661 else
1662 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1663
1664 if (right->mem_info)
1665 data_src_r = *mem_info__data_src(right->mem_info);
1666 else
1667 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1668
1669 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1670 }
1671
1672 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1673 size_t size, unsigned int width)
1674 {
1675 char out[64];
1676
1677 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1678 return repsep_snprintf(bf, size, "%-*s", width, out);
1679 }
1680
1681 int64_t
1682 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1683 {
1684 u64 l, r;
1685 struct map *l_map, *r_map;
1686 struct dso *l_dso, *r_dso;
1687 int rc;
1688
1689 if (!left->mem_info) return -1;
1690 if (!right->mem_info) return 1;
1691
1692 /* group event types together */
1693 if (left->cpumode > right->cpumode) return -1;
1694 if (left->cpumode < right->cpumode) return 1;
1695
1696 l_map = mem_info__daddr(left->mem_info)->ms.map;
1697 r_map = mem_info__daddr(right->mem_info)->ms.map;
1698
1699 /* if both are NULL, jump to sort on al_addr instead */
1700 if (!l_map && !r_map)
1701 goto addr;
1702
1703 if (!l_map) return -1;
1704 if (!r_map) return 1;
1705
1706 l_dso = map__dso(l_map);
1707 r_dso = map__dso(r_map);
1708 rc = dso__cmp_id(l_dso, r_dso);
1709 if (rc)
1710 return rc;
1711 /*
1712 * Addresses with no major/minor numbers are assumed to be
1713 * anonymous in userspace. Sort those on pid then address.
1714 *
1715 * The kernel and non-zero major/minor mapped areas are
1716 * assumed to be unity mapped. Sort those on address.
1717 */
1718
1719 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1720 (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
1721 !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
1722 /* userspace anonymous */
1723
1724 if (thread__pid(left->thread) > thread__pid(right->thread))
1725 return -1;
1726 if (thread__pid(left->thread) < thread__pid(right->thread))
1727 return 1;
1728 }
1729
1730 addr:
1731 /* al_addr does all the right addr - start + offset calculations */
1732 l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1733 r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1734
1735 if (l > r) return -1;
1736 if (l < r) return 1;
1737
1738 return 0;
1739 }
1740
1741 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1742 size_t size, unsigned int width)
1743 {
1744
1745 uint64_t addr = 0;
1746 struct map_symbol *ms = NULL;
1747 char level = he->level;
1748
1749 if (he->mem_info) {
1750 struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1751 struct dso *dso = map ? map__dso(map) : NULL;
1752
1753 addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1754 ms = &mem_info__daddr(he->mem_info)->ms;
1755
1756 /* print [s] for shared data mmaps */
1757 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1758 map && !(map__prot(map) & PROT_EXEC) &&
1759 (map__flags(map) & MAP_SHARED) &&
1760 (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
1761 dso__id(dso)->ino_generation))
1762 level = 's';
1763 else if (!map)
1764 level = 'X';
1765 }
1766 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1767 }
1768
1769 struct sort_entry sort_mispredict = {
1770 .se_header = "Branch Mispredicted",
1771 .se_cmp = sort__mispredict_cmp,
1772 .se_snprintf = hist_entry__mispredict_snprintf,
1773 .se_width_idx = HISTC_MISPREDICT,
1774 };
1775
1776 static int64_t
1777 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1778 {
1779 return left->weight - right->weight;
1780 }
1781
1782 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1783 size_t size, unsigned int width)
1784 {
1785 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1786 }
1787
1788 struct sort_entry sort_local_weight = {
1789 .se_header = "Local Weight",
1790 .se_cmp = sort__weight_cmp,
1791 .se_snprintf = hist_entry__local_weight_snprintf,
1792 .se_width_idx = HISTC_LOCAL_WEIGHT,
1793 };
1794
1795 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1796 size_t size, unsigned int width)
1797 {
1798 return repsep_snprintf(bf, size, "%-*llu", width,
1799 he->weight * he->stat.nr_events);
1800 }
1801
1802 struct sort_entry sort_global_weight = {
1803 .se_header = "Weight",
1804 .se_cmp = sort__weight_cmp,
1805 .se_snprintf = hist_entry__global_weight_snprintf,
1806 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1807 };
1808
1809 static int64_t
1810 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1811 {
1812 return left->ins_lat - right->ins_lat;
1813 }
1814
1815 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1816 size_t size, unsigned int width)
1817 {
1818 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1819 }
1820
1821 struct sort_entry sort_local_ins_lat = {
1822 .se_header = "Local INSTR Latency",
1823 .se_cmp = sort__ins_lat_cmp,
1824 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1825 .se_width_idx = HISTC_LOCAL_INS_LAT,
1826 };
1827
1828 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1829 size_t size, unsigned int width)
1830 {
1831 return repsep_snprintf(bf, size, "%-*u", width,
1832 he->ins_lat * he->stat.nr_events);
1833 }
1834
1835 struct sort_entry sort_global_ins_lat = {
1836 .se_header = "INSTR Latency",
1837 .se_cmp = sort__ins_lat_cmp,
1838 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1839 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1840 };
1841
1842 static int64_t
1843 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1844 {
1845 return left->p_stage_cyc - right->p_stage_cyc;
1846 }
1847
1848 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1849 size_t size, unsigned int width)
1850 {
1851 return repsep_snprintf(bf, size, "%-*u", width,
1852 he->p_stage_cyc * he->stat.nr_events);
1853 }
1854
1855
1856 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1857 size_t size, unsigned int width)
1858 {
1859 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1860 }
1861
1862 struct sort_entry sort_local_p_stage_cyc = {
1863 .se_header = "Local Pipeline Stage Cycle",
1864 .se_cmp = sort__p_stage_cyc_cmp,
1865 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1866 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1867 };
1868
1869 struct sort_entry sort_global_p_stage_cyc = {
1870 .se_header = "Pipeline Stage Cycle",
1871 .se_cmp = sort__p_stage_cyc_cmp,
1872 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1873 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1874 };
1875
1876 struct sort_entry sort_mem_daddr_sym = {
1877 .se_header = "Data Symbol",
1878 .se_cmp = sort__daddr_cmp,
1879 .se_snprintf = hist_entry__daddr_snprintf,
1880 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1881 };
1882
1883 struct sort_entry sort_mem_iaddr_sym = {
1884 .se_header = "Code Symbol",
1885 .se_cmp = sort__iaddr_cmp,
1886 .se_snprintf = hist_entry__iaddr_snprintf,
1887 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1888 };
1889
1890 struct sort_entry sort_mem_daddr_dso = {
1891 .se_header = "Data Object",
1892 .se_cmp = sort__dso_daddr_cmp,
1893 .se_snprintf = hist_entry__dso_daddr_snprintf,
1894 .se_width_idx = HISTC_MEM_DADDR_DSO,
1895 };
1896
1897 struct sort_entry sort_mem_locked = {
1898 .se_header = "Locked",
1899 .se_cmp = sort__locked_cmp,
1900 .se_snprintf = hist_entry__locked_snprintf,
1901 .se_width_idx = HISTC_MEM_LOCKED,
1902 };
1903
1904 struct sort_entry sort_mem_tlb = {
1905 .se_header = "TLB access",
1906 .se_cmp = sort__tlb_cmp,
1907 .se_snprintf = hist_entry__tlb_snprintf,
1908 .se_width_idx = HISTC_MEM_TLB,
1909 };
1910
1911 struct sort_entry sort_mem_lvl = {
1912 .se_header = "Memory access",
1913 .se_cmp = sort__lvl_cmp,
1914 .se_snprintf = hist_entry__lvl_snprintf,
1915 .se_width_idx = HISTC_MEM_LVL,
1916 };
1917
1918 struct sort_entry sort_mem_snoop = {
1919 .se_header = "Snoop",
1920 .se_cmp = sort__snoop_cmp,
1921 .se_snprintf = hist_entry__snoop_snprintf,
1922 .se_width_idx = HISTC_MEM_SNOOP,
1923 };
1924
1925 struct sort_entry sort_mem_dcacheline = {
1926 .se_header = "Data Cacheline",
1927 .se_cmp = sort__dcacheline_cmp,
1928 .se_snprintf = hist_entry__dcacheline_snprintf,
1929 .se_width_idx = HISTC_MEM_DCACHELINE,
1930 };
1931
1932 static int64_t
1933 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1934 {
1935 union perf_mem_data_src data_src_l;
1936 union perf_mem_data_src data_src_r;
1937
1938 if (left->mem_info)
1939 data_src_l = *mem_info__data_src(left->mem_info);
1940 else
1941 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1942
1943 if (right->mem_info)
1944 data_src_r = *mem_info__data_src(right->mem_info);
1945 else
1946 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1947
1948 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1949 }
1950
1951 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1952 size_t size, unsigned int width)
1953 {
1954 char out[16];
1955
1956 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1957 return repsep_snprintf(bf, size, "%.*s", width, out);
1958 }
1959
1960 struct sort_entry sort_mem_blocked = {
1961 .se_header = "Blocked",
1962 .se_cmp = sort__blocked_cmp,
1963 .se_snprintf = hist_entry__blocked_snprintf,
1964 .se_width_idx = HISTC_MEM_BLOCKED,
1965 };
1966
1967 static int64_t
1968 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1969 {
1970 uint64_t l = 0, r = 0;
1971
1972 if (left->mem_info)
1973 l = mem_info__daddr(left->mem_info)->phys_addr;
1974 if (right->mem_info)
1975 r = mem_info__daddr(right->mem_info)->phys_addr;
1976
1977 return (int64_t)(r - l);
1978 }
1979
1980 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1981 size_t size, unsigned int width)
1982 {
1983 uint64_t addr = 0;
1984 size_t ret = 0;
1985 size_t len = BITS_PER_LONG / 4;
1986
1987 addr = mem_info__daddr(he->mem_info)->phys_addr;
1988
1989 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1990
1991 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1992
1993 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1994
1995 if (ret > width)
1996 bf[width] = '\0';
1997
1998 return width;
1999 }
2000
2001 struct sort_entry sort_mem_phys_daddr = {
2002 .se_header = "Data Physical Address",
2003 .se_cmp = sort__phys_daddr_cmp,
2004 .se_snprintf = hist_entry__phys_daddr_snprintf,
2005 .se_width_idx = HISTC_MEM_PHYS_DADDR,
2006 };
2007
2008 static int64_t
2009 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2010 {
2011 uint64_t l = 0, r = 0;
2012
2013 if (left->mem_info)
2014 l = mem_info__daddr(left->mem_info)->data_page_size;
2015 if (right->mem_info)
2016 r = mem_info__daddr(right->mem_info)->data_page_size;
2017
2018 return (int64_t)(r - l);
2019 }
2020
2021 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
2022 size_t size, unsigned int width)
2023 {
2024 char str[PAGE_SIZE_NAME_LEN];
2025
2026 return repsep_snprintf(bf, size, "%-*s", width,
2027 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
2028 }
2029
2030 struct sort_entry sort_mem_data_page_size = {
2031 .se_header = "Data Page Size",
2032 .se_cmp = sort__data_page_size_cmp,
2033 .se_snprintf = hist_entry__data_page_size_snprintf,
2034 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
2035 };
2036
2037 static int64_t
2038 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2039 {
2040 uint64_t l = left->code_page_size;
2041 uint64_t r = right->code_page_size;
2042
2043 return (int64_t)(r - l);
2044 }
2045
2046 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2047 size_t size, unsigned int width)
2048 {
2049 char str[PAGE_SIZE_NAME_LEN];
2050
2051 return repsep_snprintf(bf, size, "%-*s", width,
2052 get_page_size_name(he->code_page_size, str));
2053 }
2054
2055 struct sort_entry sort_code_page_size = {
2056 .se_header = "Code Page Size",
2057 .se_cmp = sort__code_page_size_cmp,
2058 .se_snprintf = hist_entry__code_page_size_snprintf,
2059 .se_width_idx = HISTC_CODE_PAGE_SIZE,
2060 };
2061
2062 static int64_t
2063 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2064 {
2065 if (!left->branch_info || !right->branch_info)
2066 return cmp_null(left->branch_info, right->branch_info);
2067
2068 return left->branch_info->flags.abort !=
2069 right->branch_info->flags.abort;
2070 }
2071
2072 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2073 size_t size, unsigned int width)
2074 {
2075 static const char *out = "N/A";
2076
2077 if (he->branch_info) {
2078 if (he->branch_info->flags.abort)
2079 out = "A";
2080 else
2081 out = ".";
2082 }
2083
2084 return repsep_snprintf(bf, size, "%-*s", width, out);
2085 }
2086
2087 struct sort_entry sort_abort = {
2088 .se_header = "Transaction abort",
2089 .se_cmp = sort__abort_cmp,
2090 .se_snprintf = hist_entry__abort_snprintf,
2091 .se_width_idx = HISTC_ABORT,
2092 };
2093
2094 static int64_t
2095 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2096 {
2097 if (!left->branch_info || !right->branch_info)
2098 return cmp_null(left->branch_info, right->branch_info);
2099
2100 return left->branch_info->flags.in_tx !=
2101 right->branch_info->flags.in_tx;
2102 }
2103
2104 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2105 size_t size, unsigned int width)
2106 {
2107 static const char *out = "N/A";
2108
2109 if (he->branch_info) {
2110 if (he->branch_info->flags.in_tx)
2111 out = "T";
2112 else
2113 out = ".";
2114 }
2115
2116 return repsep_snprintf(bf, size, "%-*s", width, out);
2117 }
2118
2119 struct sort_entry sort_in_tx = {
2120 .se_header = "Branch in transaction",
2121 .se_cmp = sort__in_tx_cmp,
2122 .se_snprintf = hist_entry__in_tx_snprintf,
2123 .se_width_idx = HISTC_IN_TX,
2124 };
2125
2126 static int64_t
2127 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
2128 {
2129 return left->transaction - right->transaction;
2130 }
2131
2132 static inline char *add_str(char *p, const char *str)
2133 {
2134 strcpy(p, str);
2135 return p + strlen(str);
2136 }
2137
2138 static struct txbit {
2139 unsigned flag;
2140 const char *name;
2141 int skip_for_len;
2142 } txbits[] = {
2143 { PERF_TXN_ELISION, "EL ", 0 },
2144 { PERF_TXN_TRANSACTION, "TX ", 1 },
2145 { PERF_TXN_SYNC, "SYNC ", 1 },
2146 { PERF_TXN_ASYNC, "ASYNC ", 0 },
2147 { PERF_TXN_RETRY, "RETRY ", 0 },
2148 { PERF_TXN_CONFLICT, "CON ", 0 },
2149 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2150 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
2151 { 0, NULL, 0 }
2152 };
2153
2154 int hist_entry__transaction_len(void)
2155 {
2156 int i;
2157 int len = 0;
2158
2159 for (i = 0; txbits[i].name; i++) {
2160 if (!txbits[i].skip_for_len)
2161 len += strlen(txbits[i].name);
2162 }
2163 len += 4; /* :XX<space> */
2164 return len;
2165 }
2166
2167 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2168 size_t size, unsigned int width)
2169 {
2170 u64 t = he->transaction;
2171 char buf[128];
2172 char *p = buf;
2173 int i;
2174
2175 buf[0] = 0;
2176 for (i = 0; txbits[i].name; i++)
2177 if (txbits[i].flag & t)
2178 p = add_str(p, txbits[i].name);
2179 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2180 p = add_str(p, "NEITHER ");
2181 if (t & PERF_TXN_ABORT_MASK) {
2182 sprintf(p, ":%" PRIx64,
2183 (t & PERF_TXN_ABORT_MASK) >>
2184 PERF_TXN_ABORT_SHIFT);
2185 p += strlen(p);
2186 }
2187
2188 return repsep_snprintf(bf, size, "%-*s", width, buf);
2189 }
2190
2191 struct sort_entry sort_transaction = {
2192 .se_header = "Transaction ",
2193 .se_cmp = sort__transaction_cmp,
2194 .se_snprintf = hist_entry__transaction_snprintf,
2195 .se_width_idx = HISTC_TRANSACTION,
2196 };
2197
2198 /* --sort symbol_size */
2199
2200 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2201 {
2202 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2203 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2204
2205 return size_l < size_r ? -1 :
2206 size_l == size_r ? 0 : 1;
2207 }
2208
2209 static int64_t
2210 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2211 {
2212 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2213 }
2214
2215 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2216 size_t bf_size, unsigned int width)
2217 {
2218 if (sym)
2219 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2220
2221 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2222 }
2223
2224 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2225 size_t size, unsigned int width)
2226 {
2227 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2228 }
2229
2230 struct sort_entry sort_sym_size = {
2231 .se_header = "Symbol size",
2232 .se_cmp = sort__sym_size_cmp,
2233 .se_snprintf = hist_entry__sym_size_snprintf,
2234 .se_width_idx = HISTC_SYM_SIZE,
2235 };
2236
2237 /* --sort dso_size */
2238
2239 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2240 {
2241 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2242 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2243
2244 return size_l < size_r ? -1 :
2245 size_l == size_r ? 0 : 1;
2246 }
2247
2248 static int64_t
2249 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2250 {
2251 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2252 }
2253
2254 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2255 size_t bf_size, unsigned int width)
2256 {
2257 if (map && map__dso(map))
2258 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2259
2260 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2261 }
2262
2263 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2264 size_t size, unsigned int width)
2265 {
2266 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2267 }
2268
2269 struct sort_entry sort_dso_size = {
2270 .se_header = "DSO size",
2271 .se_cmp = sort__dso_size_cmp,
2272 .se_snprintf = hist_entry__dso_size_snprintf,
2273 .se_width_idx = HISTC_DSO_SIZE,
2274 };
2275
2276 /* --sort addr */
2277
2278 static int64_t
2279 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2280 {
2281 u64 left_ip = left->ip;
2282 u64 right_ip = right->ip;
2283 struct map *left_map = left->ms.map;
2284 struct map *right_map = right->ms.map;
2285
2286 if (left_map)
2287 left_ip = map__unmap_ip(left_map, left_ip);
2288 if (right_map)
2289 right_ip = map__unmap_ip(right_map, right_ip);
2290
2291 return _sort__addr_cmp(left_ip, right_ip);
2292 }
2293
2294 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2295 size_t size, unsigned int width)
2296 {
2297 u64 ip = he->ip;
2298 struct map *map = he->ms.map;
2299
2300 if (map)
2301 ip = map__unmap_ip(map, ip);
2302
2303 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2304 }
2305
2306 struct sort_entry sort_addr = {
2307 .se_header = "Address",
2308 .se_cmp = sort__addr_cmp,
2309 .se_snprintf = hist_entry__addr_snprintf,
2310 .se_width_idx = HISTC_ADDR,
2311 };
2312
2313 /* --sort type */
2314
2315 struct annotated_data_type unknown_type = {
2316 .self = {
2317 .type_name = (char *)"(unknown)",
2318 .children = LIST_HEAD_INIT(unknown_type.self.children),
2319 },
2320 };
2321
2322 static int64_t
2323 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2324 {
2325 return sort__addr_cmp(left, right);
2326 }
2327
2328 static void sort__type_init(struct hist_entry *he)
2329 {
2330 if (he->mem_type)
2331 return;
2332
2333 he->mem_type = hist_entry__get_data_type(he);
2334 if (he->mem_type == NULL) {
2335 he->mem_type = &unknown_type;
2336 he->mem_type_off = 0;
2337 }
2338 }
2339
2340 static int64_t
2341 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2342 {
2343 struct annotated_data_type *left_type = left->mem_type;
2344 struct annotated_data_type *right_type = right->mem_type;
2345
2346 if (!left_type) {
2347 sort__type_init(left);
2348 left_type = left->mem_type;
2349 }
2350
2351 if (!right_type) {
2352 sort__type_init(right);
2353 right_type = right->mem_type;
2354 }
2355
2356 return strcmp(left_type->self.type_name, right_type->self.type_name);
2357 }
2358
2359 static int64_t
2360 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2361 {
2362 return sort__type_collapse(left, right);
2363 }
2364
2365 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2366 size_t size, unsigned int width)
2367 {
2368 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2369 }
2370
2371 struct sort_entry sort_type = {
2372 .se_header = "Data Type",
2373 .se_cmp = sort__type_cmp,
2374 .se_collapse = sort__type_collapse,
2375 .se_sort = sort__type_sort,
2376 .se_init = sort__type_init,
2377 .se_snprintf = hist_entry__type_snprintf,
2378 .se_width_idx = HISTC_TYPE,
2379 };
2380
2381 /* --sort typeoff */
2382
2383 static int64_t
2384 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2385 {
2386 struct annotated_data_type *left_type = left->mem_type;
2387 struct annotated_data_type *right_type = right->mem_type;
2388 int64_t ret;
2389
2390 if (!left_type) {
2391 sort__type_init(left);
2392 left_type = left->mem_type;
2393 }
2394
2395 if (!right_type) {
2396 sort__type_init(right);
2397 right_type = right->mem_type;
2398 }
2399
2400 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2401 if (ret)
2402 return ret;
2403 return left->mem_type_off - right->mem_type_off;
2404 }
2405
2406 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2407 size_t size, unsigned int width __maybe_unused)
2408 {
2409 struct annotated_data_type *he_type = he->mem_type;
2410 char buf[4096];
2411
2412 if (he_type == &unknown_type || he_type == &stackop_type ||
2413 he_type == &canary_type)
2414 return repsep_snprintf(bf, size, "%s", he_type->self.type_name);
2415
2416 if (!annotated_data_type__get_member_name(he_type, buf, sizeof(buf),
2417 he->mem_type_off))
2418 scnprintf(buf, sizeof(buf), "no field");
2419
2420 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2421 he->mem_type_off, buf);
2422 }
2423
2424 struct sort_entry sort_type_offset = {
2425 .se_header = "Data Type Offset",
2426 .se_cmp = sort__type_cmp,
2427 .se_collapse = sort__typeoff_sort,
2428 .se_sort = sort__typeoff_sort,
2429 .se_init = sort__type_init,
2430 .se_snprintf = hist_entry__typeoff_snprintf,
2431 .se_width_idx = HISTC_TYPE_OFFSET,
2432 };
2433
2434 /* --sort typecln */
2435
2436 /* TODO: use actual value in the system */
2437 #define TYPE_CACHELINE_SIZE 64
2438
2439 static int64_t
2440 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2441 {
2442 struct annotated_data_type *left_type = left->mem_type;
2443 struct annotated_data_type *right_type = right->mem_type;
2444 int64_t left_cln, right_cln;
2445 int64_t ret;
2446
2447 if (!left_type) {
2448 sort__type_init(left);
2449 left_type = left->mem_type;
2450 }
2451
2452 if (!right_type) {
2453 sort__type_init(right);
2454 right_type = right->mem_type;
2455 }
2456
2457 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2458 if (ret)
2459 return ret;
2460
2461 left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
2462 right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
2463 return left_cln - right_cln;
2464 }
2465
2466 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2467 size_t size, unsigned int width __maybe_unused)
2468 {
2469 struct annotated_data_type *he_type = he->mem_type;
2470
2471 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2472 he->mem_type_off / TYPE_CACHELINE_SIZE);
2473 }
2474
2475 struct sort_entry sort_type_cacheline = {
2476 .se_header = "Data Type Cacheline",
2477 .se_cmp = sort__type_cmp,
2478 .se_collapse = sort__typecln_sort,
2479 .se_sort = sort__typecln_sort,
2480 .se_init = sort__type_init,
2481 .se_snprintf = hist_entry__typecln_snprintf,
2482 .se_width_idx = HISTC_TYPE_CACHELINE,
2483 };
2484
2485
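/*
 * Ties a --sort key name to the sort_entry that implements it; 'taken'
 * marks dimensions already added so a repeated key is only registered once.
 */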
2486 struct sort_dimension {
2487 const char *name;
2488 struct sort_entry *entry;
2489 int taken;
2490 };
2491
2492 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2493 {
2494 return 0;
2495 }
2496
2497 const char * __weak arch_perf_header_entry(const char *se_header)
2498 {
2499 return se_header;
2500 }
2501
2502 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2503 {
2504 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2505 }
2506
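/* DIM() fills the table slot for a SORT_* index with its name and sort_entry. */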
2507 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2508
2509 static struct sort_dimension common_sort_dimensions[] = {
2510 DIM(SORT_PID, "pid", sort_thread),
2511 DIM(SORT_COMM, "comm", sort_comm),
2512 DIM(SORT_DSO, "dso", sort_dso),
2513 DIM(SORT_SYM, "symbol", sort_sym),
2514 DIM(SORT_PARENT, "parent", sort_parent),
2515 DIM(SORT_CPU, "cpu", sort_cpu),
2516 DIM(SORT_SOCKET, "socket", sort_socket),
2517 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2518 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2519 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2520 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2521 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2522 #ifdef HAVE_LIBTRACEEVENT
2523 DIM(SORT_TRACE, "trace", sort_trace),
2524 #endif
2525 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2526 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2527 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2528 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2529 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2530 DIM(SORT_TIME, "time", sort_time),
2531 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2532 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2533 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2534 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2535 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2536 DIM(SORT_ADDR, "addr", sort_addr),
2537 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2538 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2539 DIM(SORT_SIMD, "simd", sort_simd),
2540 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2541 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2542 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2543 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
2544 DIM(SORT_PARALLELISM, "parallelism", sort_parallelism),
2545 };
2546
2547 #undef DIM
2548
2549 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2550
2551 static struct sort_dimension bstack_sort_dimensions[] = {
2552 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2553 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2554 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2555 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2556 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2557 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2558 DIM(SORT_ABORT, "abort", sort_abort),
2559 DIM(SORT_CYCLES, "cycles", sort_cycles),
2560 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2561 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2562 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2563 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2564 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2565 DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
2566 "callchain_branch_predicted",
2567 sort_callchain_branch_predicted),
2568 DIM(SORT_CALLCHAIN_BRANCH_ABORT,
2569 "callchain_branch_abort",
2570 sort_callchain_branch_abort),
2571 DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
2572 "callchain_branch_cycles",
2573 sort_callchain_branch_cycles)
2574 };
2575
2576 #undef DIM
2577
2578 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2579
2580 static struct sort_dimension memory_sort_dimensions[] = {
2581 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2582 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2583 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2584 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2585 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2586 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2587 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2588 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2589 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2590 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2591 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2592 };
2593
2594 #undef DIM
2595
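/*
 * Output fields backed by a perf_hpp_fmt rather than a sort_entry;
 * 'taken' and 'was_taken' track whether the field has been added to the
 * sort/output lists.
 */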
2596 struct hpp_dimension {
2597 const char *name;
2598 struct perf_hpp_fmt *fmt;
2599 int taken;
2600 int was_taken;
2601 };
2602
2603 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2604
2605 static struct hpp_dimension hpp_sort_dimensions[] = {
2606 DIM(PERF_HPP__OVERHEAD, "overhead"),
2607 DIM(PERF_HPP__LATENCY, "latency"),
2608 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2609 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2610 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2611 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2612 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2613 DIM(PERF_HPP__LATENCY_ACC, "latency_children"),
2614 DIM(PERF_HPP__SAMPLES, "sample"),
2615 DIM(PERF_HPP__PERIOD, "period"),
2616 DIM(PERF_HPP__WEIGHT1, "weight1"),
2617 DIM(PERF_HPP__WEIGHT2, "weight2"),
2618 DIM(PERF_HPP__WEIGHT3, "weight3"),
2619 /* aliases for weight_struct */
2620 DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2621 DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2622 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2623 };
2624
2625 #undef DIM
2626
2627 struct hpp_sort_entry {
2628 struct perf_hpp_fmt hpp;
2629 struct sort_entry *se;
2630 };
2631
2632 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2633 {
2634 struct hpp_sort_entry *hse;
2635
2636 if (!perf_hpp__is_sort_entry(fmt))
2637 return;
2638
2639 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2640 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2641 }
2642
2643 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2644 struct hists *hists, int line __maybe_unused,
2645 int *span __maybe_unused)
2646 {
2647 struct hpp_sort_entry *hse;
2648 size_t len = fmt->user_len;
2649
2650 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2651
2652 if (!len)
2653 len = hists__col_len(hists, hse->se->se_width_idx);
2654
2655 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2656 }
2657
2658 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2659 struct perf_hpp *hpp __maybe_unused,
2660 struct hists *hists)
2661 {
2662 struct hpp_sort_entry *hse;
2663 size_t len = fmt->user_len;
2664
2665 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2666
2667 if (!len)
2668 len = hists__col_len(hists, hse->se->se_width_idx);
2669
2670 return len;
2671 }
2672
2673 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2674 struct hist_entry *he)
2675 {
2676 struct hpp_sort_entry *hse;
2677 size_t len = fmt->user_len;
2678
2679 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2680
2681 if (!len)
2682 len = hists__col_len(he->hists, hse->se->se_width_idx);
2683
2684 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2685 }
2686
2687 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2688 struct hist_entry *a, struct hist_entry *b)
2689 {
2690 struct hpp_sort_entry *hse;
2691
2692 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2693 return hse->se->se_cmp(a, b);
2694 }
2695
2696 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2697 struct hist_entry *a, struct hist_entry *b)
2698 {
2699 struct hpp_sort_entry *hse;
2700 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2701
2702 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2703 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2704 return collapse_fn(a, b);
2705 }
2706
2707 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2708 struct hist_entry *a, struct hist_entry *b)
2709 {
2710 struct hpp_sort_entry *hse;
2711 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2712
2713 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2714 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2715 return sort_fn(a, b);
2716 }
2717
2718 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2719 {
2720 return format->header == __sort__hpp_header;
2721 }
2722
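/*
 * Generate perf_hpp__is_<key>_entry() helpers that report whether a format
 * wraps the corresponding sort_<key> entry.
 */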
2723 #define MK_SORT_ENTRY_CHK(key) \
2724 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2725 { \
2726 struct hpp_sort_entry *hse; \
2727 \
2728 if (!perf_hpp__is_sort_entry(fmt)) \
2729 return false; \
2730 \
2731 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2732 return hse->se == &sort_ ## key ; \
2733 }
2734
2735 #ifdef HAVE_LIBTRACEEVENT
2736 MK_SORT_ENTRY_CHK(trace)
2737 #else
2738 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2739 {
2740 return false;
2741 }
2742 #endif
2743 MK_SORT_ENTRY_CHK(srcline)
2744 MK_SORT_ENTRY_CHK(srcfile)
2745 MK_SORT_ENTRY_CHK(thread)
2746 MK_SORT_ENTRY_CHK(comm)
2747 MK_SORT_ENTRY_CHK(dso)
2748 MK_SORT_ENTRY_CHK(sym)
2749 MK_SORT_ENTRY_CHK(parallelism)
2750
2751
2752 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2753 {
2754 struct hpp_sort_entry *hse_a;
2755 struct hpp_sort_entry *hse_b;
2756
2757 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2758 return false;
2759
2760 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2761 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2762
2763 return hse_a->se == hse_b->se;
2764 }
2765
2766 static void hse_free(struct perf_hpp_fmt *fmt)
2767 {
2768 struct hpp_sort_entry *hse;
2769
2770 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2771 free(hse);
2772 }
2773
2774 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2775 {
2776 struct hpp_sort_entry *hse;
2777
2778 if (!perf_hpp__is_sort_entry(fmt))
2779 return;
2780
2781 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2782
2783 if (hse->se->se_init)
2784 hse->se->se_init(he);
2785 }
2786
2787 static struct hpp_sort_entry *
2788 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2789 {
2790 struct hpp_sort_entry *hse;
2791
2792 hse = malloc(sizeof(*hse));
2793 if (hse == NULL) {
2794 pr_err("Memory allocation failed\n");
2795 return NULL;
2796 }
2797
2798 hse->se = sd->entry;
2799 hse->hpp.name = sd->entry->se_header;
2800 hse->hpp.header = __sort__hpp_header;
2801 hse->hpp.width = __sort__hpp_width;
2802 hse->hpp.entry = __sort__hpp_entry;
2803 hse->hpp.color = NULL;
2804
2805 hse->hpp.cmp = __sort__hpp_cmp;
2806 hse->hpp.collapse = __sort__hpp_collapse;
2807 hse->hpp.sort = __sort__hpp_sort;
2808 hse->hpp.equal = __sort__hpp_equal;
2809 hse->hpp.free = hse_free;
2810 hse->hpp.init = hse_init;
2811
2812 INIT_LIST_HEAD(&hse->hpp.list);
2813 INIT_LIST_HEAD(&hse->hpp.sort_list);
2814 hse->hpp.elide = false;
2815 hse->hpp.len = 0;
2816 hse->hpp.user_len = 0;
2817 hse->hpp.level = level;
2818
2819 return hse;
2820 }
2821
2822 static void hpp_free(struct perf_hpp_fmt *fmt)
2823 {
2824 free(fmt);
2825 }
2826
2827 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2828 int level)
2829 {
2830 struct perf_hpp_fmt *fmt;
2831
2832 fmt = memdup(hd->fmt, sizeof(*fmt));
2833 if (fmt) {
2834 INIT_LIST_HEAD(&fmt->list);
2835 INIT_LIST_HEAD(&fmt->sort_list);
2836 fmt->free = hpp_free;
2837 fmt->level = level;
2838 }
2839
2840 return fmt;
2841 }
2842
2843 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2844 {
2845 struct perf_hpp_fmt *fmt;
2846 struct hpp_sort_entry *hse;
2847 int ret = -1;
2848 int r;
2849
2850 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2851 if (!perf_hpp__is_sort_entry(fmt))
2852 continue;
2853
2854 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2855 if (hse->se->se_filter == NULL)
2856 continue;
2857
2858 /*
2859 	 * A hist entry is filtered if any sort key in the hpp list
2860 	 * is applied, but non-matching filter types are skipped.
2861 */
2862 r = hse->se->se_filter(he, type, arg);
2863 if (r >= 0) {
2864 if (ret < 0)
2865 ret = 0;
2866 ret |= r;
2867 }
2868 }
2869
2870 return ret;
2871 }
2872
2873 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2874 struct perf_hpp_list *list,
2875 int level)
2876 {
2877 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2878
2879 if (hse == NULL)
2880 return -1;
2881
2882 perf_hpp_list__register_sort_field(list, &hse->hpp);
2883 return 0;
2884 }
2885
2886 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2887 struct perf_hpp_list *list)
2888 {
2889 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2890
2891 if (hse == NULL)
2892 return -1;
2893
2894 perf_hpp_list__column_register(list, &hse->hpp);
2895 return 0;
2896 }
2897
2898 #ifndef HAVE_LIBTRACEEVENT
2899 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2900 {
2901 return false;
2902 }
2903 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2904 struct hists *hists __maybe_unused)
2905 {
2906 return false;
2907 }
2908 #else
2909 struct hpp_dynamic_entry {
2910 struct perf_hpp_fmt hpp;
2911 struct evsel *evsel;
2912 struct tep_format_field *field;
2913 unsigned dynamic_len;
2914 bool raw_trace;
2915 };
2916
2917 static int hde_width(struct hpp_dynamic_entry *hde)
2918 {
2919 if (!hde->hpp.len) {
2920 int len = hde->dynamic_len;
2921 int namelen = strlen(hde->field->name);
2922 int fieldlen = hde->field->size;
2923
2924 if (namelen > len)
2925 len = namelen;
2926
2927 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2928 			/* length needed to print the numbers in hex */
2929 fieldlen = hde->field->size * 2 + 2;
2930 }
2931 if (fieldlen > len)
2932 len = fieldlen;
2933
2934 hde->hpp.len = len;
2935 }
2936 return hde->hpp.len;
2937 }
2938
2939 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2940 struct hist_entry *he)
2941 {
2942 char *str, *pos;
2943 struct tep_format_field *field = hde->field;
2944 size_t namelen;
2945 bool last = false;
2946
2947 if (hde->raw_trace)
2948 return;
2949
2950 /* parse pretty print result and update max length */
2951 if (!he->trace_output)
2952 he->trace_output = get_trace_output(he);
2953
2954 namelen = strlen(field->name);
2955 str = he->trace_output;
2956
2957 while (str) {
2958 pos = strchr(str, ' ');
2959 if (pos == NULL) {
2960 last = true;
2961 pos = str + strlen(str);
2962 }
2963
2964 if (!strncmp(str, field->name, namelen)) {
2965 size_t len;
2966
2967 str += namelen + 1;
2968 len = pos - str;
2969
2970 if (len > hde->dynamic_len)
2971 hde->dynamic_len = len;
2972 break;
2973 }
2974
2975 if (last)
2976 str = NULL;
2977 else
2978 str = pos + 1;
2979 }
2980 }
2981
2982 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2983 struct hists *hists __maybe_unused,
2984 int line __maybe_unused,
2985 int *span __maybe_unused)
2986 {
2987 struct hpp_dynamic_entry *hde;
2988 size_t len = fmt->user_len;
2989
2990 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2991
2992 if (!len)
2993 len = hde_width(hde);
2994
2995 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2996 }
2997
2998 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2999 struct perf_hpp *hpp __maybe_unused,
3000 struct hists *hists __maybe_unused)
3001 {
3002 struct hpp_dynamic_entry *hde;
3003 size_t len = fmt->user_len;
3004
3005 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3006
3007 if (!len)
3008 len = hde_width(hde);
3009
3010 return len;
3011 }
3012
3013 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3014 {
3015 struct hpp_dynamic_entry *hde;
3016
3017 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3018
3019 return hists_to_evsel(hists) == hde->evsel;
3020 }
3021
3022 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3023 struct hist_entry *he)
3024 {
3025 struct hpp_dynamic_entry *hde;
3026 size_t len = fmt->user_len;
3027 char *str, *pos;
3028 struct tep_format_field *field;
3029 size_t namelen;
3030 bool last = false;
3031 int ret;
3032
3033 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3034
3035 if (!len)
3036 len = hde_width(hde);
3037
3038 if (hde->raw_trace)
3039 goto raw_field;
3040
3041 if (!he->trace_output)
3042 he->trace_output = get_trace_output(he);
3043
3044 field = hde->field;
3045 namelen = strlen(field->name);
3046 str = he->trace_output;
3047
3048 while (str) {
3049 pos = strchr(str, ' ');
3050 if (pos == NULL) {
3051 last = true;
3052 pos = str + strlen(str);
3053 }
3054
3055 if (!strncmp(str, field->name, namelen)) {
3056 str += namelen + 1;
3057 str = strndup(str, pos - str);
3058
3059 if (str == NULL)
3060 return scnprintf(hpp->buf, hpp->size,
3061 "%*.*s", len, len, "ERROR");
3062 break;
3063 }
3064
3065 if (last)
3066 str = NULL;
3067 else
3068 str = pos + 1;
3069 }
3070
3071 if (str == NULL) {
3072 struct trace_seq seq;
3073 raw_field:
3074 trace_seq_init(&seq);
3075 tep_print_field(&seq, he->raw_data, hde->field);
3076 str = seq.buffer;
3077 }
3078
3079 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3080 free(str);
3081 return ret;
3082 }
3083
3084 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
3085 struct hist_entry *a, struct hist_entry *b)
3086 {
3087 struct hpp_dynamic_entry *hde;
3088 struct tep_format_field *field;
3089 unsigned offset, size;
3090
3091 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3092
3093 field = hde->field;
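	/*
	 * Dynamic fields record their payload location packed into the
	 * traced value: offset in the low 16 bits, size in the next 16.
	 * Unpack it before comparing the raw bytes.
	 */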
3094 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3095 unsigned long long dyn;
3096
3097 tep_read_number_field(field, a->raw_data, &dyn);
3098 offset = dyn & 0xffff;
3099 size = (dyn >> 16) & 0xffff;
3100 if (tep_field_is_relative(field->flags))
3101 offset += field->offset + field->size;
3102 /* record max width for output */
3103 if (size > hde->dynamic_len)
3104 hde->dynamic_len = size;
3105 } else {
3106 offset = field->offset;
3107 size = field->size;
3108 }
3109
3110 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
3111 }
3112
3113 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3114 {
3115 return fmt->cmp == __sort__hde_cmp;
3116 }
3117
3118 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3119 {
3120 struct hpp_dynamic_entry *hde_a;
3121 struct hpp_dynamic_entry *hde_b;
3122
3123 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3124 return false;
3125
3126 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3127 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3128
3129 return hde_a->field == hde_b->field;
3130 }
3131
3132 static void hde_free(struct perf_hpp_fmt *fmt)
3133 {
3134 struct hpp_dynamic_entry *hde;
3135
3136 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3137 free(hde);
3138 }
3139
3140 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3141 {
3142 struct hpp_dynamic_entry *hde;
3143
3144 if (!perf_hpp__is_dynamic_entry(fmt))
3145 return;
3146
3147 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3148 update_dynamic_len(hde, he);
3149 }
3150
3151 static struct hpp_dynamic_entry *
3152 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3153 int level)
3154 {
3155 struct hpp_dynamic_entry *hde;
3156
3157 hde = malloc(sizeof(*hde));
3158 if (hde == NULL) {
3159 pr_debug("Memory allocation failed\n");
3160 return NULL;
3161 }
3162
3163 hde->evsel = evsel;
3164 hde->field = field;
3165 hde->dynamic_len = 0;
3166
3167 hde->hpp.name = field->name;
3168 hde->hpp.header = __sort__hde_header;
3169 hde->hpp.width = __sort__hde_width;
3170 hde->hpp.entry = __sort__hde_entry;
3171 hde->hpp.color = NULL;
3172
3173 hde->hpp.init = __sort__hde_init;
3174 hde->hpp.cmp = __sort__hde_cmp;
3175 hde->hpp.collapse = __sort__hde_cmp;
3176 hde->hpp.sort = __sort__hde_cmp;
3177 hde->hpp.equal = __sort__hde_equal;
3178 hde->hpp.free = hde_free;
3179
3180 INIT_LIST_HEAD(&hde->hpp.list);
3181 INIT_LIST_HEAD(&hde->hpp.sort_list);
3182 hde->hpp.elide = false;
3183 hde->hpp.len = 0;
3184 hde->hpp.user_len = 0;
3185 hde->hpp.level = level;
3186
3187 return hde;
3188 }
3189 #endif /* HAVE_LIBTRACEEVENT */
3190
3191 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3192 {
3193 struct perf_hpp_fmt *new_fmt = NULL;
3194
3195 if (perf_hpp__is_sort_entry(fmt)) {
3196 struct hpp_sort_entry *hse, *new_hse;
3197
3198 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3199 new_hse = memdup(hse, sizeof(*hse));
3200 if (new_hse)
3201 new_fmt = &new_hse->hpp;
3202 #ifdef HAVE_LIBTRACEEVENT
3203 } else if (perf_hpp__is_dynamic_entry(fmt)) {
3204 struct hpp_dynamic_entry *hde, *new_hde;
3205
3206 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3207 new_hde = memdup(hde, sizeof(*hde));
3208 if (new_hde)
3209 new_fmt = &new_hde->hpp;
3210 #endif
3211 } else {
3212 new_fmt = memdup(fmt, sizeof(*fmt));
3213 }
3214
3215 INIT_LIST_HEAD(&new_fmt->list);
3216 INIT_LIST_HEAD(&new_fmt->sort_list);
3217
3218 return new_fmt;
3219 }
3220
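/*
 * Split a dynamic sort token of the form [event.]field[/opt] in place:
 * '.' separates the optional event name, '/' introduces an option string.
 */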
3221 static int parse_field_name(char *str, char **event, char **field, char **opt)
3222 {
3223 char *event_name, *field_name, *opt_name;
3224
3225 event_name = str;
3226 field_name = strchr(str, '.');
3227
3228 if (field_name) {
3229 *field_name++ = '\0';
3230 } else {
3231 event_name = NULL;
3232 field_name = str;
3233 }
3234
3235 opt_name = strchr(field_name, '/');
3236 if (opt_name)
3237 *opt_name++ = '\0';
3238
3239 *event = event_name;
3240 *field = field_name;
3241 *opt = opt_name;
3242
3243 return 0;
3244 }
3245
3246 /* find the matching evsel using a given event name. The event name can be:
3247 * 1. '%' + event index (e.g. '%1' for first event)
3248 * 2. full event name (e.g. sched:sched_switch)
3249 * 3. partial event name (should not contain ':')
3250 */
3251 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3252 {
3253 struct evsel *evsel = NULL;
3254 struct evsel *pos;
3255 bool full_name;
3256
3257 /* case 1 */
3258 if (event_name[0] == '%') {
3259 int nr = strtol(event_name+1, NULL, 0);
3260
3261 if (nr > evlist->core.nr_entries)
3262 return NULL;
3263
3264 evsel = evlist__first(evlist);
3265 while (--nr > 0)
3266 evsel = evsel__next(evsel);
3267
3268 return evsel;
3269 }
3270
3271 full_name = !!strchr(event_name, ':');
3272 evlist__for_each_entry(evlist, pos) {
3273 /* case 2 */
3274 if (full_name && evsel__name_is(pos, event_name))
3275 return pos;
3276 /* case 3 */
3277 if (!full_name && strstr(pos->name, event_name)) {
3278 if (evsel) {
3279 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3280 event_name, evsel->name, pos->name);
3281 return NULL;
3282 }
3283 evsel = pos;
3284 }
3285 }
3286
3287 return evsel;
3288 }
3289
3290 #ifdef HAVE_LIBTRACEEVENT
3291 static int __dynamic_dimension__add(struct evsel *evsel,
3292 struct tep_format_field *field,
3293 bool raw_trace, int level)
3294 {
3295 struct hpp_dynamic_entry *hde;
3296
3297 hde = __alloc_dynamic_entry(evsel, field, level);
3298 if (hde == NULL)
3299 return -ENOMEM;
3300
3301 hde->raw_trace = raw_trace;
3302
3303 perf_hpp__register_sort_field(&hde->hpp);
3304 return 0;
3305 }
3306
3307 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3308 {
3309 int ret;
3310 struct tep_event *tp_format = evsel__tp_format(evsel);
3311 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3312 while (field) {
3313 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3314 if (ret < 0)
3315 return ret;
3316
3317 field = field->next;
3318 }
3319 return 0;
3320 }
3321
3322 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3323 int level)
3324 {
3325 int ret;
3326 struct evsel *evsel;
3327
3328 evlist__for_each_entry(evlist, evsel) {
3329 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3330 continue;
3331
3332 ret = add_evsel_fields(evsel, raw_trace, level);
3333 if (ret < 0)
3334 return ret;
3335 }
3336 return 0;
3337 }
3338
3339 static int add_all_matching_fields(struct evlist *evlist,
3340 char *field_name, bool raw_trace, int level)
3341 {
3342 int ret = -ESRCH;
3343 struct evsel *evsel;
3344
3345 evlist__for_each_entry(evlist, evsel) {
3346 struct tep_event *tp_format;
3347 struct tep_format_field *field;
3348
3349 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3350 continue;
3351
3352 tp_format = evsel__tp_format(evsel);
3353 if (tp_format == NULL)
3354 continue;
3355
3356 field = tep_find_any_field(tp_format, field_name);
3357 if (field == NULL)
3358 continue;
3359
3360 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3361 if (ret < 0)
3362 break;
3363 }
3364 return ret;
3365 }
3366 #endif /* HAVE_LIBTRACEEVENT */
3367
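/*
 * Add a dynamic sort key for a tracepoint field. The token may name a
 * specific "event.field", a bare field matched against all tracepoint
 * events, "trace_fields" to add every field, or "*" to add all fields of
 * one event; a "/raw" option skips the pretty-printed output.
 */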
3368 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3369 int level)
3370 {
3371 char *str, *event_name, *field_name, *opt_name;
3372 struct evsel *evsel;
3373 bool raw_trace = symbol_conf.raw_trace;
3374 int ret = 0;
3375
3376 if (evlist == NULL)
3377 return -ENOENT;
3378
3379 str = strdup(tok);
3380 if (str == NULL)
3381 return -ENOMEM;
3382
3383 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3384 ret = -EINVAL;
3385 goto out;
3386 }
3387
3388 if (opt_name) {
3389 if (strcmp(opt_name, "raw")) {
3390 pr_debug("unsupported field option %s\n", opt_name);
3391 ret = -EINVAL;
3392 goto out;
3393 }
3394 raw_trace = true;
3395 }
3396
3397 #ifdef HAVE_LIBTRACEEVENT
3398 if (!strcmp(field_name, "trace_fields")) {
3399 ret = add_all_dynamic_fields(evlist, raw_trace, level);
3400 goto out;
3401 }
3402
3403 if (event_name == NULL) {
3404 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3405 goto out;
3406 }
3407 #else
3408 evlist__for_each_entry(evlist, evsel) {
3409 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3410 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3411 ret = -ENOTSUP;
3412 }
3413 }
3414
3415 if (ret) {
3416 pr_err("\n");
3417 goto out;
3418 }
3419 #endif
3420
3421 evsel = find_evsel(evlist, event_name);
3422 if (evsel == NULL) {
3423 pr_debug("Cannot find event: %s\n", event_name);
3424 ret = -ENOENT;
3425 goto out;
3426 }
3427
3428 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3429 pr_debug("%s is not a tracepoint event\n", event_name);
3430 ret = -EINVAL;
3431 goto out;
3432 }
3433
3434 #ifdef HAVE_LIBTRACEEVENT
3435 if (!strcmp(field_name, "*")) {
3436 ret = add_evsel_fields(evsel, raw_trace, level);
3437 } else {
3438 struct tep_event *tp_format = evsel__tp_format(evsel);
3439 struct tep_format_field *field =
3440 tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
3441
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* go through 'out' so the duplicated token is freed */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}
#else
	(void)level;
	(void)raw_trace;
#endif /* HAVE_LIBTRACEEVENT */

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	hd->was_taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

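/*
 * Add the hpp column at index 'col' to the global output field list.
 * With 'implicit' set, only columns that were already requested as sort
 * keys (was_taken) are added; the rest are silently skipped.
 */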
int hpp_dimension__add_output(unsigned col, bool implicit)
{
	struct hpp_dimension *hd;

	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	hd = &hpp_sort_dimensions[col];
	if (implicit && !hd->was_taken)
		return 0;
	return __hpp_dimension__add_output(&perf_hpp_list, hd);
}

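/*
 * Resolve one --sort token against the common, hpp, branch-stack and
 * memory dimension tables (in that order) and register it at the given
 * hierarchy level.  Tokens that match nothing are finally tried as
 * dynamic tracepoint fields.  Returns 0 on success, -EINVAL for a key
 * that is not valid in the current sort mode and -ESRCH for an unknown
 * key.
 */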
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i, j;

	/*
	 * Check to see if there are any arch specific
	 * sort dimensions not applicable for the current
	 * architecture. If so, skip that sort key since
	 * we don't want to display it in the output fields.
	 */
	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
		if (!strcmp(arch_specific_sort_keys[j], tok) &&
		    !arch_support_sort_key(tok)) {
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
				sort_dimension_add_dynamic_header(sd);
		}

		if (sd->entry == &sort_parent && parent_pattern) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but the name of symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		} else if (sd->entry == &sort_type_offset) {
			symbol_conf.annotate_data_member = true;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if ((sort__mode != SORT_MODE__BRANCH) &&
		    strncasecmp(tok, "callchain_branch_predicted",
				strlen(tok)) &&
		    strncasecmp(tok, "callchain_branch_abort",
				strlen(tok)) &&
		    strncasecmp(tok, "callchain_branch_cycles",
				strlen(tok)))
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

/* This should match with sort_dimension__add() above */
static bool is_hpp_sort_key(const char *key)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) {
		if (!strcmp(arch_specific_sort_keys[i], key) &&
		    !arch_support_sort_key(key)) {
			return false;
		}
	}

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (sd->name && !strncasecmp(key, sd->name, strlen(key)))
			return false;
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (!strncasecmp(key, hd->name, strlen(key)))
			return true;
	}
	return false;
}

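/*
 * Split a --sort string on ',', ' ', '{' and '}' and add each key via
 * sort_dimension__add().  Braces keep keys on the same hierarchy level,
 * e.g. (an illustrative invocation, not taken from this file):
 *
 *	perf report --hierarchy -s '{comm,dso},sym'
 *
 * puts comm and dso on one level with sym nested below it, while
 * consecutive output (hpp) keys such as overhead stay on the level of
 * the previous hpp key.
 */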
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	int prev_level = 0;
	bool in_group = false;
	bool prev_was_hpp = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			if (is_hpp_sort_key(tok)) {
				/* keep output (hpp) sort keys in the same level */
				if (prev_was_hpp) {
					bool next_same = (level == next_level);

					level = prev_level;
					next_level = next_same ? level : level + 1;
				}
				prev_was_hpp = true;
			} else {
				prev_was_hpp = false;
			}

			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
			prev_level = level;
		}

		level = next_level;
	} while (tmp);

	return ret;
}

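/*
 * Pick the default sort order string for the current sort mode.  When
 * every event in the evlist is a tracepoint, switch to
 * SORT_MODE__TRACEPOINT so the trace-oriented default is used.
 */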
static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string here but never free it,
	 * because it is referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}

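/*
 * Prepend the implicit overhead/latency sort keys (and their _children
 * variants when callchains are cumulated) to the user-supplied keys,
 * unless the user already asked for them.  perf diff manages its own
 * columns, so its keys are returned untouched.
 */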
static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	if (symbol_conf.prefer_latency) {
		keys = prefix_if_not_in("overhead", keys);
		keys = prefix_if_not_in("latency", keys);
		if (symbol_conf.cumulate_callchain) {
			keys = prefix_if_not_in("overhead_children", keys);
			keys = prefix_if_not_in("latency_children", keys);
		}
	} else if (!keys || (!strstr(keys, "overhead") &&
			!strstr(keys, "latency"))) {
		if (symbol_conf.enable_latency)
			keys = prefix_if_not_in("latency", keys);
		keys = prefix_if_not_in("overhead", keys);
		if (symbol_conf.cumulate_callchain) {
			if (symbol_conf.enable_latency)
				keys = prefix_if_not_in("latency_children", keys);
			keys = prefix_if_not_in("overhead_children", keys);
		}
	}

	return keys;
}

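/*
 * Build the sort key list from --sort (or the mode's default order),
 * prepending the implicit overhead keys unless a strict --fields order
 * was given, and register everything on perf_hpp_list.
 */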
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	case HISTC_ADDR_FROM:
		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
	case HISTC_ADDR_TO:
		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
	default:
		break;
	}

	return false;
}

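/*
 * Elide columns whose filter list contains exactly one entry (e.g. a
 * single dso or comm filter), since such a column would show the same
 * string on every line.  If that would hide every sort column, un-elide
 * them all again.
 */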
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

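/*
 * Resolve one --fields token against the dimension tables and append it
 * to the output field list.  Returns -EINVAL for a key that is invalid
 * in the current sort mode and -ESRCH for an unknown key.
 */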
int output_field_add(struct perf_hpp_list *list, const char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		if (!strcasecmp(tok, "weight"))
			ui__warning("--fields weight shows the average value unlike in the --sort key.\n");

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

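/*
 * Set up the sort keys and output fields from --sort/--fields on the
 * global perf_hpp_list and cross-link the two lists: sort keys become
 * output columns and explicit output fields become sort keys.
 */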
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (!str)
		return;

	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

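/*
 * Build the help text listing the valid sort keys for 'mode', wrapped
 * to fit after 'prefix' in the --sort/--fields option descriptions.
 * The returned string is allocated; the caller is responsible for
 * freeing it.
 */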
char *sort_help(const char *prefix, enum sort_mode mode)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
		add_sort_string(&sb, bstack_sort_dimensions,
				ARRAY_SIZE(bstack_sort_dimensions), &len);
	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
		add_sort_string(&sb, memory_sort_dimensions,
				ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}