// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

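/*
 * Call the given print callback and advance hpp->buf/hpp->size by the
 * number of bytes written, so that consecutive fields of one entry are
 * appended to the same output buffer.
 */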
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

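/*
 * Print a single value according to its format type: PERCENT and
 * LATENCY values are printed as a percentage of the hists total,
 * AVERAGE values are divided by the number of samples, and RAW values
 * are printed as-is.
 */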
static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
			    int nr_samples, const char *fmt, int len,
			    hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT || fmtype == PERF_HPP_FMT_TYPE__LATENCY) {
		double percent = 0.0;
		u64 total = fmtype == PERF_HPP_FMT_TYPE__PERCENT ? hists__total_period(hists) :
			hists__total_latency(hists);

		if (total)
			percent = 100.0 * val / total;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
		double avg = nr_samples ? (1.0 * val / nr_samples) : 0;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
	}

	return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
}

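/*
 * One slot per (group) event: the hists it belongs to, the field value
 * taken from the matching hist_entry, and that entry's sample count.
 */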
struct hpp_fmt_value {
	struct hists *hists;
	u64 val;
	int samples;
};

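/*
 * Print the field for a hist_entry, and for a group leader also the
 * fields of all group members, which are found via the entry's pair
 * list.  Returns the total number of bytes written.
 */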
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	struct evsel *pos;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int i = 0, nr_members = 1;
	struct hpp_fmt_value *values;

	if (evsel__is_group_event(evsel))
		nr_members = evsel->core.nr_members;

	values = calloc(nr_members, sizeof(*values));
	if (values == NULL)
		return 0;

	values[0].hists = evsel__hists(evsel);
	values[0].val = get_field(he);
	values[0].samples = he->stat.nr_events;

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		for_each_group_member(pos, evsel)
			values[++i].hists = evsel__hists(pos);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			for (i = 0; i < nr_members; i++) {
				if (values[i].hists != pair->hists)
					continue;

				values[i].val = get_field(pair);
				values[i].samples = pair->stat.nr_events;
				break;
			}
		}
	}

	for (i = 0; i < nr_members; i++) {
		if (symbol_conf.skip_empty &&
		    values[i].hists->stats.nr_samples == 0)
			continue;

		ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
					values[i].samples, fmt, len,
					print_fn, fmtype);
	}

	free(values);

	/*
	 * Restore the original buf and size as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

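/*
 * Format one column: the width comes from the user-specified length or
 * the default, but never less than the header name.  Percent and
 * latency output reserves two extra characters for the leading space
 * and trailing '%'; other fields reserve one for the leading space.
 * With a field separator, widths are ignored and a minimal width of 1
 * is used.
 */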
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn,
	     enum perf_hpp_fmt_type fmtype)
{
	int len = max(fmt->user_len ?: fmt->len, (int)strlen(fmt->name));

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmtype);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT || fmtype == PERF_HPP_FMT_TYPE__LATENCY)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
}

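/*
 * Accumulated (cumulative callchain) variant: prints "N/A" when
 * callchain accumulation is disabled, otherwise behaves like hpp__fmt().
 */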
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn,
		 enum perf_hpp_fmt_type fmtype)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

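/*
 * Collect the field values of all group members for two entries into
 * freshly allocated arrays indexed by group index, so the sort
 * functions can compare the two entries member by member.
 */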
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

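/*
 * Compare two entries by the field of the group member selected with
 * symbol_conf.group_sort_idx: that member is compared first, then the
 * remaining members in group order.  Falls back to the plain leader
 * comparison when the index is out of range or the pair arrays cannot
 * be allocated.
 */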
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

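/*
 * Column width: for grouped events the column must hold one value per
 * (non-empty) group member, and it is never narrower than the header
 * name.
 */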
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group) {
		int nr = 0;
		struct evsel *pos;

		for_each_group_evsel(pos, evsel) {
			if (!symbol_conf.skip_empty ||
			    evsel__hists(pos)->stats.nr_samples)
				nr++;
		}

		len = max(len, nr * fmt->len);
	}

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

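/*
 * Print callbacks for __hpp__fmt_print(): both take the field width as
 * the first variadic argument (consumed by the '%*' in the format
 * string), followed by the value.  The color variant additionally
 * picks a color based on the percent value.
 */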
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

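/*
 * The macros below generate the per-column accessor, color, entry and
 * sort functions.  For example, HPP_PERCENT_FNS(overhead, period, ...)
 * expands to he_get_period(), hpp__color_overhead(),
 * hpp__entry_overhead() and hpp__sort_overhead(), all operating on
 * he->stat.period.
 */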
#define __HPP_COLOR_PERCENT_FN(_type, _field, _fmttype)			\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",	\
			hpp_color_scnprintf, _fmttype);			\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field, _fmttype)			\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",	\
			hpp_entry_scnprintf, _fmttype);			\
}

#define __HPP_SORT_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field, _fmttype)		\
static u64 he_get_acc_##_field(struct hist_entry *he)			\
{									\
	return he->stat_acc->_field;					\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_color_scnprintf, _fmttype);		\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field, _fmttype)		\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_entry_scnprintf, _fmttype);		\
}

#define __HPP_SORT_ACC_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);		\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW);	\
}

#define __HPP_SORT_RAW_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_raw_##_field);			\
}

#define __HPP_ENTRY_AVERAGE_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f",	\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE); \
}

#define __HPP_SORT_AVERAGE_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}

#define HPP_PERCENT_FNS(_type, _field, _fmttype)			\
__HPP_COLOR_PERCENT_FN(_type, _field, _fmttype)				\
__HPP_ENTRY_PERCENT_FN(_type, _field, _fmttype)				\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field, _fmttype)			\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field, _fmttype)			\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field, _fmttype)			\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

#define HPP_AVERAGE_FNS(_type, _field)					\
__HPP_ENTRY_AVERAGE_FN(_type, _field)					\
__HPP_SORT_AVERAGE_FN(_type, _field)

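/*
 * Instantiate the standard columns: overhead and latency percentages
 * (plain and accumulated), raw sample/period counts and the weight
 * averages.
 */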
HPP_PERCENT_FNS(overhead, period, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(latency, latency, PERF_HPP_FMT_TYPE__LATENCY)
HPP_PERCENT_FNS(overhead_sys, period_sys, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(overhead_us, period_us, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_ACC_FNS(overhead_acc, period, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_ACC_FNS(latency_acc, latency, PERF_HPP_FMT_TYPE__LATENCY)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

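/*
 * Initializers for the perf_hpp__format[] table below.  Color columns
 * provide both a ->color and an ->entry callback; plain columns only
 * ->entry.  The ->cmp and ->collapse callbacks are no-ops so these
 * columns never affect how entries are collapsed.
 */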
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("Latency", latency, LATENCY),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__COLOR_ACC_PRINT_FNS("Latency", latency_acc, LATENCY_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD),
	HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
	HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
	HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
#undef __HPP_SORT_AVERAGE_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely unhooked;
	 * if it is not, that is a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to
	 * set up the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	/*
	 * Overhead and latency columns are added in setup_overhead(),
	 * so they are added implicitly here only if they were added
	 * by setup_overhead() before (have the was_taken flag set).
	 * This is required because setup_overhead() has more complex
	 * logic, in particular it does not add "overhead" if the user
	 * specified "latency" in the sort order, and vice versa.
	 */
	if (symbol_conf.cumulate_callchain) {
		/*
		 * Addition of fields is idempotent, so we add the latency
		 * column twice to get the desired order with simpler logic.
		 */
		if (symbol_conf.prefer_latency)
			hpp_dimension__add_output(PERF_HPP__LATENCY_ACC, true);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC, true);
		if (symbol_conf.enable_latency)
			hpp_dimension__add_output(PERF_HPP__LATENCY_ACC, true);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	if (symbol_conf.prefer_latency)
		hpp_dimension__add_output(PERF_HPP__LATENCY, true);
	hpp_dimension__add_output(PERF_HPP__OVERHEAD, true);
	if (symbol_conf.enable_latency)
		hpp_dimension__add_output(PERF_HPP__LATENCY, true);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS, false);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US, false);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS, false);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US, false);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES, false);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD, false);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

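/*
 * Drop the accumulated ("Children") columns again when callchain
 * accumulation is cancelled, and restore the plain overhead column
 * name from "Self" back to "Overhead".
 */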
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *acc_lat, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];
	acc_lat = &perf_hpp__format[PERF_HPP__LATENCY_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (fmt_equal(acc, fmt) || fmt_equal(acc_lat, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (fmt_equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

void perf_hpp__cancel_latency(void)
{
	struct perf_hpp_fmt *fmt, *lat, *acc, *tmp;

	if (is_strict_order(field_order))
		return;
	if (sort_order && strstr(sort_order, "latency"))
		return;

	lat = &perf_hpp__format[PERF_HPP__LATENCY];
	acc = &perf_hpp__format[PERF_HPP__LATENCY_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
			perf_hpp__column_unregister(fmt);
	}
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__LATENCY:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	case PERF_HPP__WEIGHT1:
	case PERF_HPP__WEIGHT2:
	case PERF_HPP__WEIGHT3:
		fmt->len = 8;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

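/*
 * Attach a copy of the format to the per-level hpp list of the hists,
 * creating the perf_hpp_list_node for that hierarchy level on first
 * use.  A level is marked "skip" only while every format on it should
 * be skipped.
 */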
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}