// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

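/*
 * Print a single value for one hists: as a percentage of the total
 * period or latency, as an average per sample, or as the raw value,
 * depending on the format type.
 */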
static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
			    int nr_samples, const char *fmt, int len,
			    hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT || fmtype == PERF_HPP_FMT_TYPE__LATENCY) {
		double percent = 0.0;
		u64 total = fmtype == PERF_HPP_FMT_TYPE__PERCENT ? hists__total_period(hists) :
			hists__total_latency(hists);

		if (total)
			percent = 100.0 * val / total;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
		double avg = nr_samples ? (1.0 * val / nr_samples) : 0;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
	}

	return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
}

struct hpp_fmt_value {
	struct hists *hists;
	u64 val;
	int samples;
};

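/*
 * Collect the value of this entry and, for a group event, the values of
 * the matching entries of each group member, then print one column per
 * member (optionally skipping members without samples).
 */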
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	struct evsel *pos;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int i = 0, nr_members = 1;
	struct hpp_fmt_value *values;

	if (evsel__is_group_event(evsel))
		nr_members = evsel->core.nr_members;

	values = calloc(nr_members, sizeof(*values));
	if (values == NULL)
		return 0;

	values[0].hists = evsel__hists(evsel);
	values[0].val = get_field(he);
	values[0].samples = he->stat.nr_events;

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		for_each_group_member(pos, evsel)
			values[++i].hists = evsel__hists(pos);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			for (i = 0; i < nr_members; i++) {
				if (values[i].hists != pair->hists)
					continue;

				values[i].val = get_field(pair);
				values[i].samples = pair->stat.nr_events;
				break;
			}
		}
	}

	for (i = 0; i < nr_members; i++) {
		if (symbol_conf.skip_empty &&
		    values[i].hists->stats.nr_samples == 0)
			continue;

		ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
					values[i].samples, fmt, len,
					print_fn, fmtype);
	}

	free(values);

	/*
	 * Restore the original buf and size as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn,
	     enum perf_hpp_fmt_type fmtype)
{
	int len = max(fmt->user_len ?: fmt->len, (int)strlen(fmt->name));

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmtype);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT || fmtype == PERF_HPP_FMT_TYPE__LATENCY)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn,
		 enum perf_hpp_fmt_type fmtype)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

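/*
 * Gather the per-member field values of the pair entries of @a and @b
 * into two newly allocated arrays indexed by group index.  Returns 0 on
 * success and -1 on allocation failure; the caller frees both arrays.
 */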
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

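/*
 * Compare two entries of a group event using the member selected with
 * --group-sort-idx; ties are broken by the remaining group members.
 */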
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

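/*
 * Default numeric sort: compare the leader's value and, for a group
 * event, break ties using the value of each group member in turn.
 */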
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

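/*
 * Sort on the accumulated value; entries of the same thread with equal
 * values are ordered by callchain depth so that callers stay above
 * their callees.
 */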
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put the caller above the callee when they have equal periods.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

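/*
 * Column width: the configured (or default) length, widened to fit all
 * displayed group members and the column name.
 */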
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group) {
		int nr = 0;
		struct evsel *pos;

		for_each_group_evsel(pos, evsel) {
			if (!symbol_conf.skip_empty ||
			    evsel__hists(pos)->stats.nr_samples)
				nr++;
		}

		len = max(len, nr * fmt->len);
	}

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

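/*
 * snprint callback for percentage columns: pulls the field width and
 * the percent value from the varargs and prints the value colored
 * according to its magnitude.
 */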
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

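/*
 * The macros below generate the field accessor plus the color, entry
 * and sort callbacks for each builtin column from the hist_entry stat
 * field it is based on.
 */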
#define __HPP_COLOR_PERCENT_FN(_type, _field, _fmttype)				\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, _fmttype);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field, _fmttype)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, _fmttype);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field, _fmttype)			\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, _fmttype);			\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field, _fmttype)			\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, _fmttype);			\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW);		\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define __HPP_ENTRY_AVERAGE_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE);	\
}

#define __HPP_SORT_AVERAGE_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field, _fmttype)			\
__HPP_COLOR_PERCENT_FN(_type, _field, _fmttype)				\
__HPP_ENTRY_PERCENT_FN(_type, _field, _fmttype)				\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field, _fmttype)			\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field, _fmttype)			\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field, _fmttype)			\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

#define HPP_AVERAGE_FNS(_type, _field)					\
__HPP_ENTRY_AVERAGE_FN(_type, _field)					\
__HPP_SORT_AVERAGE_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(latency, latency, PERF_HPP_FMT_TYPE__LATENCY)
HPP_PERCENT_FNS(overhead_sys, period_sys, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(overhead_us, period_us, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_ACC_FNS(overhead_acc, period, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_ACC_FNS(latency_acc, latency, PERF_HPP_FMT_TYPE__LATENCY)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("Latency", latency, LATENCY),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__COLOR_ACC_PRINT_FNS("Latency", latency_acc, LATENCY_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD),
	HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
	HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
	HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
#undef __HPP_SORT_AVERAGE_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point the fmt should be completely unhooked;
	 * if not, it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

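/*
 * Set up the default output columns: overhead/latency (accumulated
 * variants first when cumulating callchains), plus the optional CPU
 * utilization, sample count and period columns.
 */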
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set up
	 * the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	/*
	 * Overhead and latency columns are added in setup_overhead(),
	 * so they are added implicitly here only if they were added
	 * by setup_overhead() before (i.e. they have the was_taken flag set).
	 * This is required because setup_overhead() has more complex
	 * logic; in particular it does not add "overhead" if the user
	 * specified "latency" in the sort order, and vice versa.
	 */
	if (symbol_conf.cumulate_callchain) {
		/*
		 * Addition of fields is idempotent, so we add the latency
		 * column twice to get the desired order with simpler logic.
		 */
		if (symbol_conf.prefer_latency)
			hpp_dimension__add_output(PERF_HPP__LATENCY_ACC, true);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC, true);
		if (symbol_conf.enable_latency)
			hpp_dimension__add_output(PERF_HPP__LATENCY_ACC, true);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	if (symbol_conf.prefer_latency)
		hpp_dimension__add_output(PERF_HPP__LATENCY, true);
	hpp_dimension__add_output(PERF_HPP__OVERHEAD, true);
	if (symbol_conf.enable_latency)
		hpp_dimension__add_output(PERF_HPP__LATENCY, true);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS, false);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US, false);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS, false);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US, false);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES, false);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD, false);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

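/*
 * Callchain accumulation is not used after all: drop the accumulated
 * (children) columns from all output lists and restore the "Overhead"
 * column name (perf_hpp__init renamed it to "Self" when cumulating).
 */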
void perf_hpp__cancel_cumulate(struct evlist *evlist)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *acc_lat, *tmp;
	struct evsel *evsel;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];
	acc_lat = &perf_hpp__format[PERF_HPP__LATENCY_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (fmt_equal(acc, fmt) || fmt_equal(acc_lat, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (fmt_equal(ovh, fmt))
			fmt->name = "Overhead";
	}

	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);
		struct perf_hpp_list_node *node;

		list_for_each_entry(node, &hists->hpp_formats, list) {
			perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
				if (fmt_equal(acc, fmt) || fmt_equal(acc_lat, fmt)) {
					perf_hpp__column_unregister(fmt);
					continue;
				}

				if (fmt_equal(ovh, fmt))
					fmt->name = "Overhead";
			}
		}
	}
}

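/*
 * Drop the latency columns from all output lists unless the user
 * explicitly asked for them via the field or sort order.
 */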
void perf_hpp__cancel_latency(struct evlist *evlist)
{
	struct perf_hpp_fmt *fmt, *lat, *acc, *tmp;
	struct evsel *evsel;

	if (is_strict_order(field_order))
		return;
	if (sort_order && strstr(sort_order, "latency"))
		return;

	lat = &perf_hpp__format[PERF_HPP__LATENCY];
	acc = &perf_hpp__format[PERF_HPP__LATENCY_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
			perf_hpp__column_unregister(fmt);
	}

	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);
		struct perf_hpp_list_node *node;

		list_for_each_entry(node, &hists->hpp_formats, list) {
			perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
				if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
					perf_hpp__column_unregister(fmt);
			}
		}
	}
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

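/*
 * Reset a builtin column to its default width; sort entries have their
 * own reset path and dynamic entries are left untouched.
 */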
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__LATENCY:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	case PERF_HPP__WEIGHT1:
	case PERF_HPP__WEIGHT2:
	case PERF_HPP__WEIGHT3:
		fmt->len = 8;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

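/*
 * Add a copy of @fmt to the per-level hpp list used for hierarchy
 * output, creating the node for that level on first use.
 */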
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
1025