1 /*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 */
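/*
 * A minimal usage sketch (illustrative only; see the perf-trace man page and
 * the option table further down for the authoritative list of flags):
 *
 *	perf trace ls			strace-like trace of a workload
 *	perf trace -p 1234		trace an existing process
 *	perf trace -e sched:*		trace other tracepoints via --event
 */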
16
17 #include "util/record.h"
18 #include <api/fs/tracing_path.h>
19 #ifdef HAVE_LIBBPF_SUPPORT
20 #include <bpf/bpf.h>
21 #include <bpf/libbpf.h>
22 #include <bpf/btf.h>
23 #ifdef HAVE_BPF_SKEL
24 #include "bpf_skel/augmented_raw_syscalls.skel.h"
25 #endif
26 #endif
27 #include "util/bpf_map.h"
28 #include "util/rlimit.h"
29 #include "builtin.h"
30 #include "util/cgroup.h"
31 #include "util/color.h"
32 #include "util/config.h"
33 #include "util/debug.h"
34 #include "util/dso.h"
35 #include "util/env.h"
36 #include "util/event.h"
37 #include "util/evsel.h"
38 #include "util/evsel_fprintf.h"
39 #include "util/synthetic-events.h"
40 #include "util/evlist.h"
41 #include "util/evswitch.h"
42 #include "util/hashmap.h"
43 #include "util/mmap.h"
44 #include <subcmd/pager.h>
45 #include <subcmd/exec-cmd.h>
46 #include "util/machine.h"
47 #include "util/map.h"
48 #include "util/symbol.h"
49 #include "util/path.h"
50 #include "util/session.h"
51 #include "util/thread.h"
52 #include <subcmd/parse-options.h>
53 #include "util/strlist.h"
54 #include "util/intlist.h"
55 #include "util/thread_map.h"
56 #include "util/stat.h"
57 #include "util/tool.h"
58 #include "util/util.h"
59 #include "trace/beauty/beauty.h"
60 #include "trace-event.h"
61 #include "util/parse-events.h"
62 #include "util/tracepoint.h"
63 #include "callchain.h"
64 #include "print_binary.h"
65 #include "string2.h"
66 #include "syscalltbl.h"
67 #include "../perf.h"
68 #include "trace_augment.h"
69 #include "dwarf-regs.h"
70
71 #include <errno.h>
72 #include <inttypes.h>
73 #include <poll.h>
74 #include <signal.h>
75 #include <stdlib.h>
76 #include <string.h>
77 #include <linux/err.h>
78 #include <linux/filter.h>
79 #include <linux/kernel.h>
80 #include <linux/list_sort.h>
81 #include <linux/random.h>
82 #include <linux/stringify.h>
83 #include <linux/time64.h>
84 #include <linux/zalloc.h>
85 #include <fcntl.h>
86 #include <sys/sysmacros.h>
87
88 #include <linux/ctype.h>
89 #include <perf/mmap.h>
90 #include <tools/libc_compat.h>
91
92 #ifdef HAVE_LIBTRACEEVENT
93 #include <event-parse.h>
94 #endif
95
96 #ifndef O_CLOEXEC
97 # define O_CLOEXEC 02000000
98 #endif
99
100 #ifndef F_LINUX_SPECIFIC_BASE
101 # define F_LINUX_SPECIFIC_BASE 1024
102 #endif
103
104 #define RAW_SYSCALL_ARGS_NUM 6
105
106 /*
107 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
108 *
109  * We have to explicitly mark the direction of the data flow, i.e. whether it is
110  * from the kernel to user space or the other way around. Since the BPF collector
111  * we have so far copies only from user to kernel space, mark the arguments that
112  * go in that direction, so that we don't end up collecting the previous contents
113  * for syscall args that go from kernel to user space.
114 */
115 struct syscall_arg_fmt {
116 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
117 bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
118 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
119 void *parm;
120 const char *name;
121 u16 nr_entries; // for arrays
122 bool from_user;
123 bool show_zero;
124 #ifdef HAVE_LIBBPF_SUPPORT
125 const struct btf_type *type;
126 int type_id; /* used in btf_dump */
127 #endif
128 };
129
130 struct syscall_fmt {
131 const char *name;
132 const char *alias;
133 struct {
134 const char *sys_enter,
135 *sys_exit;
136 } bpf_prog_name;
137 struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
138 u8 nr_args;
139 bool errpid;
140 bool timeout;
141 bool hexret;
142 };
143
144 enum summary_mode {
145 SUMMARY__NONE = 0,
146 SUMMARY__BY_TOTAL,
147 SUMMARY__BY_THREAD,
148 };
149
150 struct trace {
151 struct perf_tool tool;
152 struct {
153 /** Sorted syscall numbers used by the trace. */
154 struct syscall **table;
155 /** Size of table. */
156 size_t table_size;
157 struct {
158 struct evsel *sys_enter,
159 *sys_exit,
160 *bpf_output;
161 } events;
162 } syscalls;
163 #ifdef HAVE_BPF_SKEL
164 struct augmented_raw_syscalls_bpf *skel;
165 #endif
166 #ifdef HAVE_LIBBPF_SUPPORT
167 struct btf *btf;
168 #endif
169 struct record_opts opts;
170 struct evlist *evlist;
171 struct machine *host;
172 struct thread *current;
173 struct cgroup *cgroup;
174 u64 base_time;
175 FILE *output;
176 unsigned long nr_events;
177 unsigned long nr_events_printed;
178 unsigned long max_events;
179 struct evswitch evswitch;
180 struct strlist *ev_qualifier;
181 struct {
182 size_t nr;
183 int *entries;
184 } ev_qualifier_ids;
185 struct {
186 size_t nr;
187 pid_t *entries;
188 struct bpf_map *map;
189 } filter_pids;
190 /*
191 * TODO: The map is from an ID (aka system call number) to struct
192 * syscall_stats. If there is >1 e_machine, such as i386 and x86-64
193  * processes, then the stats here will wrongly gather the statistics for
194  * the non-EM_HOST system calls. A fix would be to add the e_machine
195 * into the key, but this would make the code inconsistent with the
196 * per-thread version.
197 */
198 struct hashmap *syscall_stats;
199 double duration_filter;
200 double runtime_ms;
201 unsigned long pfmaj, pfmin;
202 struct {
203 u64 vfs_getname,
204 proc_getname;
205 } stats;
206 unsigned int max_stack;
207 unsigned int min_stack;
208 enum summary_mode summary_mode;
209 int raw_augmented_syscalls_args_size;
210 bool raw_augmented_syscalls;
211 bool fd_path_disabled;
212 bool sort_events;
213 bool not_ev_qualifier;
214 bool live;
215 bool full_time;
216 bool sched;
217 bool multiple_threads;
218 bool summary;
219 bool summary_only;
220 bool errno_summary;
221 bool failure_only;
222 bool show_comm;
223 bool print_sample;
224 bool show_tool_stats;
225 bool trace_syscalls;
226 bool libtraceevent_print;
227 bool kernel_syscallchains;
228 s16 args_alignment;
229 bool show_tstamp;
230 bool show_duration;
231 bool show_zeros;
232 bool show_arg_names;
233 bool show_string_prefix;
234 bool force;
235 bool vfs_getname;
236 bool force_btf;
237 int trace_pgfaults;
238 char *perfconfig_events;
239 struct {
240 struct ordered_events data;
241 u64 last;
242 } oe;
243 };
244
245 static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
246 {
247 #ifdef HAVE_LIBBPF_SUPPORT
248 if (trace->btf != NULL)
249 return;
250
251 trace->btf = btf__load_vmlinux_btf();
252 if (verbose > 0) {
253 fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
254 "Failed to load vmlinux BTF\n");
255 }
256 #endif
257 }
258
259 struct tp_field {
260 int offset;
261 union {
262 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
263 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
264 };
265 };
266
267 #define TP_UINT_FIELD(bits) \
268 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
269 { \
270 u##bits value; \
271 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
272 return value; \
273 }
274
275 TP_UINT_FIELD(8);
276 TP_UINT_FIELD(16);
277 TP_UINT_FIELD(32);
278 TP_UINT_FIELD(64);
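/*
 * For illustration, TP_UINT_FIELD(32) above expands (roughly) to:
 *
 *	static u64 tp_field__u32(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u32 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 *
 * i.e. a fixed-width read at the tracepoint field's offset in the raw sample,
 * widened to u64 so all sizes share the tp_field->integer() signature.
 */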
279
280 #define TP_UINT_FIELD__SWAPPED(bits) \
281 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
282 { \
283 u##bits value; \
284 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
285 return bswap_##bits(value);\
286 }
287
288 TP_UINT_FIELD__SWAPPED(16);
289 TP_UINT_FIELD__SWAPPED(32);
290 TP_UINT_FIELD__SWAPPED(64);
291
292 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
293 {
294 field->offset = offset;
295
296 switch (size) {
297 case 1:
298 field->integer = tp_field__u8;
299 break;
300 case 2:
301 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
302 break;
303 case 4:
304 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
305 break;
306 case 8:
307 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
308 break;
309 default:
310 return -1;
311 }
312
313 return 0;
314 }
315
316 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
317 {
318 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
319 }
320
321 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
322 {
323 return sample->raw_data + field->offset;
324 }
325
326 static int __tp_field__init_ptr(struct tp_field *field, int offset)
327 {
328 field->offset = offset;
329 field->pointer = tp_field__ptr;
330 return 0;
331 }
332
333 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
334 {
335 return __tp_field__init_ptr(field, format_field->offset);
336 }
337
338 struct syscall_tp {
339 struct tp_field id;
340 union {
341 struct tp_field args, ret;
342 };
343 };
344
345 /*
346 * The evsel->priv as used by 'perf trace'
347 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
348 * fmt: for all the other tracepoints
349 */
350 struct evsel_trace {
351 struct syscall_tp sc;
352 struct syscall_arg_fmt *fmt;
353 };
354
355 static struct evsel_trace *evsel_trace__new(void)
356 {
357 return zalloc(sizeof(struct evsel_trace));
358 }
359
360 static void evsel_trace__delete(struct evsel_trace *et)
361 {
362 if (et == NULL)
363 return;
364
365 zfree(&et->fmt);
366 free(et);
367 }
368
369 /*
370 * Used with raw_syscalls:sys_{enter,exit} and with the
371 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
372 */
373 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
374 {
375 struct evsel_trace *et = evsel->priv;
376
377 return &et->sc;
378 }
379
380 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
381 {
382 if (evsel->priv == NULL) {
383 evsel->priv = evsel_trace__new();
384 if (evsel->priv == NULL)
385 return NULL;
386 }
387
388 return __evsel__syscall_tp(evsel);
389 }
390
391 /*
392 * Used with all the other tracepoints.
393 */
394 static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
395 {
396 struct evsel_trace *et = evsel->priv;
397
398 return et->fmt;
399 }
400
401 static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
402 {
403 struct evsel_trace *et = evsel->priv;
404
405 if (evsel->priv == NULL) {
406 et = evsel->priv = evsel_trace__new();
407
408 if (et == NULL)
409 return NULL;
410 }
411
412 if (et->fmt == NULL) {
413 const struct tep_event *tp_format = evsel__tp_format(evsel);
414
415 if (tp_format == NULL)
416 goto out_delete;
417
418 et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
419 if (et->fmt == NULL)
420 goto out_delete;
421 }
422
423 return __evsel__syscall_arg_fmt(evsel);
424
425 out_delete:
426 evsel_trace__delete(evsel->priv);
427 evsel->priv = NULL;
428 return NULL;
429 }
430
431 static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
432 {
433 struct tep_format_field *format_field = evsel__field(evsel, name);
434
435 if (format_field == NULL)
436 return -1;
437
438 return tp_field__init_uint(field, format_field, evsel->needs_swap);
439 }
440
441 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
442 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
443 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
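/*
 * Example: perf_evsel__init_sc_tp_uint_field(evsel, id) stringifies 'id' and
 * wires sc->id to the tracepoint field named "id", picking the right
 * tp_field__u*() reader for that field's size and byte order.
 */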
444
445 static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
446 {
447 struct tep_format_field *format_field = evsel__field(evsel, name);
448
449 if (format_field == NULL)
450 return -1;
451
452 return tp_field__init_ptr(field, format_field);
453 }
454
455 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
456 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
457 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
458
459 static void evsel__delete_priv(struct evsel *evsel)
460 {
461 zfree(&evsel->priv);
462 evsel__delete(evsel);
463 }
464
465 static int evsel__init_syscall_tp(struct evsel *evsel)
466 {
467 struct syscall_tp *sc = evsel__syscall_tp(evsel);
468
469 if (sc != NULL) {
470 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
471 evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
472 return -ENOENT;
473
474 return 0;
475 }
476
477 return -ENOMEM;
478 }
479
480 static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
481 {
482 struct syscall_tp *sc = evsel__syscall_tp(evsel);
483
484 if (sc != NULL) {
485 struct tep_format_field *syscall_id = evsel__field(tp, "id");
486 if (syscall_id == NULL)
487 syscall_id = evsel__field(tp, "__syscall_nr");
488 if (syscall_id == NULL ||
489 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
490 return -EINVAL;
491
492 return 0;
493 }
494
495 return -ENOMEM;
496 }
497
498 static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
499 {
500 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
501
502 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
503 }
504
505 static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
506 {
507 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
508
509 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
510 }
511
512 static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
513 {
514 if (evsel__syscall_tp(evsel) != NULL) {
515 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
516 return -ENOENT;
517
518 evsel->handler = handler;
519 return 0;
520 }
521
522 return -ENOMEM;
523 }
524
525 static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
526 {
527 struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
528
529 /* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
530 if (IS_ERR(evsel))
531 evsel = evsel__newtp("syscalls", direction);
532
533 if (IS_ERR(evsel))
534 return NULL;
535
536 if (evsel__init_raw_syscall_tp(evsel, handler))
537 goto out_delete;
538
539 return evsel;
540
541 out_delete:
542 evsel__delete_priv(evsel);
543 return NULL;
544 }
545
546 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
547 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
548 fields->name.integer(&fields->name, sample); })
549
550 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
551 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
552 fields->name.pointer(&fields->name, sample); })
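/*
 * Usage sketch: for a raw_syscalls:sys_enter sample,
 * perf_evsel__sc_tp_uint(evsel, id, sample) returns the syscall number and
 * perf_evsel__sc_tp_ptr(evsel, args, sample) returns a pointer to the packed
 * argument array inside the raw sample payload.
 */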
553
554 size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
555 {
556 int idx = val - sa->offset;
557
558 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
559 size_t printed = scnprintf(bf, size, intfmt, val);
560 if (show_suffix)
561 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
562 return printed;
563 }
564
565 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
566 }
567
568 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
569 {
570 int idx = val - sa->offset;
571
572 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
573 size_t printed = scnprintf(bf, size, intfmt, val);
574 if (show_prefix)
575 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
576 return printed;
577 }
578
579 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
580 }
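/*
 * Worked example (using the itimers strarray defined below, offset 0):
 * val 1 formats as "VIRTUAL", or "ITIMER_VIRTUAL" when prefixes are shown;
 * an out-of-range value such as 7 falls back to printing the integer,
 * followed by an "ITIMER_???" marker when prefixes are shown.
 */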
581
582 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
583 const char *intfmt,
584 struct syscall_arg *arg)
585 {
586 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
587 }
588
589 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
590 struct syscall_arg *arg)
591 {
592 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
593 }
594
595 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
596
597 bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
598 {
599 return strarray__strtoul(arg->parm, bf, size, ret);
600 }
601
602 bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
603 {
604 return strarray__strtoul_flags(arg->parm, bf, size, ret);
605 }
606
607 bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
608 {
609 return strarrays__strtoul(arg->parm, bf, size, ret);
610 }
611
612 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
613 {
614 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
615 }
616
617 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
618 {
619 size_t printed;
620 int i;
621
622 for (i = 0; i < sas->nr_entries; ++i) {
623 struct strarray *sa = sas->entries[i];
624 int idx = val - sa->offset;
625
626 if (idx >= 0 && idx < sa->nr_entries) {
627 if (sa->entries[idx] == NULL)
628 break;
629 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
630 }
631 }
632
633 printed = scnprintf(bf, size, intfmt, val);
634 if (show_prefix)
635 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
636 return printed;
637 }
638
639 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
640 {
641 int i;
642
643 for (i = 0; i < sa->nr_entries; ++i) {
644 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
645 *ret = sa->offset + i;
646 return true;
647 }
648 }
649
650 return false;
651 }
652
653 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
654 {
655 u64 val = 0;
656 char *tok = bf, *sep, *end;
657
658 *ret = 0;
659
660 while (size != 0) {
661 int toklen = size;
662
663 sep = memchr(tok, '|', size);
664 if (sep != NULL) {
665 size -= sep - tok + 1;
666
667 end = sep - 1;
668 while (end > tok && isspace(*end))
669 --end;
670
671 toklen = end - tok + 1;
672 }
673
674 while (isspace(*tok))
675 ++tok;
676
677 if (isalpha(*tok) || *tok == '_') {
678 if (!strarray__strtoul(sa, tok, toklen, &val))
679 return false;
680 } else
681 val = strtoul(tok, NULL, 0);
682
683 *ret |= (1 << (val - 1));
684
685 if (sep == NULL)
686 break;
687 tok = sep + 1;
688 }
689
690 return true;
691 }
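/*
 * Worked example (a sketch, using the fsmount_flags strarray defined below):
 * parsing the filter string "CLOEXEC" resolves the name to value 1 via
 * strarray__strtoul() and sets bit (1 << 0) in *ret, i.e. FSMOUNT_CLOEXEC;
 * numeric tokens such as "1" are handled by strtoul() the same way, and
 * multiple tokens may be combined with '|'.
 */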
692
693 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
694 {
695 int i;
696
697 for (i = 0; i < sas->nr_entries; ++i) {
698 struct strarray *sa = sas->entries[i];
699
700 if (strarray__strtoul(sa, bf, size, ret))
701 return true;
702 }
703
704 return false;
705 }
706
707 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
708 struct syscall_arg *arg)
709 {
710 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
711 }
712
713 #ifndef AT_FDCWD
714 #define AT_FDCWD -100
715 #endif
716
717 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
718 struct syscall_arg *arg)
719 {
720 int fd = arg->val;
721 const char *prefix = "AT_FD";
722
723 if (fd == AT_FDCWD)
724 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
725
726 return syscall_arg__scnprintf_fd(bf, size, arg);
727 }
728
729 #define SCA_FDAT syscall_arg__scnprintf_fd_at
730
731 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
732 struct syscall_arg *arg);
733
734 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
735
736 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
737 {
738 return scnprintf(bf, size, "%#lx", arg->val);
739 }
740
741 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
742 {
743 if (arg->val == 0)
744 return scnprintf(bf, size, "NULL");
745 return syscall_arg__scnprintf_hex(bf, size, arg);
746 }
747
748 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
749 {
750 return scnprintf(bf, size, "%d", arg->val);
751 }
752
753 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
754 {
755 return scnprintf(bf, size, "%ld", arg->val);
756 }
757
758 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
759 {
760 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
761 // fill missing comms using thread__set_comm()...
762 // here or in a special syscall_arg__scnprintf_pid_sched_tp...
763 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
764 }
765
766 #define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
767
768 static const char *bpf_cmd[] = {
769 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
770 "MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
771 "PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
772 "PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
773 "PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
774 "TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
775 "BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
776 "MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
777 "LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
778 "LINK_DETACH", "PROG_BIND_MAP",
779 };
780 static DEFINE_STRARRAY(bpf_cmd, "BPF_");
781
782 static const char *fsmount_flags[] = {
783 [1] = "CLOEXEC",
784 };
785 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
786
787 #include "trace/beauty/generated/fsconfig_arrays.c"
788
789 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
790
791 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
792 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
793
794 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
795 static DEFINE_STRARRAY(itimers, "ITIMER_");
796
797 static const char *keyctl_options[] = {
798 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
799 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
800 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
801 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
802 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
803 };
804 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
805
806 static const char *whences[] = { "SET", "CUR", "END",
807 #ifdef SEEK_DATA
808 "DATA",
809 #endif
810 #ifdef SEEK_HOLE
811 "HOLE",
812 #endif
813 };
814 static DEFINE_STRARRAY(whences, "SEEK_");
815
816 static const char *fcntl_cmds[] = {
817 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
818 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
819 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
820 "GETOWNER_UIDS",
821 };
822 static DEFINE_STRARRAY(fcntl_cmds, "F_");
823
824 static const char *fcntl_linux_specific_cmds[] = {
825 "SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
826 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
827 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
828 };
829
830 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
831
832 static struct strarray *fcntl_cmds_arrays[] = {
833 &strarray__fcntl_cmds,
834 &strarray__fcntl_linux_specific_cmds,
835 };
836
837 static DEFINE_STRARRAYS(fcntl_cmds_arrays);
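/*
 * Example: strarrays__scnprintf() tries each array in turn, so a fcntl cmd of
 * 1024 (F_LINUX_SPECIFIC_BASE) misses strarray__fcntl_cmds but hits index 0 of
 * strarray__fcntl_linux_specific_cmds and is printed as "SETLEASE" (with the
 * "F_" prefix when string prefixes are being shown).
 */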
838
839 static const char *rlimit_resources[] = {
840 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
841 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
842 "RTTIME",
843 };
844 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
845
846 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
847 static DEFINE_STRARRAY(sighow, "SIG_");
848
849 static const char *clockid[] = {
850 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
851 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
852 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
853 };
854 static DEFINE_STRARRAY(clockid, "CLOCK_");
855
856 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
857 struct syscall_arg *arg)
858 {
859 bool show_prefix = arg->show_string_prefix;
860 const char *suffix = "_OK";
861 size_t printed = 0;
862 int mode = arg->val;
863
864 if (mode == F_OK) /* 0 */
865 return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
866 #define P_MODE(n) \
867 if (mode & n##_OK) { \
868 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
869 mode &= ~n##_OK; \
870 }
871
872 P_MODE(R);
873 P_MODE(W);
874 P_MODE(X);
875 #undef P_MODE
876
877 if (mode)
878 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
879
880 return printed;
881 }
882
883 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
884
885 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
886 struct syscall_arg *arg);
887
888 #define SCA_FILENAME syscall_arg__scnprintf_filename
889
890 // 'argname' is purely for documentation at this point, standing in for the previous comment that carried that info
891 #define SCA_FILENAME_FROM_USER(argname) \
892 { .scnprintf = SCA_FILENAME, \
893 .from_user = true, }
894
895 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg);
896
897 #define SCA_BUF syscall_arg__scnprintf_buf
898
899 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
900 struct syscall_arg *arg)
901 {
902 bool show_prefix = arg->show_string_prefix;
903 const char *prefix = "O_";
904 int printed = 0, flags = arg->val;
905
906 #define P_FLAG(n) \
907 if (flags & O_##n) { \
908 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
909 flags &= ~O_##n; \
910 }
911
912 P_FLAG(CLOEXEC);
913 P_FLAG(NONBLOCK);
914 #undef P_FLAG
915
916 if (flags)
917 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
918
919 return printed;
920 }
921
922 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
923
924 #ifndef GRND_NONBLOCK
925 #define GRND_NONBLOCK 0x0001
926 #endif
927 #ifndef GRND_RANDOM
928 #define GRND_RANDOM 0x0002
929 #endif
930
931 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
932 struct syscall_arg *arg)
933 {
934 bool show_prefix = arg->show_string_prefix;
935 const char *prefix = "GRND_";
936 int printed = 0, flags = arg->val;
937
938 #define P_FLAG(n) \
939 if (flags & GRND_##n) { \
940 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
941 flags &= ~GRND_##n; \
942 }
943
944 P_FLAG(RANDOM);
945 P_FLAG(NONBLOCK);
946 #undef P_FLAG
947
948 if (flags)
949 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
950
951 return printed;
952 }
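/*
 * Example output: flags GRND_RANDOM|GRND_NONBLOCK format as "RANDOM|NONBLOCK"
 * (or "GRND_RANDOM|GRND_NONBLOCK" with prefixes shown); any unknown bits left
 * over are appended in hex, e.g. "|0x4".
 */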
953
954 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
955
956 #ifdef HAVE_LIBBPF_SUPPORT
957 static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
958 {
959 int id;
960
961 type = strstr(type, "enum ");
962 if (type == NULL)
963 return;
964
965 type += 5; // skip "enum " to get the enumeration name
966
967 id = btf__find_by_name(btf, type);
968 if (id < 0)
969 return;
970
971 arg_fmt->type = btf__type_by_id(btf, id);
972 }
973
974 static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
975 {
976 const struct btf_type *bt = arg->fmt->type;
977 struct btf *btf = arg->trace->btf;
978 struct btf_enum *be = btf_enum(bt);
979
980 for (int i = 0; i < btf_vlen(bt); ++i, ++be) {
981 const char *name = btf__name_by_offset(btf, be->name_off);
982 int max_len = max(size, strlen(name));
983
984 if (strncmp(name, bf, max_len) == 0) {
985 *val = be->val;
986 return true;
987 }
988 }
989
990 return false;
991 }
992
993 static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
994 {
995 const struct btf_type *bt;
996 char *type = arg->type_name;
997 struct btf *btf;
998
999 trace__load_vmlinux_btf(arg->trace);
1000
1001 btf = arg->trace->btf;
1002 if (btf == NULL)
1003 return false;
1004
1005 if (arg->fmt->type == NULL) {
1006 // See if this is an enum
1007 syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type);
1008 }
1009
1010 // Now let's see if we have a BTF type resolved
1011 bt = arg->fmt->type;
1012 if (bt == NULL)
1013 return false;
1014
1015 // If it is an enum:
1016 if (btf_is_enum(arg->fmt->type))
1017 return syscall_arg__strtoul_btf_enum(bf, size, arg, val);
1018
1019 return false;
1020 }
1021
1022 static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val)
1023 {
1024 struct btf_enum *be = btf_enum(type);
1025 const int nr_entries = btf_vlen(type);
1026
1027 for (int i = 0; i < nr_entries; ++i, ++be) {
1028 if (be->val == val) {
1029 return scnprintf(bf, size, "%s",
1030 btf__name_by_offset(btf, be->name_off));
1031 }
1032 }
1033
1034 return 0;
1035 }
1036
1037 struct trace_btf_dump_snprintf_ctx {
1038 char *bf;
1039 size_t printed, size;
1040 };
1041
1042 static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args)
1043 {
1044 struct trace_btf_dump_snprintf_ctx *ctx = vctx;
1045
1046 ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args);
1047 }
1048
1049 static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg)
1050 {
1051 struct trace_btf_dump_snprintf_ctx ctx = {
1052 .bf = bf,
1053 .size = size,
1054 };
1055 struct augmented_arg *augmented_arg = arg->augmented.args;
1056 int type_id = arg->fmt->type_id, consumed;
1057 struct btf_dump *btf_dump;
1058
1059 LIBBPF_OPTS(btf_dump_opts, dump_opts);
1060 LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts);
1061
1062 if (arg == NULL || arg->augmented.args == NULL)
1063 return 0;
1064
1065 dump_data_opts.compact = true;
1066 dump_data_opts.skip_names = !arg->trace->show_arg_names;
1067
1068 btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts);
1069 if (btf_dump == NULL)
1070 return 0;
1071
1072 /* pretty print the struct data here */
1073 if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0)
1074 return 0;
1075
1076 consumed = sizeof(*augmented_arg) + augmented_arg->size;
1077 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1078 arg->augmented.size -= consumed;
1079
1080 btf_dump__free(btf_dump);
1081
1082 return ctx.printed;
1083 }
1084
1085 static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
1086 size_t size, int val, char *type)
1087 {
1088 struct syscall_arg_fmt *arg_fmt = arg->fmt;
1089
1090 if (trace->btf == NULL)
1091 return 0;
1092
1093 if (arg_fmt->type == NULL) {
1094 // Check if this is an enum and if we have the BTF type for it.
1095 syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
1096 }
1097
1098 // Did we manage to find a BTF type for the syscall/tracepoint argument?
1099 if (arg_fmt->type == NULL)
1100 return 0;
1101
1102 if (btf_is_enum(arg_fmt->type))
1103 return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
1104 else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type))
1105 return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);
1106
1107 return 0;
1108 }
1109
1110 #else // HAVE_LIBBPF_SUPPORT
1111 static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
1112 char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
1113 char *type __maybe_unused)
1114 {
1115 return 0;
1116 }
1117
1118 static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
1119 struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
1120 {
1121 return false;
1122 }
1123 #endif // HAVE_LIBBPF_SUPPORT
1124
1125 #define STUL_BTF_TYPE syscall_arg__strtoul_btf_type
1126
1127 #define STRARRAY(name, array) \
1128 { .scnprintf = SCA_STRARRAY, \
1129 .strtoul = STUL_STRARRAY, \
1130 .parm = &strarray__##array, }
1131
1132 #define STRARRAY_FLAGS(name, array) \
1133 { .scnprintf = SCA_STRARRAY_FLAGS, \
1134 .strtoul = STUL_STRARRAY_FLAGS, \
1135 .parm = &strarray__##array, }
1136
1137 #include "trace/beauty/eventfd.c"
1138 #include "trace/beauty/futex_op.c"
1139 #include "trace/beauty/futex_val3.c"
1140 #include "trace/beauty/mmap.c"
1141 #include "trace/beauty/mode_t.c"
1142 #include "trace/beauty/msg_flags.c"
1143 #include "trace/beauty/open_flags.c"
1144 #include "trace/beauty/perf_event_open.c"
1145 #include "trace/beauty/pid.c"
1146 #include "trace/beauty/sched_policy.c"
1147 #include "trace/beauty/seccomp.c"
1148 #include "trace/beauty/signum.c"
1149 #include "trace/beauty/socket_type.c"
1150 #include "trace/beauty/waitid_options.c"
1151
1152 static const struct syscall_fmt syscall_fmts[] = {
1153 { .name = "access",
1154 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
1155 { .name = "arch_prctl",
1156 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
1157 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
1158 { .name = "bind",
1159 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
1160 [1] = SCA_SOCKADDR_FROM_USER(umyaddr),
1161 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
1162 { .name = "bpf",
1163 .arg = { [0] = STRARRAY(cmd, bpf_cmd),
1164 [1] = { .from_user = true /* attr */, }, } },
1165 { .name = "brk", .hexret = true,
1166 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
1167 { .name = "clock_gettime",
1168 .arg = { [0] = STRARRAY(clk_id, clockid), }, },
1169 { .name = "clock_nanosleep",
1170 .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, },
1171 { .name = "clone", .errpid = true, .nr_args = 5,
1172 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
1173 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
1174 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
1175 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
1176 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
1177 { .name = "close",
1178 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
1179 { .name = "connect",
1180 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
1181 [1] = SCA_SOCKADDR_FROM_USER(servaddr),
1182 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
1183 { .name = "epoll_ctl",
1184 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
1185 { .name = "eventfd2",
1186 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
1187 { .name = "faccessat",
1188 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
1189 [1] = SCA_FILENAME_FROM_USER(pathname),
1190 [2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
1191 { .name = "faccessat2",
1192 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
1193 [1] = SCA_FILENAME_FROM_USER(pathname),
1194 [2] = { .scnprintf = SCA_ACCMODE, /* mode */ },
1195 [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, },
1196 { .name = "fchmodat",
1197 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1198 { .name = "fchownat",
1199 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1200 { .name = "fcntl",
1201 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
1202 .strtoul = STUL_STRARRAYS,
1203 .parm = &strarrays__fcntl_cmds_arrays,
1204 .show_zero = true, },
1205 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
1206 { .name = "flock",
1207 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
1208 { .name = "fsconfig",
1209 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
1210 { .name = "fsmount",
1211 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
1212 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
1213 { .name = "fspick",
1214 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1215 [1] = SCA_FILENAME_FROM_USER(path),
1216 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
1217 { .name = "fstat", .alias = "newfstat", },
1218 { .name = "futex",
1219 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
1220 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
1221 { .name = "futimesat",
1222 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1223 { .name = "getitimer",
1224 .arg = { [0] = STRARRAY(which, itimers), }, },
1225 { .name = "getpid", .errpid = true, },
1226 { .name = "getpgid", .errpid = true, },
1227 { .name = "getppid", .errpid = true, },
1228 { .name = "getrandom",
1229 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
1230 { .name = "getrlimit",
1231 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1232 { .name = "getsockopt",
1233 .arg = { [1] = STRARRAY(level, socket_level), }, },
1234 { .name = "gettid", .errpid = true, },
1235 { .name = "ioctl",
1236 .arg = {
1237 #if defined(__i386__) || defined(__x86_64__)
1238 /*
1239 * FIXME: Make this available to all arches.
1240 */
1241 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
1242 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
1243 #else
1244 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
1245 #endif
1246 { .name = "kcmp", .nr_args = 5,
1247 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
1248 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
1249 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
1250 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
1251 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
1252 { .name = "keyctl",
1253 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
1254 { .name = "kill",
1255 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1256 { .name = "linkat",
1257 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1258 { .name = "lseek",
1259 .arg = { [2] = STRARRAY(whence, whences), }, },
1260 { .name = "lstat", .alias = "newlstat", },
1261 { .name = "madvise",
1262 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1263 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1264 { .name = "mkdirat",
1265 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1266 { .name = "mknodat",
1267 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1268 { .name = "mmap", .hexret = true,
1269 /* The standard mmap maps to old_mmap on s390x */
1270 #if defined(__s390x__)
1271 .alias = "old_mmap",
1272 #endif
1273 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
1274 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
1275 .strtoul = STUL_STRARRAY_FLAGS,
1276 .parm = &strarray__mmap_flags, },
1277 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
1278 { .name = "mount",
1279 .arg = { [0] = SCA_FILENAME_FROM_USER(devname),
1280 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1281 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1282 { .name = "move_mount",
1283 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
1284 [1] = SCA_FILENAME_FROM_USER(pathname),
1285 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
1286 [3] = SCA_FILENAME_FROM_USER(pathname),
1287 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1288 { .name = "mprotect",
1289 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1290 [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
1291 { .name = "mq_unlink",
1292 .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, },
1293 { .name = "mremap", .hexret = true,
1294 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1295 { .name = "name_to_handle_at",
1296 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1297 { .name = "nanosleep",
1298 .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, },
1299 { .name = "newfstatat", .alias = "fstatat",
1300 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
1301 [1] = SCA_FILENAME_FROM_USER(pathname),
1302 [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
1303 { .name = "open",
1304 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1305 { .name = "open_by_handle_at",
1306 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1307 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1308 { .name = "openat",
1309 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1310 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1311 { .name = "perf_event_open",
1312 .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr),
1313 [2] = { .scnprintf = SCA_INT, /* cpu */ },
1314 [3] = { .scnprintf = SCA_FD, /* group_fd */ },
1315 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1316 { .name = "pipe2",
1317 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1318 { .name = "pkey_alloc",
1319 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
1320 { .name = "pkey_free",
1321 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
1322 { .name = "pkey_mprotect",
1323 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1324 [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
1325 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
1326 { .name = "poll", .timeout = true, },
1327 { .name = "ppoll", .timeout = true, },
1328 { .name = "prctl",
1329 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1330 .strtoul = STUL_STRARRAY,
1331 .parm = &strarray__prctl_options, },
1332 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1333 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1334 { .name = "pread", .alias = "pread64", },
1335 { .name = "preadv", .alias = "pread", },
1336 { .name = "prlimit64",
1337 .arg = { [1] = STRARRAY(resource, rlimit_resources),
1338 [2] = { .from_user = true /* new_rlim */, }, }, },
1339 { .name = "pwrite", .alias = "pwrite64", },
1340 { .name = "readlinkat",
1341 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1342 { .name = "recvfrom",
1343 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1344 { .name = "recvmmsg",
1345 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1346 { .name = "recvmsg",
1347 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1348 { .name = "renameat",
1349 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1350 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1351 { .name = "renameat2",
1352 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1353 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1354 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1355 { .name = "rseq", .errpid = true,
1356 .arg = { [0] = { .from_user = true /* rseq */, }, }, },
1357 { .name = "rt_sigaction",
1358 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1359 { .name = "rt_sigprocmask",
1360 .arg = { [0] = STRARRAY(how, sighow), }, },
1361 { .name = "rt_sigqueueinfo",
1362 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1363 { .name = "rt_tgsigqueueinfo",
1364 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1365 { .name = "sched_setscheduler",
1366 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1367 { .name = "seccomp",
1368 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
1369 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1370 { .name = "select", .timeout = true, },
1371 { .name = "sendfile", .alias = "sendfile64", },
1372 { .name = "sendmmsg",
1373 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1374 { .name = "sendmsg",
1375 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1376 { .name = "sendto",
1377 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1378 [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
1379 { .name = "set_robust_list", .errpid = true,
1380 .arg = { [0] = { .from_user = true /* head */, }, }, },
1381 { .name = "set_tid_address", .errpid = true, },
1382 { .name = "setitimer",
1383 .arg = { [0] = STRARRAY(which, itimers), }, },
1384 { .name = "setrlimit",
1385 .arg = { [0] = STRARRAY(resource, rlimit_resources),
1386 [1] = { .from_user = true /* rlim */, }, }, },
1387 { .name = "setsockopt",
1388 .arg = { [1] = STRARRAY(level, socket_level), }, },
1389 { .name = "socket",
1390 .arg = { [0] = STRARRAY(family, socket_families),
1391 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1392 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1393 { .name = "socketpair",
1394 .arg = { [0] = STRARRAY(family, socket_families),
1395 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1396 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1397 { .name = "stat", .alias = "newstat", },
1398 { .name = "statx",
1399 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
1400 [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ } ,
1401 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
1402 { .name = "swapoff",
1403 .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
1404 { .name = "swapon",
1405 .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
1406 { .name = "symlinkat",
1407 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1408 { .name = "sync_file_range",
1409 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1410 { .name = "tgkill",
1411 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1412 { .name = "tkill",
1413 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1414 { .name = "umount2", .alias = "umount",
1415 .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, },
1416 { .name = "uname", .alias = "newuname", },
1417 { .name = "unlinkat",
1418 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1419 [1] = SCA_FILENAME_FROM_USER(pathname),
1420 [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
1421 { .name = "utimensat",
1422 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1423 { .name = "wait4", .errpid = true,
1424 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1425 { .name = "waitid", .errpid = true,
1426 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1427 { .name = "write",
1428 .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
1429 };
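/*
 * The table above is looked up with bsearch() (see syscall_fmt__find() below),
 * so entries must stay sorted by .name. A minimal sketch of a new entry, for a
 * hypothetical "foo" syscall whose first argument is a file descriptor:
 *
 *	{ .name	  = "foo",
 *	  .arg = { [0] = { .scnprintf = SCA_FD, .name = "fd", }, }, },
 */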
1430
1431 static int syscall_fmt__cmp(const void *name, const void *fmtp)
1432 {
1433 const struct syscall_fmt *fmt = fmtp;
1434 return strcmp(name, fmt->name);
1435 }
1436
1437 static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
1438 const int nmemb,
1439 const char *name)
1440 {
1441 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1442 }
1443
1444 static const struct syscall_fmt *syscall_fmt__find(const char *name)
1445 {
1446 const int nmemb = ARRAY_SIZE(syscall_fmts);
1447 return __syscall_fmt__find(syscall_fmts, nmemb, name);
1448 }
1449
1450 static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
1451 const int nmemb, const char *alias)
1452 {
1453 int i;
1454
1455 for (i = 0; i < nmemb; ++i) {
1456 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1457 return &fmts[i];
1458 }
1459
1460 return NULL;
1461 }
1462
1463 static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1464 {
1465 const int nmemb = ARRAY_SIZE(syscall_fmts);
1466 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1467 }
1468
1469 /**
1470 * struct syscall
1471 */
1472 struct syscall {
1473 /** @e_machine: The ELF machine associated with the entry. */
1474 int e_machine;
1475 /** @id: id value from the tracepoint, the system call number. */
1476 int id;
1477 struct tep_event *tp_format;
1478 int nr_args;
1479 /**
1480 * @args_size: sum of the sizes of the syscall arguments, anything
1481 * after that is augmented stuff: pathname for openat, etc.
1482 */
1483
1484 int args_size;
1485 struct {
1486 struct bpf_program *sys_enter,
1487 *sys_exit;
1488 } bpf_prog;
1489 /** @is_exit: is this "exit" or "exit_group"? */
1490 bool is_exit;
1491 /**
1492 * @is_open: is this "open" or "openat"? To associate the fd returned in
1493 * sys_exit with the pathname in sys_enter.
1494 */
1495 bool is_open;
1496 /**
1497 * @nonexistent: Name lookup failed. Just a hole in the syscall table,
1498 * syscall id not allocated.
1499 */
1500 bool nonexistent;
1501 bool use_btf;
1502 struct tep_format_field *args;
1503 const char *name;
1504 const struct syscall_fmt *fmt;
1505 struct syscall_arg_fmt *arg_fmt;
1506 };
1507
1508 /*
1509 * We need to have this 'calculated' boolean because in some cases we really
1510  * don't know what the duration of a syscall is, for instance, when we start
1511  * a session and some threads are waiting for a syscall to finish, say 'poll',
1512  * in which case all we can do is to print "( ? )" for the duration and for the
1513 * start timestamp.
1514 */
1515 static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1516 {
1517 double duration = (double)t / NSEC_PER_MSEC;
1518 size_t printed = fprintf(fp, "(");
1519
1520 if (!calculated)
1521 printed += fprintf(fp, " ");
1522 else if (duration >= 1.0)
1523 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1524 else if (duration >= 0.01)
1525 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1526 else
1527 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1528 return printed + fprintf(fp, "): ");
1529 }
1530
1531 /**
1532 * filename.ptr: The filename char pointer that will be vfs_getname'd
1533 * filename.entry_str_pos: Where to insert the string translated from
1534 * filename.ptr by the vfs_getname tracepoint/kprobe.
1535 * ret_scnprintf: syscall args may set this to a different syscall return
1536 * formatter, for instance, fcntl may return fds, file flags, etc.
1537 */
1538 struct thread_trace {
1539 u64 entry_time;
1540 bool entry_pending;
1541 unsigned long nr_events;
1542 unsigned long pfmaj, pfmin;
1543 char *entry_str;
1544 double runtime_ms;
1545 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1546 struct {
1547 unsigned long ptr;
1548 short int entry_str_pos;
1549 bool pending_open;
1550 unsigned int namelen;
1551 char *name;
1552 } filename;
1553 struct {
1554 int max;
1555 struct file *table;
1556 } files;
1557
1558 struct hashmap *syscall_stats;
1559 };
1560
1561 static size_t syscall_id_hash(long key, void *ctx __maybe_unused)
1562 {
1563 return key;
1564 }
1565
1566 static bool syscall_id_equal(long key1, long key2, void *ctx __maybe_unused)
1567 {
1568 return key1 == key2;
1569 }
1570
1571 static struct hashmap *alloc_syscall_stats(void)
1572 {
1573 return hashmap__new(syscall_id_hash, syscall_id_equal, NULL);
1574 }
1575
1576 static void delete_syscall_stats(struct hashmap *syscall_stats)
1577 {
1578 struct hashmap_entry *pos;
1579 size_t bkt;
1580
1581 if (syscall_stats == NULL)
1582 return;
1583
1584 hashmap__for_each_entry(syscall_stats, pos, bkt)
1585 zfree(&pos->pvalue);
1586 hashmap__free(syscall_stats);
1587 }
1588
1589 static struct thread_trace *thread_trace__new(struct trace *trace)
1590 {
1591 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1592
1593 if (ttrace) {
1594 ttrace->files.max = -1;
1595 if (trace->summary) {
1596 ttrace->syscall_stats = alloc_syscall_stats();
1597 if (IS_ERR(ttrace->syscall_stats))
1598 zfree(&ttrace);
1599 }
1600 }
1601
1602 return ttrace;
1603 }
1604
1605 static void thread_trace__free_files(struct thread_trace *ttrace);
1606
1607 static void thread_trace__delete(void *pttrace)
1608 {
1609 struct thread_trace *ttrace = pttrace;
1610
1611 if (!ttrace)
1612 return;
1613
1614 delete_syscall_stats(ttrace->syscall_stats);
1615 ttrace->syscall_stats = NULL;
1616 thread_trace__free_files(ttrace);
1617 zfree(&ttrace->entry_str);
1618 free(ttrace);
1619 }
1620
1621 static struct thread_trace *thread__trace(struct thread *thread, struct trace *trace)
1622 {
1623 struct thread_trace *ttrace;
1624
1625 if (thread == NULL)
1626 goto fail;
1627
1628 if (thread__priv(thread) == NULL)
1629 thread__set_priv(thread, thread_trace__new(trace));
1630
1631 if (thread__priv(thread) == NULL)
1632 goto fail;
1633
1634 ttrace = thread__priv(thread);
1635 ++ttrace->nr_events;
1636
1637 return ttrace;
1638 fail:
1639 color_fprintf(trace->output, PERF_COLOR_RED,
1640 "WARNING: not enough memory, dropping samples!\n");
1641 return NULL;
1642 }
1643
1644
1645 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1646 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1647 {
1648 struct thread_trace *ttrace = thread__priv(arg->thread);
1649
1650 ttrace->ret_scnprintf = ret_scnprintf;
1651 }
1652
1653 #define TRACE_PFMAJ (1 << 0)
1654 #define TRACE_PFMIN (1 << 1)
1655
1656 static const size_t trace__entry_str_size = 2048;
1657
1658 static void thread_trace__free_files(struct thread_trace *ttrace)
1659 {
1660 for (int i = 0; i < ttrace->files.max; ++i) {
1661 struct file *file = ttrace->files.table + i;
1662 zfree(&file->pathname);
1663 }
1664
1665 zfree(&ttrace->files.table);
1666 ttrace->files.max = -1;
1667 }
1668
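/*
 * Lazily grow the per-thread fd -> struct file table so that 'fd' is a valid
 * index, zero-initializing any newly allocated slots.
 */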
1669 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1670 {
1671 if (fd < 0)
1672 return NULL;
1673
1674 if (fd > ttrace->files.max) {
1675 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1676
1677 if (nfiles == NULL)
1678 return NULL;
1679
1680 if (ttrace->files.max != -1) {
1681 memset(nfiles + ttrace->files.max + 1, 0,
1682 (fd - ttrace->files.max) * sizeof(struct file));
1683 } else {
1684 memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1685 }
1686
1687 ttrace->files.table = nfiles;
1688 ttrace->files.max = fd;
1689 }
1690
1691 return ttrace->files.table + fd;
1692 }
1693
1694 struct file *thread__files_entry(struct thread *thread, int fd)
1695 {
1696 return thread_trace__files_entry(thread__priv(thread), fd);
1697 }
1698
1699 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1700 {
1701 struct thread_trace *ttrace = thread__priv(thread);
1702 struct file *file = thread_trace__files_entry(ttrace, fd);
1703
1704 if (file != NULL) {
1705 struct stat st;
1706 if (stat(pathname, &st) == 0)
1707 file->dev_maj = major(st.st_rdev);
1708 file->pathname = strdup(pathname);
1709 if (file->pathname)
1710 return 0;
1711 }
1712
1713 return -1;
1714 }
1715
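/*
 * Resolve an fd to its pathname by reading the /proc/<pid>/fd/<fd> (or
 * /proc/<pid>/task/<tid>/fd/<fd>) symlink and caching the result in the
 * per-thread files table.
 */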
1716 static int thread__read_fd_path(struct thread *thread, int fd)
1717 {
1718 char linkname[PATH_MAX], pathname[PATH_MAX];
1719 struct stat st;
1720 int ret;
1721
1722 if (thread__pid(thread) == thread__tid(thread)) {
1723 scnprintf(linkname, sizeof(linkname),
1724 "/proc/%d/fd/%d", thread__pid(thread), fd);
1725 } else {
1726 scnprintf(linkname, sizeof(linkname),
1727 "/proc/%d/task/%d/fd/%d",
1728 thread__pid(thread), thread__tid(thread), fd);
1729 }
1730
1731 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1732 return -1;
1733
1734 ret = readlink(linkname, pathname, sizeof(pathname));
1735
1736 if (ret < 0 || ret > st.st_size)
1737 return -1;
1738
1739 pathname[ret] = '\0';
1740 return trace__set_fd_pathname(thread, fd, pathname);
1741 }
1742
1743 static const char *thread__fd_path(struct thread *thread, int fd,
1744 struct trace *trace)
1745 {
1746 struct thread_trace *ttrace = thread__priv(thread);
1747
1748 if (ttrace == NULL || trace->fd_path_disabled)
1749 return NULL;
1750
1751 if (fd < 0)
1752 return NULL;
1753
1754 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1755 if (!trace->live)
1756 return NULL;
1757 ++trace->stats.proc_getname;
1758 if (thread__read_fd_path(thread, fd))
1759 return NULL;
1760 }
1761
1762 return ttrace->files.table[fd].pathname;
1763 }
1764
1765 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1766 {
1767 int fd = arg->val;
1768 size_t printed = scnprintf(bf, size, "%d", fd);
1769 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1770
1771 if (path)
1772 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1773
1774 return printed;
1775 }
1776
1777 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1778 {
1779 size_t printed = scnprintf(bf, size, "%d", fd);
1780 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1781
1782 if (thread) {
1783 const char *path = thread__fd_path(thread, fd, trace);
1784
1785 if (path)
1786 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1787
1788 thread__put(thread);
1789 }
1790
1791 return printed;
1792 }
1793
1794 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1795 struct syscall_arg *arg)
1796 {
1797 int fd = arg->val;
1798 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1799 struct thread_trace *ttrace = thread__priv(arg->thread);
1800
1801 if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1802 zfree(&ttrace->files.table[fd].pathname);
1803
1804 return printed;
1805 }
1806
1807 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1808 unsigned long ptr)
1809 {
1810 struct thread_trace *ttrace = thread__priv(thread);
1811
1812 ttrace->filename.ptr = ptr;
1813 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1814 }
1815
1816 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1817 {
1818 struct augmented_arg *augmented_arg = arg->augmented.args;
1819 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1820 /*
1821 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1822 * we would have two strings, each prefixed by its size.
1823 */
1824 int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1825
1826 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1827 arg->augmented.size -= consumed;
1828
1829 return printed;
1830 }
1831
1832 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1833 struct syscall_arg *arg)
1834 {
1835 unsigned long ptr = arg->val;
1836
1837 if (arg->augmented.args)
1838 return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1839
1840 if (!arg->trace->vfs_getname)
1841 return scnprintf(bf, size, "%#x", ptr);
1842
1843 thread__set_filename_pos(arg->thread, bf, ptr);
1844 return 0;
1845 }
1846
1847 #define MAX_CONTROL_CHAR 31
1848 #define MAX_ASCII 127
1849
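/*
 * Print an augmented buffer argument, escaping control bytes (0-31, 127) and
 * non-ASCII bytes as decimal escapes, then advance past it in the augmented
 * payload so the next argument can consume its own part.
 */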
1850 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg)
1851 {
1852 struct augmented_arg *augmented_arg = arg->augmented.args;
1853 unsigned char *orig = (unsigned char *)augmented_arg->value;
1854 size_t printed = 0;
1855 int consumed;
1856
1857 if (augmented_arg == NULL)
1858 return 0;
1859
1860 for (int j = 0; j < augmented_arg->size; ++j) {
1861 bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII;
1862 /* print control characters (0~31 and 127), and non-ascii characters in \(digits) */
1863 printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]);
1864 }
1865
1866 consumed = sizeof(*augmented_arg) + augmented_arg->size;
1867 arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1868 arg->augmented.size -= consumed;
1869
1870 return printed;
1871 }
1872
1873 static bool trace__filter_duration(struct trace *trace, double t)
1874 {
1875 return t < (trace->duration_filter * NSEC_PER_MSEC);
1876 }
1877
1878 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1879 {
1880 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1881
1882 return fprintf(fp, "%10.3f ", ts);
1883 }
1884
1885 /*
1886 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1887 * using ttrace->entry_time for a thread that receives a sys_exit without
1888 * first having received a sys_enter ("poll" issued before the tracing
1889 * session starts, or the sys_enter was lost due to a ring buffer overflow).
1890 */
1891 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1892 {
1893 if (tstamp > 0)
1894 return __trace__fprintf_tstamp(trace, tstamp, fp);
1895
1896 return fprintf(fp, " ? ");
1897 }
1898
1899 static pid_t workload_pid = -1;
1900 static volatile sig_atomic_t done = false;
1901 static volatile sig_atomic_t interrupted = false;
1902
1903 static void sighandler_interrupt(int sig __maybe_unused)
1904 {
1905 done = interrupted = true;
1906 }
1907
1908 static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
1909 void *context __maybe_unused)
1910 {
1911 if (info->si_pid == workload_pid)
1912 done = true;
1913 }
1914
1915 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1916 {
1917 size_t printed = 0;
1918
1919 if (trace->multiple_threads) {
1920 if (trace->show_comm)
1921 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1922 printed += fprintf(fp, "%d ", thread__tid(thread));
1923 }
1924
1925 return printed;
1926 }
1927
1928 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1929 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1930 {
1931 size_t printed = 0;
1932
1933 if (trace->show_tstamp)
1934 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1935 if (trace->show_duration)
1936 printed += fprintf_duration(duration, duration_calculated, fp);
1937 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1938 }
1939
1940 static int trace__process_event(struct trace *trace, struct machine *machine,
1941 union perf_event *event, struct perf_sample *sample)
1942 {
1943 int ret = 0;
1944
1945 switch (event->header.type) {
1946 case PERF_RECORD_LOST:
1947 color_fprintf(trace->output, PERF_COLOR_RED,
1948 "LOST %" PRIu64 " events!\n", (u64)event->lost.lost);
1949 ret = machine__process_lost_event(machine, event, sample);
1950 break;
1951 default:
1952 ret = machine__process_event(machine, event, sample);
1953 break;
1954 }
1955
1956 return ret;
1957 }
1958
1959 static int trace__tool_process(const struct perf_tool *tool,
1960 union perf_event *event,
1961 struct perf_sample *sample,
1962 struct machine *machine)
1963 {
1964 struct trace *trace = container_of(tool, struct trace, tool);
1965 return trace__process_event(trace, machine, event, sample);
1966 }
1967
1968 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1969 {
1970 struct machine *machine = vmachine;
1971
1972 if (machine->kptr_restrict_warned)
1973 return NULL;
1974
1975 if (symbol_conf.kptr_restrict) {
1976 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1977 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1978 "Kernel samples will not be resolved.\n");
1979 machine->kptr_restrict_warned = true;
1980 return NULL;
1981 }
1982
1983 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1984 }
1985
1986 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1987 {
1988 int err = symbol__init(NULL);
1989
1990 if (err)
1991 return err;
1992
1993 trace->host = machine__new_host();
1994 if (trace->host == NULL)
1995 return -ENOMEM;
1996
1997 thread__set_priv_destructor(thread_trace__delete);
1998
1999 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
2000 if (err < 0)
2001 goto out;
2002
2003 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
2004 evlist->core.threads, trace__tool_process,
2005 true, false, 1);
2006 out:
2007 if (err)
2008 symbol__exit();
2009
2010 return err;
2011 }
2012
2013 static void trace__symbols__exit(struct trace *trace)
2014 {
2015 machine__exit(trace->host);
2016 trace->host = NULL;
2017
2018 symbol__exit();
2019 }
2020
2021 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
2022 {
2023 int idx;
2024
2025 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
2026 nr_args = sc->fmt->nr_args;
2027
2028 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
2029 if (sc->arg_fmt == NULL)
2030 return -1;
2031
2032 for (idx = 0; idx < nr_args; ++idx) {
2033 if (sc->fmt)
2034 sc->arg_fmt[idx] = sc->fmt->arg[idx];
2035 }
2036
2037 sc->nr_args = nr_args;
2038 return 0;
2039 }
2040
2041 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
2042 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
2043 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
2044 };
2045
2046 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
2047 {
2048 const struct syscall_arg_fmt *fmt = fmtp;
2049 return strcmp(name, fmt->name);
2050 }
2051
2052 static const struct syscall_arg_fmt *
2053 __syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb,
2054 const char *name)
2055 {
2056 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
2057 }
2058
2059 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
2060 {
2061 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
2062 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
2063 }
2064
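/*
 * Walk the tracepoint fields and pick a default beautifier for each argument
 * based on its type and name: filenames, fds, pids, modes, char arrays and
 * BTF-described enums all get specialized formatters.
 */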
2065 static struct tep_format_field *
2066 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field,
2067 bool *use_btf)
2068 {
2069 struct tep_format_field *last_field = NULL;
2070 int len;
2071
2072 for (; field; field = field->next, ++arg) {
2073 last_field = field;
2074
2075 if (arg->scnprintf)
2076 continue;
2077
2078 len = strlen(field->name);
2079
2080 // As far as heuristics (or intention) goes this seems to hold true, and makes sense!
2081 if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const "))
2082 arg->from_user = true;
2083
2084 if (strcmp(field->type, "const char *") == 0 &&
2085 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
2086 strstr(field->name, "path") != NULL)) {
2087 arg->scnprintf = SCA_FILENAME;
2088 } else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
2089 arg->scnprintf = SCA_PTR;
2090 else if (strcmp(field->type, "pid_t") == 0)
2091 arg->scnprintf = SCA_PID;
2092 else if (strcmp(field->type, "umode_t") == 0)
2093 arg->scnprintf = SCA_MODE_T;
2094 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
2095 arg->scnprintf = SCA_CHAR_ARRAY;
2096 arg->nr_entries = field->arraylen;
2097 } else if ((strcmp(field->type, "int") == 0 ||
2098 strcmp(field->type, "unsigned int") == 0 ||
2099 strcmp(field->type, "long") == 0) &&
2100 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
2101 /*
2102 * /sys/kernel/tracing/events/syscalls/sys_enter*
2103 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
2104 * 65 int
2105 * 23 unsigned int
2106 * 7 unsigned long
2107 */
2108 arg->scnprintf = SCA_FD;
2109 } else if (strstr(field->type, "enum") && use_btf != NULL) {
2110 *use_btf = true;
2111 arg->strtoul = STUL_BTF_TYPE;
2112 } else {
2113 const struct syscall_arg_fmt *fmt =
2114 syscall_arg_fmt__find_by_name(field->name);
2115
2116 if (fmt) {
2117 arg->scnprintf = fmt->scnprintf;
2118 arg->strtoul = fmt->strtoul;
2119 }
2120 }
2121 }
2122
2123 return last_field;
2124 }
2125
2126 static int syscall__set_arg_fmts(struct syscall *sc)
2127 {
2128 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
2129 &sc->use_btf);
2130
2131 if (last_field)
2132 sc->args_size = last_field->offset + last_field->size;
2133
2134 return 0;
2135 }
2136
2137 static int syscall__read_info(struct syscall *sc, struct trace *trace)
2138 {
2139 char tp_name[128];
2140 const char *name;
2141 int err;
2142
2143 if (sc->nonexistent)
2144 return -EEXIST;
2145
2146 if (sc->name) {
2147 /* Info already read. */
2148 return 0;
2149 }
2150
2151 name = syscalltbl__name(sc->e_machine, sc->id);
2152 if (name == NULL) {
2153 sc->nonexistent = true;
2154 return -EEXIST;
2155 }
2156
2157 sc->name = name;
2158 sc->fmt = syscall_fmt__find(sc->name);
2159
2160 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
2161 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
2162
2163 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
2164 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
2165 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
2166 }
2167
2168 /*
2169 * If we fail to read the tracepoint format via the sysfs node, the
2170 * tracepoint doesn't exist, so set the 'nonexistent' flag to true.
2171 */
2172 if (IS_ERR(sc->tp_format)) {
2173 sc->nonexistent = true;
2174 err = PTR_ERR(sc->tp_format);
2175 sc->tp_format = NULL;
2176 return err;
2177 }
2178
2179 /*
2180 * The tracepoint format contains __syscall_nr field, so it's one more
2181 * than the actual number of syscall arguments.
2182 */
2183 if (syscall__alloc_arg_fmts(sc, sc->tp_format->format.nr_fields - 1))
2184 return -ENOMEM;
2185
2186 sc->args = sc->tp_format->format.fields;
2187 /*
2188 * We need to check and discard the first field, '__syscall_nr' (or
2189 * 'nr' on older kernels), which holds the syscall number and is not
2190 * needed here.
2191 */
2192 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
2193 sc->args = sc->args->next;
2194 --sc->nr_args;
2195 }
2196
2197 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
2198 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
2199
2200 err = syscall__set_arg_fmts(sc);
2201
2202 /* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */
2203 if (sc->use_btf)
2204 trace__load_vmlinux_btf(trace);
2205
2206 return err;
2207 }
2208
2209 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
2210 {
2211 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
2212
2213 if (fmt != NULL) {
2214 const struct tep_event *tp_format = evsel__tp_format(evsel);
2215
2216 if (tp_format) {
2217 syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf);
2218 return 0;
2219 }
2220 }
2221
2222 return -ENOMEM;
2223 }
2224
2225 static int intcmp(const void *a, const void *b)
2226 {
2227 const int *one = a, *another = b;
2228
2229 return *one - *another;
2230 }
2231
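/*
 * Turn the syscall name list given on the command line (globs allowed) into a
 * sorted array of syscall ids, skipping names that don't match any syscall.
 */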
2232 static int trace__validate_ev_qualifier(struct trace *trace)
2233 {
2234 int err = 0;
2235 bool printed_invalid_prefix = false;
2236 struct str_node *pos;
2237 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
2238
2239 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
2240 sizeof(trace->ev_qualifier_ids.entries[0]));
2241
2242 if (trace->ev_qualifier_ids.entries == NULL) {
2243 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
2244 trace->output);
2245 err = -EINVAL;
2246 goto out;
2247 }
2248
2249 strlist__for_each_entry(pos, trace->ev_qualifier) {
2250 const char *sc = pos->s;
2251 /*
2252 * TODO: For now assume the syscalls being validated/warned about are
2253 * all for the same binary type (e_machine) as perf itself.
2254 */
2255 int id = syscalltbl__id(EM_HOST, sc), match_next = -1;
2256
2257 if (id < 0) {
2258 id = syscalltbl__strglobmatch_first(EM_HOST, sc, &match_next);
2259 if (id >= 0)
2260 goto matches;
2261
2262 if (!printed_invalid_prefix) {
2263 pr_debug("Skipping unknown syscalls: ");
2264 printed_invalid_prefix = true;
2265 } else {
2266 pr_debug(", ");
2267 }
2268
2269 pr_debug("%s", sc);
2270 continue;
2271 }
2272 matches:
2273 trace->ev_qualifier_ids.entries[nr_used++] = id;
2274 if (match_next == -1)
2275 continue;
2276
2277 while (1) {
2278 id = syscalltbl__strglobmatch_next(EM_HOST, sc, &match_next);
2279 if (id < 0)
2280 break;
2281 if (nr_allocated == nr_used) {
2282 void *entries;
2283
2284 nr_allocated += 8;
2285 entries = realloc(trace->ev_qualifier_ids.entries,
2286 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
2287 if (entries == NULL) {
2288 err = -ENOMEM;
2289 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
2290 goto out_free;
2291 }
2292 trace->ev_qualifier_ids.entries = entries;
2293 }
2294 trace->ev_qualifier_ids.entries[nr_used++] = id;
2295 }
2296 }
2297
2298 trace->ev_qualifier_ids.nr = nr_used;
2299 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
2300 out:
2301 if (printed_invalid_prefix)
2302 pr_debug("\n");
2303 return err;
2304 out_free:
2305 zfree(&trace->ev_qualifier_ids.entries);
2306 trace->ev_qualifier_ids.nr = 0;
2307 goto out;
2308 }
2309
2310 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
2311 {
2312 bool in_ev_qualifier;
2313
2314 if (trace->ev_qualifier_ids.nr == 0)
2315 return true;
2316
2317 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
2318 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
2319
2320 if (in_ev_qualifier)
2321 return !trace->not_ev_qualifier;
2322
2323 return trace->not_ev_qualifier;
2324 }
2325
2326 /*
2327 * args is to be interpreted as a series of longs but we need to handle
2328 * 8-byte unaligned accesses. args points to raw_data within the event
2329 * and raw_data is guaranteed to be 8-byte unaligned because it is
2330 * preceded by raw_size which is a u32. So we need to copy args to a temp
2331 * variable to read it. Most notably this avoids extended load instructions
2332 * on unaligned addresses
2333 */
2334 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
2335 {
2336 unsigned long val;
2337 unsigned char *p = arg->args + sizeof(unsigned long) * idx;
2338
2339 memcpy(&val, p, sizeof(val));
2340 return val;
2341 }
2342
2343 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
2344 struct syscall_arg *arg)
2345 {
2346 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
2347 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
2348
2349 return scnprintf(bf, size, "arg%d: ", arg->idx);
2350 }
2351
2352 /*
2353 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
2354 * as mount 'flags' argument that needs ignoring some magic flag, see comment
2355 * in tools/perf/trace/beauty/mount_flags.c
2356 */
2357 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
2358 {
2359 if (fmt && fmt->mask_val)
2360 return fmt->mask_val(arg, val);
2361
2362 return val;
2363 }
2364
2365 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
2366 struct syscall_arg *arg, unsigned long val)
2367 {
2368 if (fmt && fmt->scnprintf) {
2369 arg->val = val;
2370 if (fmt->parm)
2371 arg->parm = fmt->parm;
2372 return fmt->scnprintf(bf, size, arg);
2373 }
2374 return scnprintf(bf, size, "%ld", val);
2375 }
2376
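/*
 * Format the syscall argument list into 'bf': mask values that need it,
 * suppress zeros unless requested, try BTF pretty-printing for pointer/enum
 * arguments, and otherwise use the per-argument beautifier (or plain %ld).
 */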
2377 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
2378 unsigned char *args, void *augmented_args, int augmented_args_size,
2379 struct trace *trace, struct thread *thread)
2380 {
2381 size_t printed = 0, btf_printed;
2382 unsigned long val;
2383 u8 bit = 1;
2384 struct syscall_arg arg = {
2385 .args = args,
2386 .augmented = {
2387 .size = augmented_args_size,
2388 .args = augmented_args,
2389 },
2390 .idx = 0,
2391 .mask = 0,
2392 .trace = trace,
2393 .thread = thread,
2394 .show_string_prefix = trace->show_string_prefix,
2395 };
2396 struct thread_trace *ttrace = thread__priv(thread);
2397 void *default_scnprintf;
2398
2399 /*
2400 * Things like fcntl will set this in its 'cmd' formatter to pick the
2401 * right formatter for the return value (an fd? file flags?), which is
2402 * not needed for syscalls that always return a given type, say an fd.
2403 */
2404 ttrace->ret_scnprintf = NULL;
2405
2406 if (sc->args != NULL) {
2407 struct tep_format_field *field;
2408
2409 for (field = sc->args; field;
2410 field = field->next, ++arg.idx, bit <<= 1) {
2411 if (arg.mask & bit)
2412 continue;
2413
2414 arg.fmt = &sc->arg_fmt[arg.idx];
2415 val = syscall_arg__val(&arg, arg.idx);
2416 /*
2417 * Some syscall args need some mask, most don't and
2418 * return val untouched.
2419 */
2420 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
2421
2422 /*
2423 * Suppress this argument if its value is zero and show_zero
2424 * property isn't set.
2425 *
2426 * If it has a BTF type, then override the zero suppression knob
2427 * as the common case is for zero in an enum to have an associated entry.
2428 */
2429 if (val == 0 && !trace->show_zeros &&
2430 !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) &&
2431 !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE))
2432 continue;
2433
2434 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2435
2436 if (trace->show_arg_names)
2437 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2438
2439 default_scnprintf = sc->arg_fmt[arg.idx].scnprintf;
2440
2441 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) {
2442 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed,
2443 size - printed, val, field->type);
2444 if (btf_printed) {
2445 printed += btf_printed;
2446 continue;
2447 }
2448 }
2449
2450 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
2451 bf + printed, size - printed, &arg, val);
2452 }
2453 } else if (IS_ERR(sc->tp_format)) {
2454 /*
2455 * If we managed to read the tracepoint /format file, then we
2456 * may end up not having any args, like with gettid(), so only
2457 * print the raw args when we didn't manage to read it.
2458 */
2459 while (arg.idx < sc->nr_args) {
2460 if (arg.mask & bit)
2461 goto next_arg;
2462 val = syscall_arg__val(&arg, arg.idx);
2463 if (printed)
2464 printed += scnprintf(bf + printed, size - printed, ", ");
2465 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
2466 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
2467 next_arg:
2468 ++arg.idx;
2469 bit <<= 1;
2470 }
2471 }
2472
2473 return printed;
2474 }
2475
2476 static struct syscall *syscall__new(int e_machine, int id)
2477 {
2478 struct syscall *sc = zalloc(sizeof(*sc));
2479
2480 if (!sc)
2481 return NULL;
2482
2483 sc->e_machine = e_machine;
2484 sc->id = id;
2485 return sc;
2486 }
2487
2488 static void syscall__delete(struct syscall *sc)
2489 {
2490 if (!sc)
2491 return;
2492
2493 free(sc->arg_fmt);
2494 free(sc);
2495 }
2496
2497 static int syscall__bsearch_cmp(const void *key, const void *entry)
2498 {
2499 const struct syscall *a = key, *b = *((const struct syscall **)entry);
2500
2501 if (a->e_machine != b->e_machine)
2502 return a->e_machine - b->e_machine;
2503
2504 return a->id - b->id;
2505 }
2506
2507 static int syscall__cmp(const void *va, const void *vb)
2508 {
2509 const struct syscall *a = *((const struct syscall **)va);
2510 const struct syscall *b = *((const struct syscall **)vb);
2511
2512 if (a->e_machine != b->e_machine)
2513 return a->e_machine - b->e_machine;
2514
2515 return a->id - b->id;
2516 }
2517
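/*
 * Look up the (e_machine, id) pair in the syscalls table, lazily allocating a
 * new entry and keeping the table sorted so bsearch() keeps working.
 */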
2518 static struct syscall *trace__find_syscall(struct trace *trace, int e_machine, int id)
2519 {
2520 struct syscall key = {
2521 .e_machine = e_machine,
2522 .id = id,
2523 };
2524 struct syscall *sc, **tmp;
2525
2526 if (trace->syscalls.table) {
2527 struct syscall **sc_entry = bsearch(&key, trace->syscalls.table,
2528 trace->syscalls.table_size,
2529 sizeof(trace->syscalls.table[0]),
2530 syscall__bsearch_cmp);
2531
2532 if (sc_entry)
2533 return *sc_entry;
2534 }
2535
2536 sc = syscall__new(e_machine, id);
2537 if (!sc)
2538 return NULL;
2539
2540 tmp = reallocarray(trace->syscalls.table, trace->syscalls.table_size + 1,
2541 sizeof(trace->syscalls.table[0]));
2542 if (!tmp) {
2543 syscall__delete(sc);
2544 return NULL;
2545 }
2546
2547 trace->syscalls.table = tmp;
2548 trace->syscalls.table[trace->syscalls.table_size++] = sc;
2549 qsort(trace->syscalls.table, trace->syscalls.table_size, sizeof(trace->syscalls.table[0]),
2550 syscall__cmp);
2551 return sc;
2552 }
2553
2554 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2555 union perf_event *event,
2556 struct perf_sample *sample);
2557
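/*
 * Return the syscall descriptor for 'id', reading its tracepoint format on
 * first use; returns NULL (logging when verbose) if the id is invalid or the
 * info can't be read.
 */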
2558 static struct syscall *trace__syscall_info(struct trace *trace, struct evsel *evsel,
2559 int e_machine, int id)
2560 {
2561 struct syscall *sc;
2562 int err = 0;
2563
2564 if (id < 0) {
2565
2566 /*
2567 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2568 * before that, leaving at a higher verbosity level till that is
2569 * explained. Reproduced with plain ftrace with:
2570 *
2571 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2572 * grep "NR -1 " /t/trace_pipe
2573 *
2574 * After generating some load on the machine.
2575 */
2576 if (verbose > 1) {
2577 static u64 n;
2578 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2579 id, evsel__name(evsel), ++n);
2580 }
2581 return NULL;
2582 }
2583
2584 err = -EINVAL;
2585
2586 sc = trace__find_syscall(trace, e_machine, id);
2587 if (sc)
2588 err = syscall__read_info(sc, trace);
2589
2590 if (err && verbose > 0) {
2591 char sbuf[STRERR_BUFSIZE];
2592
2593 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err,
2594 str_error_r(-err, sbuf, sizeof(sbuf)));
2595 if (sc && sc->name)
2596 fprintf(trace->output, "(%s)", sc->name);
2597 fputs(" information\n", trace->output);
2598 }
2599 return err ? NULL : sc;
2600 }
2601
2602 struct syscall_stats {
2603 struct stats stats;
2604 u64 nr_failures;
2605 int max_errno;
2606 u32 *errnos;
2607 };
2608
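/*
 * Account one syscall completion: update the duration stats for this syscall
 * id and, when --errno-summary is in use, bump the per-errno failure counters.
 */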
2609 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
2610 int id, struct perf_sample *sample, long err,
2611 struct trace *trace)
2612 {
2613 struct hashmap *syscall_stats = ttrace->syscall_stats;
2614 struct syscall_stats *stats = NULL;
2615 u64 duration = 0;
2616
2617 if (trace->summary_mode == SUMMARY__BY_TOTAL)
2618 syscall_stats = trace->syscall_stats;
2619
2620 if (!hashmap__find(syscall_stats, id, &stats)) {
2621 stats = zalloc(sizeof(*stats));
2622 if (stats == NULL)
2623 return;
2624
2625 init_stats(&stats->stats);
2626 if (hashmap__add(syscall_stats, id, stats) < 0) {
2627 free(stats);
2628 return;
2629 }
2630 }
2631
2632 if (ttrace->entry_time && sample->time > ttrace->entry_time)
2633 duration = sample->time - ttrace->entry_time;
2634
2635 update_stats(&stats->stats, duration);
2636
2637 if (err < 0) {
2638 ++stats->nr_failures;
2639
2640 if (!trace->errno_summary)
2641 return;
2642
2643 err = -err;
2644 if (err > stats->max_errno) {
2645 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2646
2647 if (new_errnos) {
2648 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2649 } else {
2650 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
2651 thread__comm_str(thread), thread__pid(thread),
2652 thread__tid(thread));
2653 return;
2654 }
2655
2656 stats->errnos = new_errnos;
2657 stats->max_errno = err;
2658 }
2659
2660 ++stats->errnos[err - 1];
2661 }
2662 }
2663
2664 static int trace__printf_interrupted_entry(struct trace *trace)
2665 {
2666 struct thread_trace *ttrace;
2667 size_t printed;
2668 int len;
2669
2670 if (trace->failure_only || trace->current == NULL)
2671 return 0;
2672
2673 ttrace = thread__priv(trace->current);
2674
2675 if (!ttrace->entry_pending)
2676 return 0;
2677
2678 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2679 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2680
2681 if (len < trace->args_alignment - 4)
2682 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2683
2684 printed += fprintf(trace->output, " ...\n");
2685
2686 ttrace->entry_pending = false;
2687 ++trace->nr_events_printed;
2688
2689 return printed;
2690 }
2691
2692 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2693 struct perf_sample *sample, struct thread *thread)
2694 {
2695 int printed = 0;
2696
2697 if (trace->print_sample) {
2698 double ts = (double)sample->time / NSEC_PER_MSEC;
2699
2700 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2701 evsel__name(evsel), ts,
2702 thread__comm_str(thread),
2703 sample->pid, sample->tid, sample->cpu);
2704 }
2705
2706 return printed;
2707 }
2708
2709 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
2710 {
2711 /*
2712 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
2713 * and there we get all 6 syscall args plus the tracepoint common fields
2714 * that get calculated at the start and the syscall_nr (another long).
2715 * So we check if that is the case and, if so, look past the full
2716 * raw_syscalls:sys_enter payload, which is fixed, instead of past
2717 * sc->args_size.
2718 *
2719 * We'll revisit this later to pass s->args_size to the BPF augmenter
2720 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
2721 * copies only what we need for each syscall, like what happens when we
2722 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
2723 * traffic to just what is needed for each syscall.
2724 */
2725 int args_size = raw_augmented_args_size ?: sc->args_size;
2726
2727 *augmented_args_size = sample->raw_size - args_size;
2728 if (*augmented_args_size > 0) {
2729 static uintptr_t argbuf[1024]; /* assuming single-threaded */
2730
2731 if ((size_t)(*augmented_args_size) > sizeof(argbuf))
2732 return NULL;
2733
2734 /*
2735 * The perf ring-buffer is 8-byte aligned but sample->raw_data
2736 * is not because it's preceded by u32 size. Later, beautifier
2737 * will use the augmented args with stricter alignments like in
2738 * some struct. To make sure it's aligned, let's copy the args
2739 * into a static buffer as it's single-threaded for now.
2740 */
2741 memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size);
2742
2743 return argbuf;
2744 }
2745 return NULL;
2746 }
2747
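/*
 * raw_syscalls:sys_enter handler: format the entry into ttrace->entry_str and
 * defer printing until the matching sys_exit so the duration can be shown,
 * except for exit/exit_group, which never return.
 */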
2748 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2749 union perf_event *event __maybe_unused,
2750 struct perf_sample *sample)
2751 {
2752 char *msg;
2753 void *args;
2754 int printed = 0;
2755 struct thread *thread;
2756 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2757 int augmented_args_size = 0, e_machine;
2758 void *augmented_args = NULL;
2759 struct syscall *sc;
2760 struct thread_trace *ttrace;
2761
2762 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2763 e_machine = thread__e_machine(thread, trace->host);
2764 sc = trace__syscall_info(trace, evsel, e_machine, id);
2765 if (sc == NULL)
2766 goto out_put;
2767 ttrace = thread__trace(thread, trace);
2768 if (ttrace == NULL)
2769 goto out_put;
2770
2771 trace__fprintf_sample(trace, evsel, sample, thread);
2772
2773 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2774
2775 if (ttrace->entry_str == NULL) {
2776 ttrace->entry_str = malloc(trace__entry_str_size);
2777 if (!ttrace->entry_str)
2778 goto out_put;
2779 }
2780
2781 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2782 trace__printf_interrupted_entry(trace);
2783 /*
2784 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
2785 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments
2786 * this breaks syscall__augmented_args() check for augmented args, as we calculate
2787 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
2788 * so when handling, say the openat syscall, we end up getting 6 args for the
2789 * raw_syscalls:sys_enter event, when we expected just 4, we end up mistakenly
2790 * thinking that the extra 2 u64 args are the augmented filename, so just check
2791 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
2792 */
2793 if (evsel != trace->syscalls.events.sys_enter)
2794 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2795 ttrace->entry_time = sample->time;
2796 msg = ttrace->entry_str;
2797 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2798
2799 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2800 args, augmented_args, augmented_args_size, trace, thread);
2801
2802 if (sc->is_exit) {
2803 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2804 int alignment = 0;
2805
2806 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2807 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2808 if (trace->args_alignment > printed)
2809 alignment = trace->args_alignment - printed;
2810 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2811 }
2812 } else {
2813 ttrace->entry_pending = true;
2814 /* See trace__vfs_getname & trace__sys_exit */
2815 ttrace->filename.pending_open = false;
2816 }
2817
2818 if (trace->current != thread) {
2819 thread__put(trace->current);
2820 trace->current = thread__get(thread);
2821 }
2822 err = 0;
2823 out_put:
2824 thread__put(thread);
2825 return err;
2826 }
2827
2828 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2829 struct perf_sample *sample)
2830 {
2831 struct thread_trace *ttrace;
2832 struct thread *thread;
2833 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2834 struct syscall *sc;
2835 char msg[1024];
2836 void *args, *augmented_args = NULL;
2837 int augmented_args_size, e_machine;
2838 size_t printed = 0;
2839
2840
2841 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2842 e_machine = thread__e_machine(thread, trace->host);
2843 sc = trace__syscall_info(trace, evsel, e_machine, id);
2844 if (sc == NULL)
2845 return -1;
2846 ttrace = thread__trace(thread, trace);
2847 /*
2848 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2849 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
2850 */
2851 if (ttrace == NULL)
2852 goto out_put;
2853
2854 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2855 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2856 printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2857 fprintf(trace->output, "%.*s", (int)printed, msg);
2858 err = 0;
2859 out_put:
2860 thread__put(thread);
2861 return err;
2862 }
2863
2864 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2865 struct perf_sample *sample,
2866 struct callchain_cursor *cursor)
2867 {
2868 struct addr_location al;
2869 int max_stack = evsel->core.attr.sample_max_stack ?
2870 evsel->core.attr.sample_max_stack :
2871 trace->max_stack;
2872 int err = -1;
2873
2874 addr_location__init(&al);
2875 if (machine__resolve(trace->host, &al, sample) < 0)
2876 goto out;
2877
2878 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2879 out:
2880 addr_location__exit(&al);
2881 return err;
2882 }
2883
2884 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2885 {
2886 /* TODO: user-configurable print_opts */
2887 const unsigned int print_opts = EVSEL__PRINT_SYM |
2888 EVSEL__PRINT_DSO |
2889 EVSEL__PRINT_UNKNOWN_AS_ADDR;
2890
2891 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2892 }
2893
2894 static const char *errno_to_name(struct evsel *evsel, int err)
2895 {
2896 struct perf_env *env = evsel__env(evsel);
2897
2898 return perf_env__arch_strerrno(env, err);
2899 }
2900
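/*
 * raw_syscalls:sys_exit handler: compute the duration, update summary stats,
 * associate a pending vfs_getname filename with the fd returned by open/openat,
 * and print the return value using errno names or a syscall specific formatter.
 */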
2901 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2902 union perf_event *event __maybe_unused,
2903 struct perf_sample *sample)
2904 {
2905 long ret;
2906 u64 duration = 0;
2907 bool duration_calculated = false;
2908 struct thread *thread;
2909 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2910 int alignment = trace->args_alignment, e_machine;
2911 struct syscall *sc;
2912 struct thread_trace *ttrace;
2913
2914 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2915 e_machine = thread__e_machine(thread, trace->host);
2916 sc = trace__syscall_info(trace, evsel, e_machine, id);
2917 if (sc == NULL)
2918 goto out_put;
2919 ttrace = thread__trace(thread, trace);
2920 if (ttrace == NULL)
2921 goto out_put;
2922
2923 trace__fprintf_sample(trace, evsel, sample, thread);
2924
2925 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2926
2927 if (trace->summary)
2928 thread__update_stats(thread, ttrace, id, sample, ret, trace);
2929
2930 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2931 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2932 ttrace->filename.pending_open = false;
2933 ++trace->stats.vfs_getname;
2934 }
2935
2936 if (ttrace->entry_time) {
2937 duration = sample->time - ttrace->entry_time;
2938 if (trace__filter_duration(trace, duration))
2939 goto out;
2940 duration_calculated = true;
2941 } else if (trace->duration_filter)
2942 goto out;
2943
2944 if (sample->callchain) {
2945 struct callchain_cursor *cursor = get_tls_callchain_cursor();
2946
2947 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2948 if (callchain_ret == 0) {
2949 if (cursor->nr < trace->min_stack)
2950 goto out;
2951 callchain_ret = 1;
2952 }
2953 }
2954
2955 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2956 goto out;
2957
2958 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2959
2960 if (ttrace->entry_pending) {
2961 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2962 } else {
2963 printed += fprintf(trace->output, " ... [");
2964 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2965 printed += 9;
2966 printed += fprintf(trace->output, "]: %s()", sc->name);
2967 }
2968
2969 printed++; /* the closing ')' */
2970
2971 if (alignment > printed)
2972 alignment -= printed;
2973 else
2974 alignment = 0;
2975
2976 fprintf(trace->output, ")%*s= ", alignment, " ");
2977
2978 if (sc->fmt == NULL) {
2979 if (ret < 0)
2980 goto errno_print;
2981 signed_print:
2982 fprintf(trace->output, "%ld", ret);
2983 } else if (ret < 0) {
2984 errno_print: {
2985 char bf[STRERR_BUFSIZE];
2986 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2987 *e = errno_to_name(evsel, -ret);
2988
2989 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2990 }
2991 } else if (ret == 0 && sc->fmt->timeout)
2992 fprintf(trace->output, "0 (Timeout)");
2993 else if (ttrace->ret_scnprintf) {
2994 char bf[1024];
2995 struct syscall_arg arg = {
2996 .val = ret,
2997 .thread = thread,
2998 .trace = trace,
2999 };
3000 ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
3001 ttrace->ret_scnprintf = NULL;
3002 fprintf(trace->output, "%s", bf);
3003 } else if (sc->fmt->hexret)
3004 fprintf(trace->output, "%#lx", ret);
3005 else if (sc->fmt->errpid) {
3006 struct thread *child = machine__find_thread(trace->host, ret, ret);
3007
3008 if (child != NULL) {
3009 fprintf(trace->output, "%ld", ret);
3010 if (thread__comm_set(child))
3011 fprintf(trace->output, " (%s)", thread__comm_str(child));
3012 thread__put(child);
3013 }
3014 } else
3015 goto signed_print;
3016
3017 fputc('\n', trace->output);
3018
3019 /*
3020 * We only consider an 'event' for the sake of --max-events a non-filtered
3021 * sys_enter + sys_exit and other tracepoint events.
3022 */
3023 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
3024 interrupted = true;
3025
3026 if (callchain_ret > 0)
3027 trace__fprintf_callchain(trace, sample);
3028 else if (callchain_ret < 0)
3029 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
3030 out:
3031 ttrace->entry_pending = false;
3032 err = 0;
3033 out_put:
3034 thread__put(thread);
3035 return err;
3036 }
3037
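/*
 * probe:vfs_getname handler: copy the resolved filename and, if a syscall
 * entry is pending, splice it into entry_str at the position recorded by
 * thread__set_filename_pos().
 */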
3038 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
3039 union perf_event *event __maybe_unused,
3040 struct perf_sample *sample)
3041 {
3042 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3043 struct thread_trace *ttrace;
3044 size_t filename_len, entry_str_len, to_move;
3045 ssize_t remaining_space;
3046 char *pos;
3047 const char *filename = evsel__rawptr(evsel, sample, "pathname");
3048
3049 if (!thread)
3050 goto out;
3051
3052 ttrace = thread__priv(thread);
3053 if (!ttrace)
3054 goto out_put;
3055
3056 filename_len = strlen(filename);
3057 if (filename_len == 0)
3058 goto out_put;
3059
3060 if (ttrace->filename.namelen < filename_len) {
3061 char *f = realloc(ttrace->filename.name, filename_len + 1);
3062
3063 if (f == NULL)
3064 goto out_put;
3065
3066 ttrace->filename.namelen = filename_len;
3067 ttrace->filename.name = f;
3068 }
3069
3070 strcpy(ttrace->filename.name, filename);
3071 ttrace->filename.pending_open = true;
3072
3073 if (!ttrace->filename.ptr)
3074 goto out_put;
3075
3076 entry_str_len = strlen(ttrace->entry_str);
3077 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
3078 if (remaining_space <= 0)
3079 goto out_put;
3080
3081 if (filename_len > (size_t)remaining_space) {
3082 filename += filename_len - remaining_space;
3083 filename_len = remaining_space;
3084 }
3085
3086 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
3087 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
3088 memmove(pos + filename_len, pos, to_move);
3089 memcpy(pos, filename, filename_len);
3090
3091 ttrace->filename.ptr = 0;
3092 ttrace->filename.entry_str_pos = 0;
3093 out_put:
3094 thread__put(thread);
3095 out:
3096 return 0;
3097 }
3098
3099 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
3100 union perf_event *event __maybe_unused,
3101 struct perf_sample *sample)
3102 {
3103 u64 runtime = evsel__intval(evsel, sample, "runtime");
3104 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
3105 struct thread *thread = machine__findnew_thread(trace->host,
3106 sample->pid,
3107 sample->tid);
3108 struct thread_trace *ttrace = thread__trace(thread, trace);
3109
3110 if (ttrace == NULL)
3111 goto out_dump;
3112
3113 ttrace->runtime_ms += runtime_ms;
3114 trace->runtime_ms += runtime_ms;
3115 out_put:
3116 thread__put(thread);
3117 return 0;
3118
3119 out_dump:
3120 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
3121 evsel->name,
3122 evsel__strval(evsel, sample, "comm"),
3123 (pid_t)evsel__intval(evsel, sample, "pid"),
3124 runtime,
3125 evsel__intval(evsel, sample, "vruntime"));
3126 goto out_put;
3127 }
3128
3129 static int bpf_output__printer(enum binary_printer_ops op,
3130 unsigned int val, void *extra __maybe_unused, FILE *fp)
3131 {
3132 unsigned char ch = (unsigned char)val;
3133
3134 switch (op) {
3135 case BINARY_PRINT_CHAR_DATA:
3136 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
3137 case BINARY_PRINT_DATA_BEGIN:
3138 case BINARY_PRINT_LINE_BEGIN:
3139 case BINARY_PRINT_ADDR:
3140 case BINARY_PRINT_NUM_DATA:
3141 case BINARY_PRINT_NUM_PAD:
3142 case BINARY_PRINT_SEP:
3143 case BINARY_PRINT_CHAR_PAD:
3144 case BINARY_PRINT_LINE_END:
3145 case BINARY_PRINT_DATA_END:
3146 default:
3147 break;
3148 }
3149
3150 return 0;
3151 }
3152
3153 static void bpf_output__fprintf(struct trace *trace,
3154 struct perf_sample *sample)
3155 {
3156 binary__fprintf(sample->raw_data, sample->raw_size, 8,
3157 bpf_output__printer, NULL, trace->output);
3158 ++trace->nr_events_printed;
3159 }
3160
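/*
 * Print the fields of a non-syscall tracepoint using the same per-argument
 * beautifier, masking and zero-suppression machinery as the syscall printers.
 */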
3161 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
3162 struct thread *thread, void *augmented_args, int augmented_args_size)
3163 {
3164 char bf[2048];
3165 size_t size = sizeof(bf);
3166 const struct tep_event *tp_format = evsel__tp_format(evsel);
3167 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3168 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
3169 size_t printed = 0, btf_printed;
3170 unsigned long val;
3171 u8 bit = 1;
3172 struct syscall_arg syscall_arg = {
3173 .augmented = {
3174 .size = augmented_args_size,
3175 .args = augmented_args,
3176 },
3177 .idx = 0,
3178 .mask = 0,
3179 .trace = trace,
3180 .thread = thread,
3181 .show_string_prefix = trace->show_string_prefix,
3182 };
3183
3184 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
3185 if (syscall_arg.mask & bit)
3186 continue;
3187
3188 syscall_arg.len = 0;
3189 syscall_arg.fmt = arg;
3190 if (field->flags & TEP_FIELD_IS_ARRAY) {
3191 int offset = field->offset;
3192
3193 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3194 offset = format_field__intval(field, sample, evsel->needs_swap);
3195 syscall_arg.len = offset >> 16;
3196 offset &= 0xffff;
3197 if (tep_field_is_relative(field->flags))
3198 offset += field->offset + field->size;
3199 }
3200
3201 val = (uintptr_t)(sample->raw_data + offset);
3202 } else
3203 val = format_field__intval(field, sample, evsel->needs_swap);
3204 /*
3205 * Some syscall args need some mask, most don't and
3206 * return val untouched.
3207 */
3208 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
3209
3210 /* Suppress this argument if its value is zero and show_zero property isn't set. */
3211 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE)
3212 continue;
3213
3214 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
3215
3216 if (trace->show_arg_names)
3217 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
3218
3219 btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type);
3220 if (btf_printed) {
3221 printed += btf_printed;
3222 continue;
3223 }
3224
3225 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
3226 }
3227
3228 return fprintf(trace->output, "%.*s", (int)printed, bf);
3229 }
3230
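/*
 * Handler for the other --event tracepoints: prints an interrupted syscall
 * entry if needed, then the event's fields (or its bpf-output payload),
 * honoring per-event --max-events limits.
 */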
3231 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
3232 union perf_event *event __maybe_unused,
3233 struct perf_sample *sample)
3234 {
3235 struct thread *thread;
3236 int callchain_ret = 0;
3237
3238 if (evsel->nr_events_printed >= evsel->max_events)
3239 return 0;
3240
3241 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3242
3243 if (sample->callchain) {
3244 struct callchain_cursor *cursor = get_tls_callchain_cursor();
3245
3246 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3247 if (callchain_ret == 0) {
3248 if (cursor->nr < trace->min_stack)
3249 goto out;
3250 callchain_ret = 1;
3251 }
3252 }
3253
3254 trace__printf_interrupted_entry(trace);
3255 trace__fprintf_tstamp(trace, sample->time, trace->output);
3256
3257 if (trace->trace_syscalls && trace->show_duration)
3258 fprintf(trace->output, "( ): ");
3259
3260 if (thread)
3261 trace__fprintf_comm_tid(trace, thread, trace->output);
3262
3263 if (evsel == trace->syscalls.events.bpf_output) {
3264 int id = perf_evsel__sc_tp_uint(evsel, id, sample);
3265 int e_machine = thread ? thread__e_machine(thread, trace->host) : EM_HOST;
3266 struct syscall *sc = trace__syscall_info(trace, evsel, e_machine, id);
3267
3268 if (sc) {
3269 fprintf(trace->output, "%s(", sc->name);
3270 trace__fprintf_sys_enter(trace, evsel, sample);
3271 fputc(')', trace->output);
3272 goto newline;
3273 }
3274
3275 /*
3276 * XXX: Not having the associated syscall info or not finding/adding
3277 * the thread should never happen, but if it does...
3278 * fall thru and print it as a bpf_output event.
3279 */
3280 }
3281
3282 fprintf(trace->output, "%s(", evsel->name);
3283
3284 if (evsel__is_bpf_output(evsel)) {
3285 bpf_output__fprintf(trace, sample);
3286 } else {
3287 const struct tep_event *tp_format = evsel__tp_format(evsel);
3288
3289 if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) ||
3290 trace__fprintf_sys_enter(trace, evsel, sample))) {
3291 if (trace->libtraceevent_print) {
3292 event_format__fprintf(tp_format, sample->cpu,
3293 sample->raw_data, sample->raw_size,
3294 trace->output);
3295 } else {
3296 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
3297 }
3298 }
3299 }
3300
3301 newline:
3302 fprintf(trace->output, ")\n");
3303
3304 if (callchain_ret > 0)
3305 trace__fprintf_callchain(trace, sample);
3306 else if (callchain_ret < 0)
3307 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
3308
3309 ++trace->nr_events_printed;
3310
3311 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
3312 evsel__disable(evsel);
3313 evsel__close(evsel);
3314 }
3315 out:
3316 thread__put(thread);
3317 return 0;
3318 }
3319
3320 static void print_location(FILE *f, struct perf_sample *sample,
3321 struct addr_location *al,
3322 bool print_dso, bool print_sym)
3323 {
3324
3325 if ((verbose > 0 || print_dso) && al->map)
3326 fprintf(f, "%s@", dso__long_name(map__dso(al->map)));
3327
3328 if ((verbose > 0 || print_sym) && al->sym)
3329 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
3330 al->addr - al->sym->start);
3331 else if (al->map)
3332 fprintf(f, "0x%" PRIx64, al->addr);
3333 else
3334 fprintf(f, "0x%" PRIx64, sample->addr);
3335 }
3336
3337 static int trace__pgfault(struct trace *trace,
3338 struct evsel *evsel,
3339 union perf_event *event __maybe_unused,
3340 struct perf_sample *sample)
3341 {
3342 struct thread *thread;
3343 struct addr_location al;
3344 char map_type = 'd';
3345 struct thread_trace *ttrace;
3346 int err = -1;
3347 int callchain_ret = 0;
3348
3349 addr_location__init(&al);
3350 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3351
3352 if (sample->callchain) {
3353 struct callchain_cursor *cursor = get_tls_callchain_cursor();
3354
3355 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3356 if (callchain_ret == 0) {
3357 if (cursor->nr < trace->min_stack)
3358 goto out_put;
3359 callchain_ret = 1;
3360 }
3361 }
3362
3363 ttrace = thread__trace(thread, trace);
3364 if (ttrace == NULL)
3365 goto out_put;
3366
3367 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) {
3368 ttrace->pfmaj++;
3369 trace->pfmaj++;
3370 } else {
3371 ttrace->pfmin++;
3372 trace->pfmin++;
3373 }
3374
3375 if (trace->summary_only)
3376 goto out;
3377
3378 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
3379
3380 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
3381
3382 fprintf(trace->output, "%sfault [",
3383 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
3384 "maj" : "min");
3385
3386 print_location(trace->output, sample, &al, false, true);
3387
3388 fprintf(trace->output, "] => ");
3389
3390 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
3391
3392 if (!al.map) {
3393 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
3394
3395 if (al.map)
3396 map_type = 'x';
3397 else
3398 map_type = '?';
3399 }
3400
3401 print_location(trace->output, sample, &al, true, false);
3402
3403 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
3404
3405 if (callchain_ret > 0)
3406 trace__fprintf_callchain(trace, sample);
3407 else if (callchain_ret < 0)
3408 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
3409
3410 ++trace->nr_events_printed;
3411 out:
3412 err = 0;
3413 out_put:
3414 thread__put(thread);
3415 addr_location__exit(&al);
3416 return err;
3417 }
3418
3419 static void trace__set_base_time(struct trace *trace,
3420 struct evsel *evsel,
3421 struct perf_sample *sample)
3422 {
3423 /*
3424 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
3425 * and don't use sample->time unconditionally; we may end up having
3426 * some other event in the future without PERF_SAMPLE_TIME for good
3427 * reason, i.e. we may not be interested in its timestamps, just in
3428 * it taking place, picking some piece of information when it
3429 * appears in our event stream (vfs_getname comes to mind).
3430 */
3431 if (trace->base_time == 0 && !trace->full_time &&
3432 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
3433 trace->base_time = sample->time;
3434 }
3435
3436 static int trace__process_sample(const struct perf_tool *tool,
3437 union perf_event *event,
3438 struct perf_sample *sample,
3439 struct evsel *evsel,
3440 struct machine *machine __maybe_unused)
3441 {
3442 struct trace *trace = container_of(tool, struct trace, tool);
3443 struct thread *thread;
3444 int err = 0;
3445
3446 tracepoint_handler handler = evsel->handler;
3447
3448 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3449 if (thread && thread__is_filtered(thread))
3450 goto out;
3451
3452 trace__set_base_time(trace, evsel, sample);
3453
3454 if (handler) {
3455 ++trace->nr_events;
3456 handler(trace, evsel, event, sample);
3457 }
3458 out:
3459 thread__put(thread);
3460 return err;
3461 }
3462
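/*
 * 'perf trace record' is a thin wrapper over 'perf record'. A rough
 * sketch of the synthesized command line (assuming the raw_syscalls
 * tracepoints exist and no page fault options were given):
 *
 *   perf record -R -m 1024 -c 1 \
 *       -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *       --filter <expression excluding our own pid> <workload args>
 *
 * filtering out the tracer's own pid avoids a feedback loop.
 */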
3463 static int trace__record(struct trace *trace, int argc, const char **argv)
3464 {
3465 unsigned int rec_argc, i, j;
3466 const char **rec_argv;
3467 const char * const record_args[] = {
3468 "record",
3469 "-R",
3470 "-m", "1024",
3471 "-c", "1",
3472 };
3473 pid_t pid = getpid();
3474 char *filter = asprintf__tp_filter_pids(1, &pid);
3475 const char * const sc_args[] = { "-e", };
3476 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
3477 const char * const majpf_args[] = { "-e", "major-faults" };
3478 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
3479 const char * const minpf_args[] = { "-e", "minor-faults" };
3480 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
3481 int err = -1;
3482
3483 /* +3 is for the event string below and the pid filter */
3484 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
3485 majpf_args_nr + minpf_args_nr + argc;
3486 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3487
3488 if (rec_argv == NULL || filter == NULL)
3489 goto out_free;
3490
3491 j = 0;
3492 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3493 rec_argv[j++] = record_args[i];
3494
3495 if (trace->trace_syscalls) {
3496 for (i = 0; i < sc_args_nr; i++)
3497 rec_argv[j++] = sc_args[i];
3498
3499 /* event string may be different for older kernels - e.g., RHEL6 */
3500 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
3501 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
3502 else if (is_valid_tracepoint("syscalls:sys_enter"))
3503 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3504 else {
3505 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3506 goto out_free;
3507 }
3508 }
3509
3510 rec_argv[j++] = "--filter";
3511 rec_argv[j++] = filter;
3512
3513 if (trace->trace_pgfaults & TRACE_PFMAJ)
3514 for (i = 0; i < majpf_args_nr; i++)
3515 rec_argv[j++] = majpf_args[i];
3516
3517 if (trace->trace_pgfaults & TRACE_PFMIN)
3518 for (i = 0; i < minpf_args_nr; i++)
3519 rec_argv[j++] = minpf_args[i];
3520
3521 for (i = 0; i < (unsigned int)argc; i++)
3522 rec_argv[j++] = argv[i];
3523
3524 err = cmd_record(j, rec_argv);
3525 out_free:
3526 free(filter);
3527 free(rec_argv);
3528 return err;
3529 }
3530
3531 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3532 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp);
3533
3534 static bool evlist__add_vfs_getname(struct evlist *evlist)
3535 {
3536 bool found = false;
3537 struct evsel *evsel, *tmp;
3538 struct parse_events_error err;
3539 int ret;
3540
3541 parse_events_error__init(&err);
3542 ret = parse_events(evlist, "probe:vfs_getname*", &err);
3543 parse_events_error__exit(&err);
3544 if (ret)
3545 return false;
3546
3547 evlist__for_each_entry_safe(evlist, evsel, tmp) {
3548 if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3549 continue;
3550
3551 if (evsel__field(evsel, "pathname")) {
3552 evsel->handler = trace__vfs_getname;
3553 found = true;
3554 continue;
3555 }
3556
3557 list_del_init(&evsel->core.node);
3558 evsel->evlist = NULL;
3559 evsel__delete(evsel);
3560 }
3561
3562 return found;
3563 }
3564
3565 static struct evsel *evsel__new_pgfault(u64 config)
3566 {
3567 struct evsel *evsel;
3568 struct perf_event_attr attr = {
3569 .type = PERF_TYPE_SOFTWARE,
3570 .mmap_data = 1,
3571 };
3572
3573 attr.config = config;
3574 attr.sample_period = 1;
3575
3576 event_attr_init(&attr);
3577
3578 evsel = evsel__new(&attr);
3579 if (evsel)
3580 evsel->handler = trace__pgfault;
3581
3582 return evsel;
3583 }
3584
3585 static void evlist__free_syscall_tp_fields(struct evlist *evlist)
3586 {
3587 struct evsel *evsel;
3588
3589 evlist__for_each_entry(evlist, evsel) {
3590 evsel_trace__delete(evsel->priv);
3591 evsel->priv = NULL;
3592 }
3593 }
3594
3595 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3596 {
3597 const u32 type = event->header.type;
3598 struct evsel *evsel;
3599
3600 if (type != PERF_RECORD_SAMPLE) {
3601 trace__process_event(trace, trace->host, event, sample);
3602 return;
3603 }
3604
3605 evsel = evlist__id2evsel(trace->evlist, sample->id);
3606 if (evsel == NULL) {
3607 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3608 return;
3609 }
3610
3611 if (evswitch__discard(&trace->evswitch, evsel))
3612 return;
3613
3614 trace__set_base_time(trace, evsel, sample);
3615
3616 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3617 sample->raw_data == NULL) {
3618 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3619 evsel__name(evsel), sample->tid,
3620 sample->cpu, sample->raw_size);
3621 } else {
3622 tracepoint_handler handler = evsel->handler;
3623 handler(trace, evsel, event, sample);
3624 }
3625
3626 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3627 interrupted = true;
3628 }
3629
3630 static int trace__add_syscall_newtp(struct trace *trace)
3631 {
3632 int ret = -1;
3633 struct evlist *evlist = trace->evlist;
3634 struct evsel *sys_enter, *sys_exit;
3635
3636 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
3637 if (sys_enter == NULL)
3638 goto out;
3639
3640 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
3641 goto out_delete_sys_enter;
3642
3643 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
3644 if (sys_exit == NULL)
3645 goto out_delete_sys_enter;
3646
3647 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
3648 goto out_delete_sys_exit;
3649
3650 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3651 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3652
3653 evlist__add(evlist, sys_enter);
3654 evlist__add(evlist, sys_exit);
3655
3656 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3657 /*
3658 * We're interested only in the user space callchain
3659 * leading to the syscall, allow overriding that for
3660 * debugging reasons using --kernel_syscall_callchains
3661 */
3662 sys_exit->core.attr.exclude_callchain_kernel = 1;
3663 }
3664
3665 trace->syscalls.events.sys_enter = sys_enter;
3666 trace->syscalls.events.sys_exit = sys_exit;
3667
3668 ret = 0;
3669 out:
3670 return ret;
3671
3672 out_delete_sys_exit:
3673 evsel__delete_priv(sys_exit);
3674 out_delete_sys_enter:
3675 evsel__delete_priv(sys_enter);
3676 goto out;
3677 }
3678
3679 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3680 {
3681 int err = -1;
3682 struct evsel *sys_exit;
3683 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3684 trace->ev_qualifier_ids.nr,
3685 trace->ev_qualifier_ids.entries);
3686
3687 if (filter == NULL)
3688 goto out_enomem;
3689
3690 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3691 sys_exit = trace->syscalls.events.sys_exit;
3692 err = evsel__append_tp_filter(sys_exit, filter);
3693 }
3694
3695 free(filter);
3696 out:
3697 return err;
3698 out_enomem:
3699 errno = ENOMEM;
3700 goto out;
3701 }
3702
3703 #ifdef HAVE_BPF_SKEL
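/*
 * Resolve and cache the BTF type of a struct/union syscall argument,
 * e.g. looking up a name like "clone_args" (illustrative) once in
 * vmlinux BTF so that later size queries come straight from
 * arg_fmt->type/type_id.
 */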
3704 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
3705 {
3706 int id;
3707
3708 if (arg_fmt->type != NULL)
3709 return -1;
3710
3711 id = btf__find_by_name(btf, type);
3712 if (id < 0)
3713 return -1;
3714
3715 arg_fmt->type = btf__type_by_id(btf, id);
3716 arg_fmt->type_id = id;
3717
3718 return 0;
3719 }
3720
3721 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3722 {
3723 struct bpf_program *pos, *prog = NULL;
3724 const char *sec_name;
3725
3726 if (trace->skel->obj == NULL)
3727 return NULL;
3728
3729 bpf_object__for_each_program(pos, trace->skel->obj) {
3730 sec_name = bpf_program__section_name(pos);
3731 if (sec_name && !strcmp(sec_name, name)) {
3732 prog = pos;
3733 break;
3734 }
3735 }
3736
3737 return prog;
3738 }
3739
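/*
 * Pick the BPF augmenter for one syscall: an explicit program name from
 * the fmt table when present, otherwise the conventional section name,
 * e.g. "openat" + "enter" -> "tp/syscalls/sys_enter_openat", then the
 * same scheme with the syscall's alias, finally falling back to the
 * catch-all unaugmented program.
 */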
3740 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3741 const char *prog_name, const char *type)
3742 {
3743 struct bpf_program *prog;
3744
3745 if (prog_name == NULL) {
3746 char default_prog_name[256];
3747 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
3748 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3749 if (prog != NULL)
3750 goto out_found;
3751 if (sc->fmt && sc->fmt->alias) {
3752 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
3753 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3754 if (prog != NULL)
3755 goto out_found;
3756 }
3757 goto out_unaugmented;
3758 }
3759
3760 prog = trace__find_bpf_program_by_title(trace, prog_name);
3761
3762 if (prog != NULL) {
3763 out_found:
3764 return prog;
3765 }
3766
3767 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
3768 prog_name, type, sc->name);
3769 out_unaugmented:
3770 return trace->skel->progs.syscall_unaugmented;
3771 }
3772
3773 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
3774 {
3775 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
3776
3777 if (sc == NULL)
3778 return;
3779
3780 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3781 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3782 }
3783
3784 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
3785 {
3786 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
3787 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3788 }
3789
3790 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
3791 {
3792 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
3793 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3794 }
3795
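/*
 * Fill one beauty_map entry per syscall argument so the BPF collector
 * knows how many bytes to copy from user space:
 *
 *   size of struct/union  for pointers to aggregates,
 *   1                     for string-like "const char *" args,
 *   -(j + 1)              for buffers, where j is the index of the
 *                         paired size/count argument.
 */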
3796 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
3797 {
3798 struct tep_format_field *field;
3799 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
3800 const struct btf_type *bt;
3801 char *struct_offset, *tmp, name[32];
3802 bool can_augment = false;
3803 int i, cnt;
3804
3805 if (sc == NULL)
3806 return -1;
3807
3808 trace__load_vmlinux_btf(trace);
3809 if (trace->btf == NULL)
3810 return -1;
3811
3812 for (i = 0, field = sc->args; field; ++i, field = field->next) {
3813 // XXX We're only collecting pointer payloads _from_ user space
3814 if (!sc->arg_fmt[i].from_user)
3815 continue;
3816
3817 struct_offset = strstr(field->type, "struct ");
3818 if (struct_offset == NULL)
3819 struct_offset = strstr(field->type, "union ");
3820 else
3821 struct_offset++; // "union" is shorter
3822
3823 if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */
3824 struct_offset += 6;
3825
3826 /* for 'struct foo *', we only want 'foo' */
3827 for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0' && cnt < (int)sizeof(name) - 1; ++tmp, ++cnt) {
3828 }
3829
3830 strncpy(name, struct_offset, cnt);
3831 name[cnt] = '\0';
3832
3833 /* cache struct's btf_type and type_id */
3834 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name))
3835 continue;
3836
3837 bt = sc->arg_fmt[i].type;
3838 beauty_array[i] = bt->size;
3839 can_augment = true;
3840 } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */
3841 strcmp(field->type, "const char *") == 0 &&
3842 (strstr(field->name, "name") ||
3843 strstr(field->name, "path") ||
3844 strstr(field->name, "file") ||
3845 strstr(field->name, "root") ||
3846 strstr(field->name, "key") ||
3847 strstr(field->name, "special") ||
3848 strstr(field->name, "type") ||
3849 strstr(field->name, "description"))) {
3850 beauty_array[i] = 1;
3851 can_augment = true;
3852 } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */
3853 strstr(field->type, "char *") &&
3854 (strstr(field->name, "buf") ||
3855 strstr(field->name, "val") ||
3856 strstr(field->name, "msg"))) {
3857 int j;
3858 struct tep_format_field *field_tmp;
3859
3860 /* find the size argument that is paired with the buffer */
3861 for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) {
3862 if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */
3863 (strstr(field_tmp->name, "count") ||
3864 strstr(field_tmp->name, "siz") || /* size, bufsiz */
3865 (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) {
3866 /* filename's got 'len' in it, we don't want that */
3867 beauty_array[i] = -(j + 1);
3868 can_augment = true;
3869 break;
3870 }
3871 }
3872 }
3873 }
3874
3875 if (can_augment)
3876 return 0;
3877
3878 return -1;
3879 }
3880
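/*
 * Heuristic reuse: for a syscall without its own augmenter, look for
 * another enabled syscall whose argument list matches ours
 * pointer-for-pointer (same positions, same types) with no extra
 * trailing pointers, so that its sys_enter augmenter collects exactly
 * what we need and nothing more.
 */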
3881 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace,
3882 struct syscall *sc)
3883 {
3884 struct tep_format_field *field, *candidate_field;
3885 /*
3886 * We're only interested in syscalls that have a pointer:
3887 */
3888 for (field = sc->args; field; field = field->next) {
3889 if (field->flags & TEP_FIELD_IS_POINTER)
3890 goto try_to_find_pair;
3891 }
3892
3893 return NULL;
3894
3895 try_to_find_pair:
3896 for (int i = 0, num_idx = syscalltbl__num_idx(sc->e_machine); i < num_idx; ++i) {
3897 int id = syscalltbl__id_at_idx(sc->e_machine, i);
3898 struct syscall *pair = trace__syscall_info(trace, NULL, sc->e_machine, id);
3899 struct bpf_program *pair_prog;
3900 bool is_candidate = false;
3901
3902 if (pair == NULL || pair->id == sc->id ||
3903 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
3904 continue;
3905
3906 for (field = sc->args, candidate_field = pair->args;
3907 field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3908 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3909 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3910
3911 if (is_pointer) {
3912 if (!candidate_is_pointer) {
3913 // The candidate just doesn't copy our pointer arg; it might copy other pointers we want.
3914 continue;
3915 }
3916 } else {
3917 if (candidate_is_pointer) {
3918 // The candidate might copy a pointer we don't have, skip it.
3919 goto next_candidate;
3920 }
3921 continue;
3922 }
3923
3924 if (strcmp(field->type, candidate_field->type))
3925 goto next_candidate;
3926
3927 /*
3928 * This is limited in the BPF program but sys_write
3929 * uses "const char *" for its "buf" arg so we need to
3930 * use some heuristic that is kinda future proof...
3931 */
3932 if (strcmp(field->type, "const char *") == 0 &&
3933 !(strstr(field->name, "name") ||
3934 strstr(field->name, "path") ||
3935 strstr(field->name, "file") ||
3936 strstr(field->name, "root") ||
3937 strstr(field->name, "description")))
3938 goto next_candidate;
3939
3940 is_candidate = true;
3941 }
3942
3943 if (!is_candidate)
3944 goto next_candidate;
3945
3946 /*
3947 * Check if the tentative pair syscall augmenter has more pointers, if it has,
3948 * then it may be collecting that and we then can't use it, as it would collect
3949 * more than what is common to the two syscalls.
3950 */
3951 if (candidate_field) {
3952 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3953 if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3954 goto next_candidate;
3955 }
3956
3957 pair_prog = pair->bpf_prog.sys_enter;
3958 /*
3959 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3960 * have been searched for, so search it here and if it returns the
3961 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
3962 * program for a filtered syscall on a non-filtered one.
3963 *
3964 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3965 * useful for "renameat2".
3966 */
3967 if (pair_prog == NULL) {
3968 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3969 if (pair_prog == trace->skel->progs.syscall_unaugmented)
3970 goto next_candidate;
3971 }
3972
3973 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name,
3974 sc->name);
3975 return pair_prog;
3976 next_candidate:
3977 continue;
3978 }
3979
3980 return NULL;
3981 }
3982
3983 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
3984 {
3985 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3986 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
3987 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
3988 int err = 0;
3989 unsigned int beauty_array[6];
3990
3991 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
3992 int prog_fd, key = syscalltbl__id_at_idx(e_machine, i);
3993
3994 if (!trace__syscall_enabled(trace, key))
3995 continue;
3996
3997 trace__init_syscall_bpf_progs(trace, e_machine, key);
3998
3999 // It'll get at least the "!raw_syscalls:unaugmented"
4000 prog_fd = trace__bpf_prog_sys_enter_fd(trace, e_machine, key);
4001 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
4002 if (err)
4003 break;
4004 prog_fd = trace__bpf_prog_sys_exit_fd(trace, e_machine, key);
4005 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
4006 if (err)
4007 break;
4008
4009 /* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */
4010 memset(beauty_array, 0, sizeof(beauty_array));
4011 err = trace__bpf_sys_enter_beauty_map(trace, e_machine, key, (unsigned int *)beauty_array);
4012 if (err)
4013 continue;
4014 err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY);
4015 if (err)
4016 break;
4017 }
4018
4019 /*
4020 * Now let's do a second pass looking for enabled syscalls without
4021 * an augmenter that have a signature that is a superset of another
4022 * syscall with an augmenter so that we can auto-reuse it.
4023 *
4024 * I.e. if we have an augmenter for the "open" syscall that has
4025 * this signature:
4026 *
4027 * int open(const char *pathname, int flags, mode_t mode);
4028 *
4029 * I.e. that will collect just the first string argument, then we
4030 * can reuse it for the 'creat' syscall, that has this signature:
4031 *
4032 * int creat(const char *pathname, mode_t mode);
4033 *
4034 * and for:
4035 *
4036 * int stat(const char *pathname, struct stat *statbuf);
4037 * int lstat(const char *pathname, struct stat *statbuf);
4038 *
4039 * Because the 'open' augmenter will collect the first arg as a string,
4040 * and leave alone all the other args, which already helps with
4041 * beautifying 'stat' and 'lstat''s pathname arg.
4042 *
4043 * Then, in time, when 'stat' gets an augmenter that collects both
4044 * first and second args (this one on the raw_syscalls:sys_exit prog
4045 * array tail call), then that one will be used.
4046 */
4047 for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
4048 int key = syscalltbl__id_at_idx(e_machine, i);
4049 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
4050 struct bpf_program *pair_prog;
4051 int prog_fd;
4052
4053 if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
4054 continue;
4055
4056 /*
4057 * For now we're just reusing the sys_enter prog, and if it
4058 * already has an augmenter, we don't need to find one.
4059 */
4060 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
4061 continue;
4062
4063 /*
4064 * Look at all the other syscalls for one that has a signature
4065 * that is close enough that we can share:
4066 */
4067 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
4068 if (pair_prog == NULL)
4069 continue;
4070
4071 sc->bpf_prog.sys_enter = pair_prog;
4072
4073 /*
4074 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
4075 * with the fd for the program we're reusing:
4076 */
4077 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
4078 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
4079 if (err)
4080 break;
4081 }
4082
4083 return err;
4084 }
4085 #endif // HAVE_BPF_SKEL
4086
4087 static int trace__set_ev_qualifier_filter(struct trace *trace)
4088 {
4089 if (trace->syscalls.events.sys_enter)
4090 return trace__set_ev_qualifier_tp_filter(trace);
4091 return 0;
4092 }
4093
4094 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
4095 size_t npids __maybe_unused, pid_t *pids __maybe_unused)
4096 {
4097 int err = 0;
4098 #ifdef HAVE_LIBBPF_SUPPORT
4099 bool value = true;
4100 int map_fd = bpf_map__fd(map);
4101 size_t i;
4102
4103 for (i = 0; i < npids; ++i) {
4104 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
4105 if (err)
4106 break;
4107 }
4108 #endif
4109 return err;
4110 }
4111
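/*
 * Filter out perf's own pid and, if it can be found, the sshd or
 * terminal ancestor that displays our output: tracing those would
 * generate an event for every line we print, which would generate
 * more output, in a feedback loop.
 */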
4112 static int trace__set_filter_loop_pids(struct trace *trace)
4113 {
4114 unsigned int nr = 1; int err;
4115 pid_t pids[32] = {
4116 getpid(),
4117 };
4118 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
4119
4120 while (thread && nr < ARRAY_SIZE(pids)) {
4121 struct thread *parent = machine__find_thread(trace->host,
4122 thread__ppid(thread),
4123 thread__ppid(thread));
4124
4125 if (parent == NULL)
4126 break;
4127
4128 if (!strcmp(thread__comm_str(parent), "sshd") ||
4129 strstarts(thread__comm_str(parent), "gnome-terminal")) {
4130 pids[nr++] = thread__tid(parent);
4131 break;
4132 }
4133 thread = parent;
4134 }
4135
4136 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
4137 if (!err && trace->filter_pids.map)
4138 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
4139
4140 return err;
4141 }
4142
4143 static int trace__set_filter_pids(struct trace *trace)
4144 {
4145 int err = 0;
4146 /*
4147 * Better not use !target__has_task() here because we need to cover the
4148 * case where no threads were specified in the command line, but a
4149 * workload was, and in that case we will fill in the thread_map when
4150 * we fork the workload in evlist__prepare_workload.
4151 */
4152 if (trace->filter_pids.nr > 0) {
4153 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
4154 trace->filter_pids.entries);
4155 if (!err && trace->filter_pids.map) {
4156 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
4157 trace->filter_pids.entries);
4158 }
4159 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
4160 err = trace__set_filter_loop_pids(trace);
4161 }
4162
4163 return err;
4164 }
4165
4166 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
4167 {
4168 struct evlist *evlist = trace->evlist;
4169 struct perf_sample sample;
4170 int err;
4171
4172 perf_sample__init(&sample, /*all=*/false);
4173 err = evlist__parse_sample(evlist, event, &sample);
4174 if (err)
4175 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
4176 else
4177 trace__handle_event(trace, event, &sample);
4178
4179 perf_sample__exit(&sample);
4180 return 0;
4181 }
4182
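/*
 * With --sort-events, samples are staged in an ordered_events queue and
 * only those at least one second older than the newest timestamp seen
 * (trace->oe.last - NSEC_PER_SEC) get flushed, so events arriving out
 * of order from different ring buffers are still printed in time order.
 */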
4183 static int __trace__flush_events(struct trace *trace)
4184 {
4185 u64 first = ordered_events__first_time(&trace->oe.data);
4186 u64 flush = trace->oe.last - NSEC_PER_SEC;
4187
4188 /* Is there something to flush? */
4189 if (first && first < flush)
4190 return ordered_events__flush_time(&trace->oe.data, flush);
4191
4192 return 0;
4193 }
4194
4195 static int trace__flush_events(struct trace *trace)
4196 {
4197 return !trace->sort_events ? 0 : __trace__flush_events(trace);
4198 }
4199
4200 static int trace__deliver_event(struct trace *trace, union perf_event *event)
4201 {
4202 int err;
4203
4204 if (!trace->sort_events)
4205 return __trace__deliver_event(trace, event);
4206
4207 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
4208 if (err && err != -1)
4209 return err;
4210
4211 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
4212 if (err)
4213 return err;
4214
4215 return trace__flush_events(trace);
4216 }
4217
4218 static int ordered_events__deliver_event(struct ordered_events *oe,
4219 struct ordered_event *event)
4220 {
4221 struct trace *trace = container_of(oe, struct trace, oe.data);
4222
4223 return __trace__deliver_event(trace, event->event);
4224 }
4225
4226 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg,
4227 char **type)
4228 {
4229 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
4230 const struct tep_event *tp_format;
4231
4232 if (!fmt)
4233 return NULL;
4234
4235 tp_format = evsel__tp_format(evsel);
4236 if (!tp_format)
4237 return NULL;
4238
4239 for (const struct tep_format_field *field = tp_format->format.fields; field;
4240 field = field->next, ++fmt) {
4241 if (strcmp(field->name, arg) == 0) {
4242 *type = field->type;
4243 return fmt;
4244 }
4245 }
4246
4247 return NULL;
4248 }
4249
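/*
 * Expand symbolic names on the right-hand side of a tracepoint filter
 * into numeric values via the argument's strtoul() resolver, so that,
 * with illustrative values, a filter written as "flags==O_CLOEXEC"
 * becomes "flags==0x80000" before being handed to the kernel's filter
 * engine, which only understands numbers.
 */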
4250 static int trace__expand_filter(struct trace *trace, struct evsel *evsel)
4251 {
4252 char *tok, *left = evsel->filter, *new_filter = evsel->filter;
4253
4254 while ((tok = strpbrk(left, "=<>!")) != NULL) {
4255 char *right = tok + 1, *right_end;
4256
4257 if (*right == '=')
4258 ++right;
4259
4260 while (isspace(*right))
4261 ++right;
4262
4263 if (*right == '\0')
4264 break;
4265
4266 while (!isalpha(*left))
4267 if (++left == tok) {
4268 /*
4269 * Bail out, can't find the name of the argument that is being
4270 * used in the filter, let it try to set this filter, will fail later.
4271 */
4272 return 0;
4273 }
4274
4275 right_end = right + 1;
4276 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
4277 ++right_end;
4278
4279 if (isalpha(*right)) {
4280 struct syscall_arg_fmt *fmt;
4281 int left_size = tok - left,
4282 right_size = right_end - right;
4283 char arg[128], *type;
4284
4285 while (isspace(left[left_size - 1]))
4286 --left_size;
4287
4288 scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
4289
4290 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type);
4291 if (fmt == NULL) {
4292 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
4293 arg, evsel->name, evsel->filter);
4294 return -1;
4295 }
4296
4297 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
4298 arg, (int)(right - tok), tok, right_size, right);
4299
4300 if (fmt->strtoul) {
4301 u64 val;
4302 struct syscall_arg syscall_arg = {
4303 .trace = trace,
4304 .fmt = fmt,
4305 .type_name = type,
4306 .parm = fmt->parm,
4307 };
4308
4309 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
4310 char *n, expansion[19];
4311 int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
4312 int expansion_offset = right - new_filter;
4313
4314 pr_debug("%s", expansion);
4315
4316 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
4317 pr_debug(" out of memory!\n");
4318 if (new_filter != evsel->filter) free(new_filter);
4319 return -1;
4320 }
4321 if (new_filter != evsel->filter)
4322 free(new_filter);
4323 left = n + expansion_offset + expansion_length;
4324 new_filter = n;
4325 } else {
4326 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
4327 right_size, right, arg, evsel->name, evsel->filter);
4328 return -1;
4329 }
4330 } else {
4331 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
4332 arg, evsel->name, evsel->filter);
4333 return -1;
4334 }
4335
4336 pr_debug("\n");
4337 } else {
4338 left = right_end;
4339 }
4340 }
4341
4342 if (new_filter != evsel->filter) {
4343 pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
4344 evsel__set_filter(evsel, new_filter);
4345 free(new_filter);
4346 }
4347
4348 return 0;
4349 }
4350
4351 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
4352 {
4353 struct evlist *evlist = trace->evlist;
4354 struct evsel *evsel;
4355
4356 evlist__for_each_entry(evlist, evsel) {
4357 if (evsel->filter == NULL)
4358 continue;
4359
4360 if (trace__expand_filter(trace, evsel)) {
4361 *err_evsel = evsel;
4362 return -1;
4363 }
4364 }
4365
4366 return 0;
4367 }
4368
4369 static int trace__run(struct trace *trace, int argc, const char **argv)
4370 {
4371 struct evlist *evlist = trace->evlist;
4372 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
4373 int err = -1, i;
4374 unsigned long before;
4375 const bool forks = argc > 0;
4376 bool draining = false;
4377
4378 trace->live = true;
4379
4380 if (!trace->raw_augmented_syscalls) {
4381 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
4382 goto out_error_raw_syscalls;
4383
4384 if (trace->trace_syscalls)
4385 trace->vfs_getname = evlist__add_vfs_getname(evlist);
4386 }
4387
4388 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
4389 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
4390 if (pgfault_maj == NULL)
4391 goto out_error_mem;
4392 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
4393 evlist__add(evlist, pgfault_maj);
4394 }
4395
4396 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
4397 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
4398 if (pgfault_min == NULL)
4399 goto out_error_mem;
4400 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
4401 evlist__add(evlist, pgfault_min);
4402 }
4403
4404 /* Enable ignoring missing threads when -u/-p option is defined. */
4405 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
4406
4407 if (trace->sched &&
4408 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
4409 goto out_error_sched_stat_runtime;
4410 /*
4411 * If a global cgroup was set, apply it to all the events without an
4412 * explicit cgroup. I.e.:
4413 *
4414 * trace -G A -e sched:*switch
4415 *
4416 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
4417 * _and_ sched:sched_switch to the 'A' cgroup, while:
4418 *
4419 * trace -e sched:*switch -G A
4420 *
4421 * will only set the sched:sched_switch event to the 'A' cgroup, all the
4422 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
4423 * a cgroup (on the root cgroup, sys wide, etc).
4424 *
4425 * Multiple cgroups:
4426 *
4427 * trace -G A -e sched:*switch -G B
4428 *
4429 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
4430 * to the 'B' cgroup.
4431 *
4432 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
4433 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
4434 */
4435 if (trace->cgroup)
4436 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
4437
4438 err = evlist__create_maps(evlist, &trace->opts.target);
4439 if (err < 0) {
4440 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
4441 goto out_delete_evlist;
4442 }
4443
4444 err = trace__symbols_init(trace, evlist);
4445 if (err < 0) {
4446 fprintf(trace->output, "Problems initializing symbol libraries!\n");
4447 goto out_delete_evlist;
4448 }
4449
4450 if (trace->summary_mode == SUMMARY__BY_TOTAL) {
4451 trace->syscall_stats = alloc_syscall_stats();
4452 if (trace->syscall_stats == NULL)
4453 goto out_delete_evlist;
4454 }
4455
4456 evlist__config(evlist, &trace->opts, &callchain_param);
4457
4458 if (forks) {
4459 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
4460 if (err < 0) {
4461 fprintf(trace->output, "Couldn't run the workload!\n");
4462 goto out_delete_evlist;
4463 }
4464 workload_pid = evlist->workload.pid;
4465 }
4466
4467 err = evlist__open(evlist);
4468 if (err < 0)
4469 goto out_error_open;
4470 #ifdef HAVE_BPF_SKEL
4471 if (trace->syscalls.events.bpf_output) {
4472 struct perf_cpu cpu;
4473
4474 /*
4475 * Set up the __augmented_syscalls__ BPF map to hold for each
4476 * CPU the bpf-output event's file descriptor.
4477 */
4478 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
4479 int mycpu = cpu.cpu;
4480
4481 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
4482 &mycpu, sizeof(mycpu),
4483 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
4484 mycpu, 0),
4485 sizeof(__u32), BPF_ANY);
4486 }
4487 }
4488
4489 if (trace->skel)
4490 trace->filter_pids.map = trace->skel->maps.pids_filtered;
4491 #endif
4492 err = trace__set_filter_pids(trace);
4493 if (err < 0)
4494 goto out_error_mem;
4495
4496 #ifdef HAVE_BPF_SKEL
4497 if (trace->skel && trace->skel->progs.sys_enter) {
4498 /*
4499 * TODO: Initialize for all host binary machine types, not just
4500 * those matching the perf binary.
4501 */
4502 trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
4503 }
4504 #endif
4505
4506 if (trace->ev_qualifier_ids.nr > 0) {
4507 err = trace__set_ev_qualifier_filter(trace);
4508 if (err < 0)
4509 goto out_errno;
4510
4511 if (trace->syscalls.events.sys_exit) {
4512 pr_debug("event qualifier tracepoint filter: %s\n",
4513 trace->syscalls.events.sys_exit->filter);
4514 }
4515 }
4516
4517 /*
4518 * If the "close" syscall is not traced, then we will not have the
4519 * opportunity to invalidate the fd->pathname table in
4520 * syscall_arg__scnprintf_close_fd(), and we'd end up showing the last value set by
4521 * syscalls opening a pathname and associating it with a descriptor or
4522 * reading it from /proc/pid/fd/ in cases where that doesn't make
4523 * sense.
4524 *
4525 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
4526 * not in use.
4527 */
4528 /* TODO: support for more than just perf binary machine type close. */
4529 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(EM_HOST, "close"));
4530
4531 err = trace__expand_filters(trace, &evsel);
4532 if (err)
4533 goto out_delete_evlist;
4534 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
4535 if (err < 0)
4536 goto out_error_apply_filters;
4537
4538 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4539 if (err < 0)
4540 goto out_error_mmap;
4541
4542 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
4543 evlist__enable(evlist);
4544
4545 if (forks)
4546 evlist__start_workload(evlist);
4547
4548 if (trace->opts.target.initial_delay) {
4549 usleep(trace->opts.target.initial_delay * 1000);
4550 evlist__enable(evlist);
4551 }
4552
4553 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4554 perf_thread_map__nr(evlist->core.threads) > 1 ||
4555 evlist__first(evlist)->core.attr.inherit;
4556
4557 /*
4558 * Now that we already used evsel->core.attr to ask the kernel to setup the
4559 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
4560 * trace__resolve_callchain(), allowing per-event max-stack settings
4561 * to override an explicitly set --max-stack global setting.
4562 */
4563 evlist__for_each_entry(evlist, evsel) {
4564 if (evsel__has_callchain(evsel) &&
4565 evsel->core.attr.sample_max_stack == 0)
4566 evsel->core.attr.sample_max_stack = trace->max_stack;
4567 }
4568 again:
4569 before = trace->nr_events;
4570
4571 for (i = 0; i < evlist->core.nr_mmaps; i++) {
4572 union perf_event *event;
4573 struct mmap *md;
4574
4575 md = &evlist->mmap[i];
4576 if (perf_mmap__read_init(&md->core) < 0)
4577 continue;
4578
4579 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4580 ++trace->nr_events;
4581
4582 err = trace__deliver_event(trace, event);
4583 if (err)
4584 goto out_disable;
4585
4586 perf_mmap__consume(&md->core);
4587
4588 if (interrupted)
4589 goto out_disable;
4590
4591 if (done && !draining) {
4592 evlist__disable(evlist);
4593 draining = true;
4594 }
4595 }
4596 perf_mmap__read_done(&md->core);
4597 }
4598
4599 if (trace->nr_events == before) {
4600 int timeout = done ? 100 : -1;
4601
4602 if (!draining && evlist__poll(evlist, timeout) > 0) {
4603 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
4604 draining = true;
4605
4606 goto again;
4607 } else {
4608 if (trace__flush_events(trace))
4609 goto out_disable;
4610 }
4611 } else {
4612 goto again;
4613 }
4614
4615 out_disable:
4616 thread__zput(trace->current);
4617
4618 evlist__disable(evlist);
4619
4620 if (trace->sort_events)
4621 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4622
4623 if (!err) {
4624 if (trace->summary) {
4625 if (trace->summary_mode == SUMMARY__BY_TOTAL)
4626 trace__fprintf_total_summary(trace, trace->output);
4627 else
4628 trace__fprintf_thread_summary(trace, trace->output);
4629 }
4630
4631 if (trace->show_tool_stats) {
4632 fprintf(trace->output, "Stats:\n "
4633 " vfs_getname : %" PRIu64 "\n"
4634 " proc_getname: %" PRIu64 "\n",
4635 trace->stats.vfs_getname,
4636 trace->stats.proc_getname);
4637 }
4638 }
4639
4640 out_delete_evlist:
4641 delete_syscall_stats(trace->syscall_stats);
4642 trace__symbols__exit(trace);
4643 evlist__free_syscall_tp_fields(evlist);
4644 evlist__delete(evlist);
4645 cgroup__put(trace->cgroup);
4646 trace->evlist = NULL;
4647 trace->live = false;
4648 return err;
4649 {
4650 char errbuf[BUFSIZ];
4651
4652 out_error_sched_stat_runtime:
4653 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
4654 goto out_error;
4655
4656 out_error_raw_syscalls:
4657 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
4658 goto out_error;
4659
4660 out_error_mmap:
4661 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
4662 goto out_error;
4663
4664 out_error_open:
4665 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
4666
4667 out_error:
4668 fprintf(trace->output, "%s\n", errbuf);
4669 goto out_delete_evlist;
4670
4671 out_error_apply_filters:
4672 fprintf(trace->output,
4673 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
4674 evsel->filter, evsel__name(evsel), errno,
4675 str_error_r(errno, errbuf, sizeof(errbuf)));
4676 goto out_delete_evlist;
4677 }
4678 out_error_mem:
4679 fprintf(trace->output, "Not enough memory to run!\n");
4680 goto out_delete_evlist;
4681
4682 out_errno:
4683 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4684 goto out_delete_evlist;
4685 }
4686
4687 static int trace__replay(struct trace *trace)
4688 {
4689 const struct evsel_str_handler handlers[] = {
4690 { "probe:vfs_getname", trace__vfs_getname, },
4691 };
4692 struct perf_data data = {
4693 .path = input_name,
4694 .mode = PERF_DATA_MODE_READ,
4695 .force = trace->force,
4696 };
4697 struct perf_session *session;
4698 struct evsel *evsel;
4699 int err = -1;
4700
4701 perf_tool__init(&trace->tool, /*ordered_events=*/true);
4702 trace->tool.sample = trace__process_sample;
4703 trace->tool.mmap = perf_event__process_mmap;
4704 trace->tool.mmap2 = perf_event__process_mmap2;
4705 trace->tool.comm = perf_event__process_comm;
4706 trace->tool.exit = perf_event__process_exit;
4707 trace->tool.fork = perf_event__process_fork;
4708 trace->tool.attr = perf_event__process_attr;
4709 trace->tool.tracing_data = perf_event__process_tracing_data;
4710 trace->tool.build_id = perf_event__process_build_id;
4711 trace->tool.namespaces = perf_event__process_namespaces;
4712
4713 trace->tool.ordered_events = true;
4714 trace->tool.ordering_requires_timestamps = true;
4715
4716 /* add tid to output */
4717 trace->multiple_threads = true;
4718
4719 session = perf_session__new(&data, &trace->tool);
4720 if (IS_ERR(session))
4721 return PTR_ERR(session);
4722
4723 if (trace->opts.target.pid)
4724 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4725
4726 if (trace->opts.target.tid)
4727 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4728
4729 if (symbol__init(&session->header.env) < 0)
4730 goto out;
4731
4732 trace->host = &session->machines.host;
4733
4734 err = perf_session__set_tracepoints_handlers(session, handlers);
4735 if (err)
4736 goto out;
4737
4738 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
4739 trace->syscalls.events.sys_enter = evsel;
4740 /* older kernels have syscalls tp versus raw_syscalls */
4741 if (evsel == NULL)
4742 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
4743
4744 if (evsel &&
4745 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
4746 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
4747 pr_err("Error during initialize raw_syscalls:sys_enter event\n");
4748 goto out;
4749 }
4750
4751 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
4752 trace->syscalls.events.sys_exit = evsel;
4753 if (evsel == NULL)
4754 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
4755 if (evsel &&
4756 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
4757 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
4758 pr_err("Error during initialize raw_syscalls:sys_exit event\n");
4759 goto out;
4760 }
4761
4762 evlist__for_each_entry(session->evlist, evsel) {
4763 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
4764 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
4765 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
4766 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
4767 evsel->handler = trace__pgfault;
4768 }
4769
4770 if (trace->summary_mode == SUMMARY__BY_TOTAL) {
4771 trace->syscall_stats = alloc_syscall_stats();
4772 if (trace->syscall_stats == NULL)
4773 goto out;
4774 }
4775
4776 setup_pager();
4777
4778 err = perf_session__process_events(session);
4779 if (err)
4780 pr_err("Failed to process events, error %d", err);
4781
4782 else if (trace->summary)
4783 trace__fprintf_thread_summary(trace, trace->output);
4784
4785 out:
4786 delete_syscall_stats(trace->syscall_stats);
4787 perf_session__delete(session);
4788
4789 return err;
4790 }
4791
4792 static size_t trace__fprintf_summary_header(FILE *fp)
4793 {
4794 size_t printed;
4795
4796 printed = fprintf(fp, "\n Summary of events:\n\n");
4797
4798 return printed;
4799 }
4800
4801 struct syscall_entry {
4802 struct syscall_stats *stats;
4803 double msecs;
4804 int syscall;
4805 };
4806
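/* Sort descending by accumulated time, most expensive syscalls first. */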
4807 static int entry_cmp(const void *e1, const void *e2)
4808 {
4809 const struct syscall_entry *entry1 = e1;
4810 const struct syscall_entry *entry2 = e2;
4811
4812 return entry1->msecs > entry2->msecs ? -1 : 1;
4813 }
4814
4815 static struct syscall_entry *syscall__sort_stats(struct hashmap *syscall_stats)
4816 {
4817 struct syscall_entry *entry;
4818 struct hashmap_entry *pos;
4819 unsigned bkt, i, nr;
4820
4821 nr = syscall_stats->sz;
4822 entry = malloc(nr * sizeof(*entry));
4823 if (entry == NULL)
4824 return NULL;
4825
4826 i = 0;
4827 hashmap__for_each_entry(syscall_stats, pos, bkt) {
4828 struct syscall_stats *ss = pos->pvalue;
4829 struct stats *st = &ss->stats;
4830
4831 entry[i].stats = ss;
4832 entry[i].msecs = (u64)st->n * (avg_stats(st) / NSEC_PER_MSEC);
4833 entry[i].syscall = pos->key;
4834 i++;
4835 }
4836 assert(i == nr);
4837
4838 qsort(entry, nr, sizeof(*entry), entry_cmp);
4839 return entry;
4840 }
4841
4842 static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp,
4843 struct hashmap *syscall_stats)
4844 {
4845 size_t printed = 0;
4846 struct syscall *sc;
4847 struct syscall_entry *entries;
4848
4849 entries = syscall__sort_stats(syscall_stats);
4850 if (entries == NULL)
4851 return 0;
4852
4853 printed += fprintf(fp, "\n");
4854
4855 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
4856 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
4857 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
4858
4859 for (size_t i = 0; i < syscall_stats->sz; i++) {
4860 struct syscall_entry *entry = &entries[i];
4861 struct syscall_stats *stats = entry->stats;
4862
4863 if (stats) {
4864 double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
4865 double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
4866 double avg = avg_stats(&stats->stats);
4867 double pct;
4868 u64 n = (u64)stats->stats.n;
4869
4870 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
4871 avg /= NSEC_PER_MSEC;
4872
4873 sc = trace__syscall_info(trace, /*evsel=*/NULL, e_machine, entry->syscall);
4874 if (!sc)
4875 continue;
4876
4877 printed += fprintf(fp, " %-15s", sc->name);
4878 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
4879 n, stats->nr_failures, entry->msecs, min, avg);
4880 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
4881
4882 if (trace->errno_summary && stats->nr_failures) {
4883 int e;
4884
4885 for (e = 0; e < stats->max_errno; ++e) {
4886 if (stats->errnos[e] != 0)
4887 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
4888 }
4889 }
4890 }
4891 }
4892
4893 free(entries);
4894 printed += fprintf(fp, "\n\n");
4895
4896 return printed;
4897 }
4898
4899 static size_t thread__dump_stats(struct thread_trace *ttrace,
4900 struct trace *trace, int e_machine, FILE *fp)
4901 {
4902 return syscall__dump_stats(trace, e_machine, fp, ttrace->syscall_stats);
4903 }
4904
4905 static size_t system__dump_stats(struct trace *trace, int e_machine, FILE *fp)
4906 {
4907 return syscall__dump_stats(trace, e_machine, fp, trace->syscall_stats);
4908 }
4909
4910 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4911 {
4912 size_t printed = 0;
4913 struct thread_trace *ttrace = thread__priv(thread);
4914 int e_machine = thread__e_machine(thread, trace->host);
4915 double ratio;
4916
4917 if (ttrace == NULL)
4918 return 0;
4919
4920 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4921
4922 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
4923 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4924 printed += fprintf(fp, "%.1f%%", ratio);
4925 if (ttrace->pfmaj)
4926 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4927 if (ttrace->pfmin)
4928 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4929 if (trace->sched)
4930 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4931 else if (fputc('\n', fp) != EOF)
4932 ++printed;
4933
4934 printed += thread__dump_stats(ttrace, trace, e_machine, fp);
4935
4936 return printed;
4937 }
4938
4939 static unsigned long thread__nr_events(struct thread_trace *ttrace)
4940 {
4941 return ttrace ? ttrace->nr_events : 0;
4942 }
4943
4944 static int trace_nr_events_cmp(void *priv __maybe_unused,
4945 const struct list_head *la,
4946 const struct list_head *lb)
4947 {
4948 struct thread_list *a = list_entry(la, struct thread_list, list);
4949 struct thread_list *b = list_entry(lb, struct thread_list, list);
4950 unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread));
4951 unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread));
4952
4953 if (a_nr_events != b_nr_events)
4954 return a_nr_events < b_nr_events ? -1 : 1;
4955
4956 /* Identical number of events, place smaller tids first. */
4957 return thread__tid(a->thread) < thread__tid(b->thread)
4958 ? -1
4959 : (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0);
4960 }
4961
4962 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4963 {
4964 size_t printed = trace__fprintf_summary_header(fp);
4965 LIST_HEAD(threads);
4966
4967 if (machine__thread_list(trace->host, &threads) == 0) {
4968 struct thread_list *pos;
4969
4970 list_sort(NULL, &threads, trace_nr_events_cmp);
4971
4972 list_for_each_entry(pos, &threads, list)
4973 printed += trace__fprintf_thread(fp, pos->thread, trace);
4974 }
4975 thread_list__delete(&threads);
4976 return printed;
4977 }
4978
4979 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp)
4980 {
4981 size_t printed = trace__fprintf_summary_header(fp);
4982
4983 printed += fprintf(fp, " total, ");
4984 printed += fprintf(fp, "%lu events", trace->nr_events);
4985
4986 if (trace->pfmaj)
4987 printed += fprintf(fp, ", %lu majfaults", trace->pfmaj);
4988 if (trace->pfmin)
4989 printed += fprintf(fp, ", %lu minfaults", trace->pfmin);
4990 if (trace->sched)
4991 printed += fprintf(fp, ", %.3f msec\n", trace->runtime_ms);
4992 else if (fputc('\n', fp) != EOF)
4993 ++printed;
4994
4995 /* TODO: get all system e_machines. */
4996 printed += system__dump_stats(trace, EM_HOST, fp);
4997
4998 return printed;
4999 }
5000
5001 static int trace__set_duration(const struct option *opt, const char *str,
5002 int unset __maybe_unused)
5003 {
5004 struct trace *trace = opt->value;
5005
5006 trace->duration_filter = atof(str);
5007 return 0;
5008 }
5009
5010 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
5011 int unset __maybe_unused)
5012 {
5013 int ret = -1;
5014 size_t i;
5015 struct trace *trace = opt->value;
5016 /*
5017 * FIXME: introduce an intarray class, simply parse the csv and create a
5018 * { int nr, int entries[] } struct...
5019 */
5020 struct intlist *list = intlist__new(str);
5021
5022 if (list == NULL)
5023 return -1;
5024
5025 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
5026 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
5027
5028 if (trace->filter_pids.entries == NULL)
5029 goto out;
5030
5031 trace->filter_pids.entries[0] = getpid();
5032
5033 for (i = 1; i < trace->filter_pids.nr; ++i)
5034 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
5035
5036 intlist__delete(list);
5037 ret = 0;
5038 out:
5039 return ret;
5040 }
5041
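/*
 * If the requested output file already exists and is non-empty, rotate it to
 * "<filename>.old" before (re)creating it for this run.
 */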
5042 static int trace__open_output(struct trace *trace, const char *filename)
5043 {
5044 struct stat st;
5045
5046 if (!stat(filename, &st) && st.st_size) {
5047 char oldname[PATH_MAX];
5048
5049 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
5050 unlink(oldname);
5051 rename(filename, oldname);
5052 }
5053
5054 trace->output = fopen(filename, "w");
5055
5056 return trace->output == NULL ? -errno : 0;
5057 }
5058
5059 static int parse_pagefaults(const struct option *opt, const char *str,
5060 int unset __maybe_unused)
5061 {
5062 int *trace_pgfaults = opt->value;
5063
5064 if (strcmp(str, "all") == 0)
5065 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
5066 else if (strcmp(str, "maj") == 0)
5067 *trace_pgfaults |= TRACE_PFMAJ;
5068 else if (strcmp(str, "min") == 0)
5069 *trace_pgfaults |= TRACE_PFMIN;
5070 else
5071 return -1;
5072
5073 return 0;
5074 }
5075
5076 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
5077 {
5078 struct evsel *evsel;
5079
5080 evlist__for_each_entry(evlist, evsel) {
5081 if (evsel->handler == NULL)
5082 evsel->handler = handler;
5083 }
5084 }
5085
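/*
 * Copy the per-argument formatters from the syscall_fmt table entry for
 * 'name' onto this evsel's arg fmt array, skipping the leading
 * "__syscall_nr"/"nr" field that syscalls:sys_{enter,exit}_* tracepoints
 * carry before the actual syscall arguments.
 */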
5086 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
5087 {
5088 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
5089
5090 if (fmt) {
5091 const struct syscall_fmt *scfmt = syscall_fmt__find(name);
5092
5093 if (scfmt) {
5094 const struct tep_event *tp_format = evsel__tp_format(evsel);
5095
5096 if (tp_format) {
5097 int skip = 0;
5098
5099 if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 ||
5100 strcmp(tp_format->format.fields->name, "nr") == 0)
5101 ++skip;
5102
5103 memcpy(fmt + skip, scfmt->arg,
5104 (tp_format->format.nr_fields - skip) * sizeof(*fmt));
5105 }
5106 }
5107 }
5108 }
5109
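/*
 * Walk the parsed events: plain tracepoints get the generic per-arg
 * scnprintf initialization (possibly BTF based), while syscalls:sys_enter_*
 * and syscalls:sys_exit_* additionally get their id/args/ret field accessors
 * and the arg formatters looked up by syscall name.
 */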
5110 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf)
5111 {
5112 struct evsel *evsel;
5113
5114 evlist__for_each_entry(evlist, evsel) {
5115 const struct tep_event *tp_format;
5116
5117 if (evsel->priv)
5118 continue;
5119
5120 tp_format = evsel__tp_format(evsel);
5121 if (!tp_format)
5122 continue;
5123
5124 if (strcmp(tp_format->system, "syscalls")) {
5125 evsel__init_tp_arg_scnprintf(evsel, use_btf);
5126 continue;
5127 }
5128
5129 if (evsel__init_syscall_tp(evsel))
5130 return -1;
5131
5132 if (!strncmp(tp_format->name, "sys_enter_", 10)) {
5133 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
5134
5135 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
5136 return -1;
5137
5138 evsel__set_syscall_arg_fmt(evsel,
5139 tp_format->name + sizeof("sys_enter_") - 1);
5140 } else if (!strncmp(tp_format->name, "sys_exit_", 9)) {
5141 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
5142
5143 if (__tp_field__init_uint(&sc->ret, sizeof(u64),
5144 sc->id.offset + sizeof(u64),
5145 evsel->needs_swap))
5146 return -1;
5147
5148 evsel__set_syscall_arg_fmt(evsel,
5149 tp_format->name + sizeof("sys_exit_") - 1);
5150 }
5151 }
5152
5153 return 0;
5154 }
5155
5156 /*
5157 * XXX: Hackish, just splitting the combined -e+--event list into syscalls
5158 * (raw_syscalls:sys_{enter,exit}) and other events (tracepoints, HW, SW, etc.)
5159 * to use the existing facilities unchanged (trace->ev_qualifier + parse_options()).
5160 *
5161 * It'd be better to introduce a parse_options() variant that would return a
5162 * list with the terms it didn't match to an event...
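 *
 * For example (illustrative input), with -e openat,close,sched:sched_switch
 * the syscall names "openat,close" end up in lists[1] (handled via
 * trace->ev_qualifier) while "sched:sched_switch" lands in lists[0] and is
 * passed on to parse_events_option().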
5163 */
5164 static int trace__parse_events_option(const struct option *opt, const char *str,
5165 int unset __maybe_unused)
5166 {
5167 struct trace *trace = (struct trace *)opt->value;
5168 const char *s = str;
5169 char *sep = NULL, *lists[2] = { NULL, NULL, };
5170 int len = strlen(str) + 1, err = -1, list, idx;
5171 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
5172 char group_name[PATH_MAX];
5173 const struct syscall_fmt *fmt;
5174
5175 if (strace_groups_dir == NULL)
5176 return -1;
5177
5178 if (*s == '!') {
5179 ++s;
5180 trace->not_ev_qualifier = true;
5181 }
5182
5183 while (1) {
5184 if ((sep = strchr(s, ',')) != NULL)
5185 *sep = '\0';
5186
5187 list = 0;
5188 /* TODO: support for more than just perf binary machine type syscalls. */
5189 if (syscalltbl__id(EM_HOST, s) >= 0 ||
5190 syscalltbl__strglobmatch_first(EM_HOST, s, &idx) >= 0) {
5191 list = 1;
5192 goto do_concat;
5193 }
5194
5195 fmt = syscall_fmt__find_by_alias(s);
5196 if (fmt != NULL) {
5197 list = 1;
5198 s = fmt->name;
5199 } else {
5200 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
5201 if (access(group_name, R_OK) == 0)
5202 list = 1;
5203 }
5204 do_concat:
5205 if (lists[list]) {
5206 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
5207 } else {
5208 lists[list] = malloc(len);
5209 if (lists[list] == NULL)
5210 goto out;
5211 strcpy(lists[list], s);
5212 }
5213
5214 if (!sep)
5215 break;
5216
5217 *sep = ',';
5218 s = sep + 1;
5219 }
5220
5221 if (lists[1] != NULL) {
5222 struct strlist_config slist_config = {
5223 .dirname = strace_groups_dir,
5224 };
5225
5226 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
5227 if (trace->ev_qualifier == NULL) {
5228 fputs("Not enough memory to parse event qualifier", trace->output);
5229 goto out;
5230 }
5231
5232 if (trace__validate_ev_qualifier(trace))
5233 goto out;
5234 trace->trace_syscalls = true;
5235 }
5236
5237 err = 0;
5238
5239 if (lists[0]) {
5240 struct parse_events_option_args parse_events_option_args = {
5241 .evlistp = &trace->evlist,
5242 };
5243 struct option o = {
5244 .value = &parse_events_option_args,
5245 };
5246 err = parse_events_option(&o, lists[0], 0);
5247 }
5248 out:
5249 free(strace_groups_dir);
5250 free(lists[0]);
5251 free(lists[1]);
5252 if (sep)
5253 *sep = ',';
5254
5255 return err;
5256 }
5257
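/*
 * --cgroup/-G: if events were already specified, attach the cgroup to those
 * via parse_cgroups(); otherwise remember it in trace->cgroup so it can be
 * applied later to the events set up by default.
 */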
5258 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
5259 {
5260 struct trace *trace = opt->value;
5261
5262 if (!list_empty(&trace->evlist->core.entries)) {
5263 struct option o = {
5264 .value = &trace->evlist,
5265 };
5266 return parse_cgroups(&o, str, unset);
5267 }
5268 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
5269
5270 return 0;
5271 }
5272
5273 static int trace__parse_summary_mode(const struct option *opt, const char *str,
5274 int unset __maybe_unused)
5275 {
5276 struct trace *trace = opt->value;
5277
5278 if (!strcmp(str, "thread")) {
5279 trace->summary_mode = SUMMARY__BY_THREAD;
5280 } else if (!strcmp(str, "total")) {
5281 trace->summary_mode = SUMMARY__BY_TOTAL;
5282 } else {
5283 pr_err("Unknown summary mode: %s\n", str);
5284 return -1;
5285 }
5286
5287 return 0;
5288 }
5289
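/*
 * Handle the trace.* variables from perf's config files, e.g. an illustrative
 * ~/.perfconfig snippet:
 *
 *	[trace]
 *		show_duration = no
 *		args_alignment = 40
 *		add_events = sched:sched_switch
 */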
5290 static int trace__config(const char *var, const char *value, void *arg)
5291 {
5292 struct trace *trace = arg;
5293 int err = 0;
5294
5295 if (!strcmp(var, "trace.add_events")) {
5296 trace->perfconfig_events = strdup(value);
5297 if (trace->perfconfig_events == NULL) {
5298 pr_err("Not enough memory for %s\n", "trace.add_events");
5299 return -1;
5300 }
5301 } else if (!strcmp(var, "trace.show_timestamp")) {
5302 trace->show_tstamp = perf_config_bool(var, value);
5303 } else if (!strcmp(var, "trace.show_duration")) {
5304 trace->show_duration = perf_config_bool(var, value);
5305 } else if (!strcmp(var, "trace.show_arg_names")) {
5306 trace->show_arg_names = perf_config_bool(var, value);
5307 if (!trace->show_arg_names)
5308 trace->show_zeros = true;
5309 } else if (!strcmp(var, "trace.show_zeros")) {
5310 bool new_show_zeros = perf_config_bool(var, value);
5311 if (!trace->show_arg_names && !new_show_zeros) {
5312 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
5313 goto out;
5314 }
5315 trace->show_zeros = new_show_zeros;
5316 } else if (!strcmp(var, "trace.show_prefix")) {
5317 trace->show_string_prefix = perf_config_bool(var, value);
5318 } else if (!strcmp(var, "trace.no_inherit")) {
5319 trace->opts.no_inherit = perf_config_bool(var, value);
5320 } else if (!strcmp(var, "trace.args_alignment")) {
5321 int args_alignment = 0;
5322 if (perf_config_int(&args_alignment, var, value) == 0)
5323 trace->args_alignment = args_alignment;
5324 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
5325 if (strcasecmp(value, "libtraceevent") == 0)
5326 trace->libtraceevent_print = true;
5327 else if (strcasecmp(value, "libbeauty") == 0)
5328 trace->libtraceevent_print = false;
5329 }
5330 out:
5331 return err;
5332 }
5333
5334 static void trace__exit(struct trace *trace)
5335 {
5336 strlist__delete(trace->ev_qualifier);
5337 zfree(&trace->ev_qualifier_ids.entries);
5338 if (trace->syscalls.table) {
5339 for (size_t i = 0; i < trace->syscalls.table_size; i++)
5340 syscall__delete(trace->syscalls.table[i]);
5341 zfree(&trace->syscalls.table);
5342 }
5343 zfree(&trace->perfconfig_events);
5344 evlist__delete(trace->evlist);
5345 trace->evlist = NULL;
5346 #ifdef HAVE_LIBBPF_SUPPORT
5347 btf__free(trace->btf);
5348 trace->btf = NULL;
5349 #endif
5350 }
5351
5352 #ifdef HAVE_BPF_SKEL
5353 static int bpf__setup_bpf_output(struct evlist *evlist)
5354 {
5355 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
5356
5357 if (err)
5358 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
5359
5360 return err;
5361 }
5362 #endif
5363
5364 int cmd_trace(int argc, const char **argv)
5365 {
5366 const char *trace_usage[] = {
5367 "perf trace [<options>] [<command>]",
5368 "perf trace [<options>] -- <command> [<options>]",
5369 "perf trace record [<options>] [<command>]",
5370 "perf trace record [<options>] -- <command> [<options>]",
5371 NULL
5372 };
5373 struct trace trace = {
5374 .opts = {
5375 .target = {
5376 .uid = UINT_MAX,
5377 .uses_mmap = true,
5378 },
5379 .user_freq = UINT_MAX,
5380 .user_interval = ULLONG_MAX,
5381 .no_buffering = true,
5382 .mmap_pages = UINT_MAX,
5383 },
5384 .output = stderr,
5385 .show_comm = true,
5386 .show_tstamp = true,
5387 .show_duration = true,
5388 .show_arg_names = true,
5389 .args_alignment = 70,
5390 .trace_syscalls = false,
5391 .kernel_syscallchains = false,
5392 .max_stack = UINT_MAX,
5393 .max_events = ULONG_MAX,
5394 };
5395 const char *output_name = NULL;
5396 const struct option trace_options[] = {
5397 OPT_CALLBACK('e', "event", &trace, "event",
5398 "event/syscall selector. use 'perf list' to list available events",
5399 trace__parse_events_option),
5400 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
5401 "event filter", parse_filter),
5402 OPT_BOOLEAN(0, "comm", &trace.show_comm,
5403 "show the thread COMM next to its id"),
5404 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
5405 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
5406 trace__parse_events_option),
5407 OPT_STRING('o', "output", &output_name, "file", "output file name"),
5408 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
5409 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
5410 "trace events on existing process id"),
5411 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
5412 "trace events on existing thread id"),
5413 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
5414 "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
5415 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
5416 "system-wide collection from all CPUs"),
5417 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
5418 "list of cpus to monitor"),
5419 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
5420 "child tasks do not inherit counters"),
5421 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
5422 "number of mmap data pages", evlist__parse_mmap_pages),
5423 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
5424 "user to profile"),
5425 OPT_CALLBACK(0, "duration", &trace, "float",
5426 "show only events with duration > N.M ms",
5427 trace__set_duration),
5428 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
5429 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
5430 OPT_BOOLEAN('T', "time", &trace.full_time,
5431 "Show full timestamp, not time relative to first start"),
5432 OPT_BOOLEAN(0, "failure", &trace.failure_only,
5433 "Show only syscalls that failed"),
5434 OPT_BOOLEAN('s', "summary", &trace.summary_only,
5435 "Show only syscall summary with statistics"),
5436 OPT_BOOLEAN('S', "with-summary", &trace.summary,
5437 "Show all syscalls and summary with statistics"),
5438 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
5439 "Show errno stats per syscall, use with -s or -S"),
5440 OPT_CALLBACK(0, "summary-mode", &trace, "mode",
5441 "How to show summary: select thread (default) or total",
5442 trace__parse_summary_mode),
5443 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
5444 "Trace pagefaults", parse_pagefaults, "maj"),
5445 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
5446 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
5447 OPT_CALLBACK(0, "call-graph", &trace.opts,
5448 "record_mode[,record_size]", record_callchain_help,
5449 &record_parse_callchain_opt),
5450 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
5451 "Use libtraceevent to print the tracepoint arguments."),
5452 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
5453 "Show the kernel callchains on the syscall exit path"),
5454 OPT_ULONG(0, "max-events", &trace.max_events,
5455 "Set the maximum number of events to print, exit after that is reached. "),
5456 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
5457 "Set the minimum stack depth when parsing the callchain, "
5458 "anything below the specified depth will be ignored."),
5459 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
5460 "Set the maximum stack depth when parsing the callchain, "
5461 "anything beyond the specified depth will be ignored. "
5462 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
5463 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
5464 "Sort batch of events before processing, use if getting out of order events"),
5465 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
5466 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
5467 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
5468 "per thread proc mmap processing timeout in ms"),
5469 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
5470 trace__parse_cgroups),
5471 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
5472 "ms to wait before starting measurement after program "
5473 "start"),
5474 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer "
5475 "to customized ones"),
5476 OPTS_EVSWITCH(&trace.evswitch),
5477 OPT_END()
5478 };
5479 bool __maybe_unused max_stack_user_set = true;
5480 bool mmap_pages_user_set = true;
5481 struct evsel *evsel;
5482 const char * const trace_subcommands[] = { "record", NULL };
5483 int err = -1;
5484 char bf[BUFSIZ];
5485 struct sigaction sigchld_act;
5486
5487 signal(SIGSEGV, sighandler_dump_stack);
5488 signal(SIGFPE, sighandler_dump_stack);
5489 signal(SIGINT, sighandler_interrupt);
5490
5491 memset(&sigchld_act, 0, sizeof(sigchld_act));
5492 sigchld_act.sa_flags = SA_SIGINFO;
5493 sigchld_act.sa_sigaction = sighandler_chld;
5494 sigaction(SIGCHLD, &sigchld_act, NULL);
5495
5496 trace.evlist = evlist__new();
5497
5498 if (trace.evlist == NULL) {
5499 pr_err("Not enough memory to run!\n");
5500 err = -ENOMEM;
5501 goto out;
5502 }
5503
5504 /*
5505 * Parsing .perfconfig may entail creating a BPF event, that may need
5506 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
5507 * is too small. This affects just this process, not touching the
5508 * global setting. If it fails we'll get something in 'perf trace -v'
5509 * to help diagnose the problem.
5510 */
5511 rlimit__bump_memlock();
5512
5513 err = perf_config(trace__config, &trace);
5514 if (err)
5515 goto out;
5516
5517 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
5518 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
5519
5520 /*
5521 * Here we have already passed through trace__parse_events_option() and it has
5522 * already figured out if -e syscall_name was used; if not, but --event
5523 * foo:bar was used, the user is interested _just_ in those, say, tracepoint
5524 * events, not in the strace-like syscall-name-based mode.
5525 *
5526 * This is important because we need to check if strace-like mode is
5527 * needed to decide whether we should filter out the eBPF
5528 * __augmented_syscalls__ code if it is in the mix, say, via
5529 * .perfconfig trace.add_events.
5530 */
5531 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
5532 trace.evlist->core.nr_entries == 0 /* Was --event used? */) {
5533 trace.trace_syscalls = true;
5534 }
5535 /*
5536 * Now that we have --verbose figured out, lets see if we need to parse
5537 * events from .perfconfig, so that if those events fail parsing, say some
5538 * BPF program fails, then we'll be able to use --verbose to see what went
5539 * wrong in more detail.
5540 */
5541 if (trace.perfconfig_events != NULL) {
5542 struct parse_events_error parse_err;
5543
5544 parse_events_error__init(&parse_err);
5545 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
5546 if (err)
5547 parse_events_error__print(&parse_err, trace.perfconfig_events);
5548 parse_events_error__exit(&parse_err);
5549 if (err)
5550 goto out;
5551 }
5552
5553 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
5554 usage_with_options_msg(trace_usage, trace_options,
5555 "cgroup monitoring only available in system-wide mode");
5556 }
5557
5558 #ifdef HAVE_BPF_SKEL
5559 if (!trace.trace_syscalls)
5560 goto skip_augmentation;
5561
5562 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
5563 pr_debug("Syscall augmentation fails with record, disabling augmentation");
5564 goto skip_augmentation;
5565 }
5566
5567 trace.skel = augmented_raw_syscalls_bpf__open();
5568 if (!trace.skel) {
5569 pr_debug("Failed to open augmented syscalls BPF skeleton");
5570 } else {
5571 /*
5572 * Disable attaching the BPF programs except for sys_enter and
5573 * sys_exit that tail call into this as necessary.
5574 */
5575 struct bpf_program *prog;
5576
5577 bpf_object__for_each_program(prog, trace.skel->obj) {
5578 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
5579 bpf_program__set_autoattach(prog, /*autoattach=*/false);
5580 }
5581
5582 err = augmented_raw_syscalls_bpf__load(trace.skel);
5583
5584 if (err < 0) {
5585 libbpf_strerror(err, bf, sizeof(bf));
5586 pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
5587 } else {
5588 augmented_raw_syscalls_bpf__attach(trace.skel);
5589 trace__add_syscall_newtp(&trace);
5590 }
5591 }
5592
5593 err = bpf__setup_bpf_output(trace.evlist);
5594 if (err) {
5595 libbpf_strerror(err, bf, sizeof(bf));
5596 pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
5597 goto out;
5598 }
5599 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
5600 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
5601 skip_augmentation:
5602 #endif
5603 err = -1;
5604
5605 if (trace.trace_pgfaults) {
5606 trace.opts.sample_address = true;
5607 trace.opts.sample_time = true;
5608 }
5609
5610 if (trace.opts.mmap_pages == UINT_MAX)
5611 mmap_pages_user_set = false;
5612
5613 if (trace.max_stack == UINT_MAX) {
5614 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5615 max_stack_user_set = false;
5616 }
5617
5618 #ifdef HAVE_DWARF_UNWIND_SUPPORT
5619 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5620 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5621 }
5622 #endif
5623
5624 if (callchain_param.enabled) {
5625 if (!mmap_pages_user_set && geteuid() == 0)
5626 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5627
5628 symbol_conf.use_callchain = true;
5629 }
5630
5631 if (trace.evlist->core.nr_entries > 0) {
5632 bool use_btf = false;
5633
5634 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5635 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
5636 perror("failed to set syscalls:* tracepoint fields");
5637 goto out;
5638 }
5639
5640 if (use_btf)
5641 trace__load_vmlinux_btf(&trace);
5642 }
5643
5644 if (trace.sort_events) {
5645 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5646 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5647 }
5648
5649 /*
5650 * If we are augmenting syscalls, then combine what we put in the
5651 * __augmented_syscalls__ BPF map with what is in the
5652 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
5653 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
5654 *
5655 * We'll switch to look at two BPF maps, one for sys_enter and the
5656 * other for sys_exit when we start augmenting the sys_exit paths with
5657 * buffers that are being copied from kernel to userspace, think 'read'
5658 * syscall.
5659 */
5660 if (trace.syscalls.events.bpf_output) {
5661 evlist__for_each_entry(trace.evlist, evsel) {
5662 bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit");
5663
5664 if (raw_syscalls_sys_exit) {
5665 trace.raw_augmented_syscalls = true;
5666 goto init_augmented_syscall_tp;
5667 }
5668
5669 if (trace.syscalls.events.bpf_output->priv == NULL &&
5670 strstr(evsel__name(evsel), "syscalls:sys_enter")) {
5671 struct evsel *augmented = trace.syscalls.events.bpf_output;
5672 if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
5673 evsel__init_augmented_syscall_tp_args(augmented))
5674 goto out;
5675 /*
5676 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
5677 * Above we made sure we can get from the payload the tp fields
5678 * that we get from syscalls:sys_enter tracefs format file.
5679 */
5680 augmented->handler = trace__sys_enter;
5681 /*
5682 * Now we do the same for the *syscalls:sys_enter event so that
5683 * if we handle it directly, i.e. if the BPF prog returns 0 so
5684 * as not to filter it, then we'll handle it just like we would
5685 * for the BPF_OUTPUT one:
5686 */
5687 if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
5688 evsel__init_augmented_syscall_tp_args(evsel))
5689 goto out;
5690 evsel->handler = trace__sys_enter;
5691 }
5692
5693 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
5694 struct syscall_tp *sc;
5695 init_augmented_syscall_tp:
5696 if (evsel__init_augmented_syscall_tp(evsel, evsel))
5697 goto out;
5698 sc = __evsel__syscall_tp(evsel);
5699 /*
5700 * For now with BPF raw_augmented we hook into
5701 * raw_syscalls:sys_enter and there we get all
5702 * 6 syscall args plus the tracepoint common
5703 * fields and the syscall_nr (another long).
5704 * So we check if that is the case and if so
5705 * don't look after the sc->args_size but
5706 * always after the full raw_syscalls:sys_enter
5707 * payload, which is fixed.
5708 *
5709 * We'll revisit this later to pass
5710 * s->args_size to the BPF augmenter (now
5711 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
5712 * so that it copies only what we need for each
5713 * syscall, like what happens when we use
5714 * syscalls:sys_enter_NAME, so that we reduce
5715 * the kernel/userspace traffic to just what is
5716 * needed for each syscall.
5717 */
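/* sc->id.offset covers the common fields; add the id plus 6 args, each a long. */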
5718 if (trace.raw_augmented_syscalls)
5719 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5720 evsel__init_augmented_syscall_tp_ret(evsel);
5721 evsel->handler = trace__sys_exit;
5722 }
5723 }
5724 }
5725
5726 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
5727 err = trace__record(&trace, argc-1, &argv[1]);
5728 goto out;
5729 }
5730
5731 /* Using just --errno-summary will trigger --summary */
5732 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5733 trace.summary_only = true;
5734
5735 /* summary_only implies summary option, but don't overwrite summary if set */
5736 if (trace.summary_only)
5737 trace.summary = trace.summary_only;
5738
5739 /* Keep exited threads, otherwise information might be lost for summary */
5740 if (trace.summary) {
5741 symbol_conf.keep_exited_threads = true;
5742 if (trace.summary_mode == SUMMARY__NONE)
5743 trace.summary_mode = SUMMARY__BY_THREAD;
5744 }
5745
5746 if (output_name != NULL) {
5747 err = trace__open_output(&trace, output_name);
5748 if (err < 0) {
5749 perror("failed to create output file");
5750 goto out;
5751 }
5752 }
5753
5754 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5755 if (err)
5756 goto out_close;
5757
5758 err = target__validate(&trace.opts.target);
5759 if (err) {
5760 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5761 fprintf(trace.output, "%s", bf);
5762 goto out_close;
5763 }
5764
5765 err = target__parse_uid(&trace.opts.target);
5766 if (err) {
5767 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5768 fprintf(trace.output, "%s", bf);
5769 goto out_close;
5770 }
5771
5772 if (!argc && target__none(&trace.opts.target))
5773 trace.opts.target.system_wide = true;
5774
5775 if (input_name)
5776 err = trace__replay(&trace);
5777 else
5778 err = trace__run(&trace, argc, argv);
5779
5780 out_close:
5781 if (output_name != NULL)
5782 fclose(trace.output);
5783 out:
5784 trace__exit(&trace);
5785 #ifdef HAVE_BPF_SKEL
5786 augmented_raw_syscalls_bpf__destroy(trace.skel);
5787 #endif
5788 return err;
5789 }
5790