Lines Matching +full:oe +full:- +full:extra +full:- +full:delay

2  * builtin-trace.c
8 * event may be specified using --event.
38 #include "util/synthetic-events.h"
43 #include <subcmd/exec-cmd.h>
50 #include <subcmd/parse-options.h>
58 #include "trace-event.h"
59 #include "util/parse-events.h"
88 #include <traceevent/event-parse.h>
203 } oe; member
218 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
231 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
241 field->offset = offset; in __tp_field__init_uint()
245 field->integer = tp_field__u8; in __tp_field__init_uint()
248 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; in __tp_field__init_uint()
251 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; in __tp_field__init_uint()
254 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; in __tp_field__init_uint()
257 return -1; in __tp_field__init_uint()
265 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); in tp_field__init_uint()
270 return sample->raw_data + field->offset; in tp_field__ptr()
275 field->offset = offset; in __tp_field__init_ptr()
276 field->pointer = tp_field__ptr; in __tp_field__init_ptr()
282 return __tp_field__init_ptr(field, format_field->offset); in tp_field__init_ptr()
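The tp_field accessors matched above copy integers out of sample->raw_data with memcpy rather than dereferencing a cast pointer, so the read is safe even when the tracepoint payload is not naturally aligned. A minimal standalone sketch of that memcpy-based read (an illustration, not code from builtin-trace.c; payload_u32 and the test buffer are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a u32 at 'offset' inside a raw payload without assuming the address
     * is 4-byte aligned, mirroring the memcpy trick used by tp_field__u32(). */
    static uint32_t payload_u32(const void *raw_data, size_t offset)
    {
            uint32_t value;

            memcpy(&value, (const char *)raw_data + offset, sizeof(value));
            return value;
    }

    int main(void)
    {
            unsigned char raw[8] = { 0 };
            uint32_t v = 0x1234abcd;

            memcpy(raw + 1, &v, sizeof(v));          /* deliberately misaligned */
            printf("%#x\n", payload_u32(raw, 1));    /* prints 0x1234abcd */
            return 0;
    }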
293 * The evsel->priv as used by 'perf trace'
312 zfree(&et->fmt); in evsel_trace__delete()
322 struct evsel_trace *et = evsel->priv; in __evsel__syscall_tp()
324 return &et->sc; in __evsel__syscall_tp()
329 if (evsel->priv == NULL) { in evsel__syscall_tp()
330 evsel->priv = evsel_trace__new(); in evsel__syscall_tp()
331 if (evsel->priv == NULL) in evsel__syscall_tp()
343 struct evsel_trace *et = evsel->priv; in __evsel__syscall_arg_fmt()
345 return et->fmt; in __evsel__syscall_arg_fmt()
350 struct evsel_trace *et = evsel->priv; in evsel__syscall_arg_fmt()
352 if (evsel->priv == NULL) { in evsel__syscall_arg_fmt()
353 et = evsel->priv = evsel_trace__new(); in evsel__syscall_arg_fmt()
359 if (et->fmt == NULL) { in evsel__syscall_arg_fmt()
360 et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt)); in evsel__syscall_arg_fmt()
361 if (et->fmt == NULL) in evsel__syscall_arg_fmt()
368 evsel_trace__delete(evsel->priv); in evsel__syscall_arg_fmt()
369 evsel->priv = NULL; in evsel__syscall_arg_fmt()
378 return -1; in evsel__init_tp_uint_field()
380 return tp_field__init_uint(field, format_field, evsel->needs_swap); in evsel__init_tp_uint_field()
385 evsel__init_tp_uint_field(evsel, &sc->name, #name); })
392 return -1; in evsel__init_tp_ptr_field()
399 evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
403 zfree(&evsel->priv); in evsel__delete_priv()
412 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") && in evsel__init_syscall_tp()
413 evsel__init_tp_uint_field(evsel, &sc->id, "nr")) in evsel__init_syscall_tp()
414 return -ENOENT; in evsel__init_syscall_tp()
419 return -ENOMEM; in evsel__init_syscall_tp()
431 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap)) in evsel__init_augmented_syscall_tp()
432 return -EINVAL; in evsel__init_augmented_syscall_tp()
437 return -ENOMEM; in evsel__init_augmented_syscall_tp()
444 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)); in evsel__init_augmented_syscall_tp_args()
451 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap); in evsel__init_augmented_syscall_tp_ret()
458 return -ENOENT; in evsel__init_raw_syscall_tp()
460 evsel->handler = handler; in evsel__init_raw_syscall_tp()
464 return -ENOMEM; in evsel__init_raw_syscall_tp()
490 fields->name.integer(&fields->name, sample); })
494 fields->name.pointer(&fields->name, sample); })
498 int idx = val - sa->offset; in strarray__scnprintf_suffix()
500 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { in strarray__scnprintf_suffix()
503 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); in strarray__scnprintf_suffix()
507 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : ""); in strarray__scnprintf_suffix()
512 int idx = val - sa->offset; in strarray__scnprintf()
514 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { in strarray__scnprintf()
517 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); in strarray__scnprintf()
521 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); in strarray__scnprintf()
528 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val); in __syscall_arg__scnprintf_strarray()
541 return strarray__strtoul(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarray()
546 return strarray__strtoul_flags(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarray_flags()
551 return strarrays__strtoul(arg->parm, bf, size, ret); in syscall_arg__strtoul_strarrays()
556 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val); in syscall_arg__scnprintf_strarray_flags()
564 for (i = 0; i < sas->nr_entries; ++i) { in strarrays__scnprintf()
565 struct strarray *sa = sas->entries[i]; in strarrays__scnprintf()
566 int idx = val - sa->offset; in strarrays__scnprintf()
568 if (idx >= 0 && idx < sa->nr_entries) { in strarrays__scnprintf()
569 if (sa->entries[idx] == NULL) in strarrays__scnprintf()
571 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); in strarrays__scnprintf()
577 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix); in strarrays__scnprintf()
585 for (i = 0; i < sa->nr_entries; ++i) { in strarray__strtoul()
586 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { in strarray__strtoul()
587 *ret = sa->offset + i; in strarray__strtoul()
607 size -= sep - tok + 1; in strarray__strtoul_flags()
609 end = sep - 1; in strarray__strtoul_flags()
611 --end; in strarray__strtoul_flags()
613 toklen = end - tok + 1; in strarray__strtoul_flags()
625 *ret |= (1 << (val - 1)); in strarray__strtoul_flags()
639 for (i = 0; i < sas->nr_entries; ++i) { in strarrays__strtoul()
640 struct strarray *sa = sas->entries[i]; in strarrays__strtoul()
652 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); in syscall_arg__scnprintf_strarrays()
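strarray__scnprintf() and its strarrays variant above turn a raw integer argument into a symbolic name by subtracting sa->offset and indexing a string table, falling back to the number plus a "???" comment when the value is unknown. A standalone sketch of that lookup (illustration only; struct name_array and the whence table are made up, not the perf struct strarray):

    #include <stdio.h>

    struct name_array {
            int offset;
            int nr_entries;
            const char **entries;
    };

    static const char *whence_names[] = { "SET", "CUR", "END" };
    static const struct name_array lseek_whence = {
            .offset = 0, .nr_entries = 3, .entries = whence_names,
    };

    /* Same shape as strarray__scnprintf(): known values print as names,
     * unknown ones fall back to the raw integer with a "???" marker. */
    static void print_arg(const struct name_array *sa, int val)
    {
            int idx = val - sa->offset;

            if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
                    printf("%d /* ??? */\n", val);
            else
                    printf("%s\n", sa->entries[idx]);
    }

    int main(void)
    {
            print_arg(&lseek_whence, 2);    /* END */
            print_arg(&lseek_whence, 9);    /* 9 with a ??? comment */
            return 0;
    }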
656 #define AT_FDCWD -100
662 int fd = arg->val; in syscall_arg__scnprintf_fd_at()
666 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); in syscall_arg__scnprintf_fd_at()
680 return scnprintf(bf, size, "%#lx", arg->val); in syscall_arg__scnprintf_hex()
685 if (arg->val == 0) in syscall_arg__scnprintf_ptr()
692 return scnprintf(bf, size, "%d", arg->val); in syscall_arg__scnprintf_int()
697 return scnprintf(bf, size, "%ld", arg->val); in syscall_arg__scnprintf_long()
705 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val); in syscall_arg__scnprintf_char_array()
801 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_access_mode()
804 int mode = arg->val; in syscall_arg__scnprintf_access_mode()
810 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ in syscall_arg__scnprintf_access_mode()
820 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); in syscall_arg__scnprintf_access_mode()
835 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_pipe_flags()
837 int printed = 0, flags = arg->val; in syscall_arg__scnprintf_pipe_flags()
841 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ in syscall_arg__scnprintf_pipe_flags()
850 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); in syscall_arg__scnprintf_pipe_flags()
867 bool show_prefix = arg->show_string_prefix; in syscall_arg__scnprintf_getrandom_flags()
869 int printed = 0, flags = arg->val; in syscall_arg__scnprintf_getrandom_flags()
873 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ in syscall_arg__scnprintf_getrandom_flags()
882 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); in syscall_arg__scnprintf_getrandom_flags()
1174 return strcmp(name, fmt->name); in syscall_fmt__cmp()
1290 ttrace->files.max = -1; in thread_trace__new()
1291 ttrace->syscall_stats = intlist__new(NULL); in thread_trace__new()
1306 intlist__delete(ttrace->syscall_stats); in thread_trace__delete()
1307 ttrace->syscall_stats = NULL; in thread_trace__delete()
1309 zfree(&ttrace->entry_str); in thread_trace__delete()
1327 ++ttrace->nr_events; in thread__trace()
1340 struct thread_trace *ttrace = thread__priv(arg->thread); in syscall_arg__set_ret_scnprintf()
1342 ttrace->ret_scnprintf = ret_scnprintf; in syscall_arg__set_ret_scnprintf()
1352 for (int i = 0; i < ttrace->files.max; ++i) { in thread_trace__free_files()
1353 struct file *file = ttrace->files.table + i; in thread_trace__free_files()
1354 zfree(&file->pathname); in thread_trace__free_files()
1357 zfree(&ttrace->files.table); in thread_trace__free_files()
1358 ttrace->files.max = -1; in thread_trace__free_files()
1366 if (fd > ttrace->files.max) { in thread_trace__files_entry()
1367 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file)); in thread_trace__files_entry()
1372 if (ttrace->files.max != -1) { in thread_trace__files_entry()
1373 memset(nfiles + ttrace->files.max + 1, 0, in thread_trace__files_entry()
1374 (fd - ttrace->files.max) * sizeof(struct file)); in thread_trace__files_entry()
1379 ttrace->files.table = nfiles; in thread_trace__files_entry()
1380 ttrace->files.max = fd; in thread_trace__files_entry()
1383 return ttrace->files.table + fd; in thread_trace__files_entry()
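thread_trace__files_entry() above grows a per-thread, fd-indexed table with realloc() and zero-fills the hole between the old files.max and the newly requested fd. A condensed standalone sketch of that growth logic (illustration only; fd_table and file_entry are stand-in names, not the perf types):

    #include <stdlib.h>
    #include <string.h>

    struct file_entry { const char *pathname; };

    struct fd_table {
            struct file_entry *table;
            int max;                        /* highest valid fd, -1 when empty */
    };

    /* Grow the table on demand and zero-fill the gap between the previous
     * maximum and the requested fd, as thread_trace__files_entry() does. */
    static struct file_entry *fd_table_entry(struct fd_table *ft, int fd)
    {
            if (fd < 0)
                    return NULL;
            if (fd > ft->max) {
                    struct file_entry *n = realloc(ft->table, (fd + 1) * sizeof(*n));

                    if (n == NULL)
                            return NULL;
                    if (ft->max != -1)
                            memset(n + ft->max + 1, 0, (fd - ft->max) * sizeof(*n));
                    else
                            memset(n, 0, (fd + 1) * sizeof(*n));
                    ft->table = n;
                    ft->max = fd;
            }
            return ft->table + fd;
    }

    int main(void)
    {
            struct fd_table ft = { .table = NULL, .max = -1 };
            struct file_entry *e = fd_table_entry(&ft, 3);

            if (e)
                    e->pathname = "/tmp/example";
            fd_table_entry(&ft, 10);        /* grows again, zero-filling entries 4..10 */
            free(ft.table);
            return 0;
    }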
1399 file->dev_maj = major(st.st_rdev); in trace__set_fd_pathname()
1400 file->pathname = strdup(pathname); in trace__set_fd_pathname()
1401 if (file->pathname) in trace__set_fd_pathname()
1405 return -1; in trace__set_fd_pathname()
1424 return -1; in thread__read_fd_path()
1429 return -1; in thread__read_fd_path()
1440 if (ttrace == NULL || trace->fd_path_disabled) in thread__fd_path()
1446 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { in thread__fd_path()
1447 if (!trace->live) in thread__fd_path()
1449 ++trace->stats.proc_getname; in thread__fd_path()
1454 return ttrace->files.table[fd].pathname; in thread__fd_path()
1459 int fd = arg->val; in syscall_arg__scnprintf_fd()
1461 const char *path = thread__fd_path(arg->thread, fd, arg->trace); in syscall_arg__scnprintf_fd()
1464 printed += scnprintf(bf + printed, size - printed, "<%s>", path); in syscall_arg__scnprintf_fd()
1472 struct thread *thread = machine__find_thread(trace->host, pid, pid); in pid__scnprintf_fd()
1478 printed += scnprintf(bf + printed, size - printed, "<%s>", path); in pid__scnprintf_fd()
1489 int fd = arg->val; in syscall_arg__scnprintf_close_fd()
1491 struct thread_trace *ttrace = thread__priv(arg->thread); in syscall_arg__scnprintf_close_fd()
1493 if (ttrace && fd >= 0 && fd <= ttrace->files.max) in syscall_arg__scnprintf_close_fd()
1494 zfree(&ttrace->files.table[fd].pathname); in syscall_arg__scnprintf_close_fd()
1504 ttrace->filename.ptr = ptr; in thread__set_filename_pos()
1505 ttrace->filename.entry_str_pos = bf - ttrace->entry_str; in thread__set_filename_pos()
1510 struct augmented_arg *augmented_arg = arg->augmented.args; in syscall_arg__scnprintf_augmented_string()
1511 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value); in syscall_arg__scnprintf_augmented_string()
1516 int consumed = sizeof(*augmented_arg) + augmented_arg->size; in syscall_arg__scnprintf_augmented_string()
1518 arg->augmented.args = ((void *)arg->augmented.args) + consumed; in syscall_arg__scnprintf_augmented_string()
1519 arg->augmented.size -= consumed; in syscall_arg__scnprintf_augmented_string()
1527 unsigned long ptr = arg->val; in syscall_arg__scnprintf_filename()
1529 if (arg->augmented.args) in syscall_arg__scnprintf_filename()
1532 if (!arg->trace->vfs_getname) in syscall_arg__scnprintf_filename()
1535 thread__set_filename_pos(arg->thread, bf, ptr); in syscall_arg__scnprintf_filename()
1541 return t < (trace->duration_filter * NSEC_PER_MSEC); in trace__filter_duration()
1546 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; in __trace__fprintf_tstamp()
1553 * using ttrace->entry_time for a thread that receives a sys_exit without
1565 static pid_t workload_pid = -1;
1577 if (info->si_pid == workload_pid) in sighandler_chld()
1585 if (trace->multiple_threads) { in trace__fprintf_comm_tid()
1586 if (trace->show_comm) in trace__fprintf_comm_tid()
1599 if (trace->show_tstamp) in trace__fprintf_entry_head()
1601 if (trace->show_duration) in trace__fprintf_entry_head()
1611 switch (event->header.type) { in trace__process_event()
1613 color_fprintf(trace->output, PERF_COLOR_RED, in trace__process_event()
1614 "LOST %" PRIu64 " events!\n", event->lost.lost); in trace__process_event()
1638 if (machine->kptr_restrict_warned) in trace__machine__resolve_kernel_addr()
1645 machine->kptr_restrict_warned = true; in trace__machine__resolve_kernel_addr()
1659 trace->host = machine__new_host(); in trace__symbols_init()
1660 if (trace->host == NULL) in trace__symbols_init()
1661 return -ENOMEM; in trace__symbols_init()
1665 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); in trace__symbols_init()
1669 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, in trace__symbols_init()
1670 evlist->core.threads, trace__tool_process, in trace__symbols_init()
1681 machine__exit(trace->host); in trace__symbols__exit()
1682 trace->host = NULL; in trace__symbols__exit()
1691 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0) in syscall__alloc_arg_fmts()
1692 nr_args = sc->fmt->nr_args; in syscall__alloc_arg_fmts()
1694 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); in syscall__alloc_arg_fmts()
1695 if (sc->arg_fmt == NULL) in syscall__alloc_arg_fmts()
1696 return -1; in syscall__alloc_arg_fmts()
1699 if (sc->fmt) in syscall__alloc_arg_fmts()
1700 sc->arg_fmt[idx] = sc->fmt->arg[idx]; in syscall__alloc_arg_fmts()
1703 sc->nr_args = nr_args; in syscall__alloc_arg_fmts()
1715 return strcmp(name, fmt->name); in syscall_arg_fmt__cmp()
1737 for (; field; field = field->next, ++arg) { in syscall_arg_fmt__init_array()
1740 if (arg->scnprintf) in syscall_arg_fmt__init_array()
1743 len = strlen(field->name); in syscall_arg_fmt__init_array()
1745 if (strcmp(field->type, "const char *") == 0 && in syscall_arg_fmt__init_array()
1746 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) || in syscall_arg_fmt__init_array()
1747 strstr(field->name, "path") != NULL)) in syscall_arg_fmt__init_array()
1748 arg->scnprintf = SCA_FILENAME; in syscall_arg_fmt__init_array()
1749 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr")) in syscall_arg_fmt__init_array()
1750 arg->scnprintf = SCA_PTR; in syscall_arg_fmt__init_array()
1751 else if (strcmp(field->type, "pid_t") == 0) in syscall_arg_fmt__init_array()
1752 arg->scnprintf = SCA_PID; in syscall_arg_fmt__init_array()
1753 else if (strcmp(field->type, "umode_t") == 0) in syscall_arg_fmt__init_array()
1754 arg->scnprintf = SCA_MODE_T; in syscall_arg_fmt__init_array()
1755 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) { in syscall_arg_fmt__init_array()
1756 arg->scnprintf = SCA_CHAR_ARRAY; in syscall_arg_fmt__init_array()
1757 arg->nr_entries = field->arraylen; in syscall_arg_fmt__init_array()
1758 } else if ((strcmp(field->type, "int") == 0 || in syscall_arg_fmt__init_array()
1759 strcmp(field->type, "unsigned int") == 0 || in syscall_arg_fmt__init_array()
1760 strcmp(field->type, "long") == 0) && in syscall_arg_fmt__init_array()
1761 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) { in syscall_arg_fmt__init_array()
1764 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c in syscall_arg_fmt__init_array()
1769 arg->scnprintf = SCA_FD; in syscall_arg_fmt__init_array()
1772 syscall_arg_fmt__find_by_name(field->name); in syscall_arg_fmt__init_array()
1775 arg->scnprintf = fmt->scnprintf; in syscall_arg_fmt__init_array()
1776 arg->strtoul = fmt->strtoul; in syscall_arg_fmt__init_array()
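The loop above guesses a pretty-printer for each tracepoint argument from its tracefs type and name: a const char * whose name ends in "name" or contains "path" gets the filename printer, pointers get hex, pid_t gets the pid printer, and so on. A condensed sketch of just that decision (illustration only; the enum and pick_formatter() are made-up stand-ins for the SCA_* assignments):

    #include <stdio.h>
    #include <string.h>

    enum formatter { FMT_DEFAULT, FMT_FILENAME, FMT_PTR, FMT_PID, FMT_MODE_T, FMT_FD };

    /* Reduced mirror of the type/name heuristics in syscall_arg_fmt__init_array(). */
    static enum formatter pick_formatter(const char *type, const char *name, int is_pointer)
    {
            size_t len = strlen(name);

            if (strcmp(type, "const char *") == 0 &&
                ((len >= 4 && strcmp(name + len - 4, "name") == 0) || strstr(name, "path")))
                    return FMT_FILENAME;
            if (is_pointer || strstr(name, "addr"))
                    return FMT_PTR;
            if (strcmp(type, "pid_t") == 0)
                    return FMT_PID;
            if (strcmp(type, "umode_t") == 0)
                    return FMT_MODE_T;
            if ((strcmp(type, "int") == 0 || strcmp(type, "unsigned int") == 0 ||
                 strcmp(type, "long") == 0) && len >= 2 && strcmp(name + len - 2, "fd") == 0)
                    return FMT_FD;
            return FMT_DEFAULT;
    }

    int main(void)
    {
            printf("%d %d\n", pick_formatter("const char *", "filename", 0),   /* FMT_FILENAME */
                   pick_formatter("unsigned int", "fd", 0));                   /* FMT_FD */
            return 0;
    }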
1786 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args); in syscall__set_arg_fmts()
1789 sc->args_size = last_field->offset + last_field->size; in syscall__set_arg_fmts()
1798 const char *name = syscalltbl__name(trace->sctbl, id); in trace__read_syscall_info()
1801 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
1802 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
1803 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
1804 return -ENOMEM; in trace__read_syscall_info()
1807 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) { in trace__read_syscall_info()
1809 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); in trace__read_syscall_info()
1812 return -ENOMEM; in trace__read_syscall_info()
1815 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
1818 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc)); in trace__read_syscall_info()
1820 trace->syscalls.table = table; in trace__read_syscall_info()
1821 trace->sctbl->syscalls.max_id = id; in trace__read_syscall_info()
1824 sc = trace->syscalls.table + id; in trace__read_syscall_info()
1825 if (sc->nonexistent) in trace__read_syscall_info()
1826 return -EEXIST; in trace__read_syscall_info()
1829 sc->nonexistent = true; in trace__read_syscall_info()
1830 return -EEXIST; in trace__read_syscall_info()
1833 sc->name = name; in trace__read_syscall_info()
1834 sc->fmt = syscall_fmt__find(sc->name); in trace__read_syscall_info()
1836 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); in trace__read_syscall_info()
1837 sc->tp_format = trace_event__tp_format("syscalls", tp_name); in trace__read_syscall_info()
1839 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) { in trace__read_syscall_info()
1840 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); in trace__read_syscall_info()
1841 sc->tp_format = trace_event__tp_format("syscalls", tp_name); in trace__read_syscall_info()
1848 if (IS_ERR(sc->tp_format)) { in trace__read_syscall_info()
1849 sc->nonexistent = true; in trace__read_syscall_info()
1850 return PTR_ERR(sc->tp_format); in trace__read_syscall_info()
1853 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? in trace__read_syscall_info()
1854 RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields)) in trace__read_syscall_info()
1855 return -ENOMEM; in trace__read_syscall_info()
1857 sc->args = sc->tp_format->format.fields; in trace__read_syscall_info()
1863 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { in trace__read_syscall_info()
1864 sc->args = sc->args->next; in trace__read_syscall_info()
1865 --sc->nr_args; in trace__read_syscall_info()
1868 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); in trace__read_syscall_info()
1869 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); in trace__read_syscall_info()
1879 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields); in evsel__init_tp_arg_scnprintf()
1883 return -ENOMEM; in evsel__init_tp_arg_scnprintf()
1890 return *one - *another; in intcmp()
1898 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); in trace__validate_ev_qualifier()
1900 trace->ev_qualifier_ids.entries = malloc(nr_allocated * in trace__validate_ev_qualifier()
1901 sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
1903 if (trace->ev_qualifier_ids.entries == NULL) { in trace__validate_ev_qualifier()
1905 trace->output); in trace__validate_ev_qualifier()
1906 err = -EINVAL; in trace__validate_ev_qualifier()
1910 strlist__for_each_entry(pos, trace->ev_qualifier) { in trace__validate_ev_qualifier()
1911 const char *sc = pos->s; in trace__validate_ev_qualifier()
1912 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; in trace__validate_ev_qualifier()
1915 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
1930 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
1931 if (match_next == -1) in trace__validate_ev_qualifier()
1935 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
1942 entries = realloc(trace->ev_qualifier_ids.entries, in trace__validate_ev_qualifier()
1943 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
1945 err = -ENOMEM; in trace__validate_ev_qualifier()
1946 fputs("\nError:\t Not enough memory for parsing\n", trace->output); in trace__validate_ev_qualifier()
1949 trace->ev_qualifier_ids.entries = entries; in trace__validate_ev_qualifier()
1951 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
1955 trace->ev_qualifier_ids.nr = nr_used; in trace__validate_ev_qualifier()
1956 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); in trace__validate_ev_qualifier()
1962 zfree(&trace->ev_qualifier_ids.entries); in trace__validate_ev_qualifier()
1963 trace->ev_qualifier_ids.nr = 0; in trace__validate_ev_qualifier()
1971 if (trace->ev_qualifier_ids.nr == 0) in trace__syscall_enabled()
1974 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, in trace__syscall_enabled()
1975 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; in trace__syscall_enabled()
1978 return !trace->not_ev_qualifier; in trace__syscall_enabled()
1980 return trace->not_ev_qualifier; in trace__syscall_enabled()
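trace__validate_ev_qualifier() qsort()s the collected syscall ids with intcmp() precisely so that trace__syscall_enabled() above can bsearch() them on every event. A standalone sketch of that sorted-membership test (illustration only; the example ids are arbitrary):

    #include <stdio.h>
    #include <stdlib.h>

    static int intcmp(const void *a, const void *b)
    {
            const int *one = a, *another = b;

            return *one - *another;
    }

    static int id_enabled(const int *ids, size_t nr, int id)
    {
            return bsearch(&id, ids, nr, sizeof(int), intcmp) != NULL;
    }

    int main(void)
    {
            int ids[] = { 257, 3, 0, 59 };          /* arbitrary syscall numbers */
            size_t nr = sizeof(ids) / sizeof(ids[0]);

            qsort(ids, nr, sizeof(int), intcmp);
            printf("%d %d\n", id_enabled(ids, nr, 59), id_enabled(ids, nr, 42));   /* 1 0 */
            return 0;
    }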
1985 * 8-byte unaligned accesses. args points to raw_data within the event
1986 * and raw_data is guaranteed to be 8-byte unaligned because it is
1994 unsigned char *p = arg->args + sizeof(unsigned long) * idx; in syscall_arg__val()
2003 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) in syscall__scnprintf_name()
2004 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); in syscall__scnprintf_name()
2006 return scnprintf(bf, size, "arg%d: ", arg->idx); in syscall__scnprintf_name()
2016 if (fmt && fmt->mask_val) in syscall_arg_fmt__mask_val()
2017 return fmt->mask_val(arg, val); in syscall_arg_fmt__mask_val()
2025 if (fmt && fmt->scnprintf) { in syscall_arg_fmt__scnprintf_val()
2026 arg->val = val; in syscall_arg_fmt__scnprintf_val()
2027 if (fmt->parm) in syscall_arg_fmt__scnprintf_val()
2028 arg->parm = fmt->parm; in syscall_arg_fmt__scnprintf_val()
2029 return fmt->scnprintf(bf, size, arg); in syscall_arg_fmt__scnprintf_val()
2051 .show_string_prefix = trace->show_string_prefix, in syscall__scnprintf_args()
2060 ttrace->ret_scnprintf = NULL; in syscall__scnprintf_args()
2062 if (sc->args != NULL) { in syscall__scnprintf_args()
2065 for (field = sc->args; field; in syscall__scnprintf_args()
2066 field = field->next, ++arg.idx, bit <<= 1) { in syscall__scnprintf_args()
2070 arg.fmt = &sc->arg_fmt[arg.idx]; in syscall__scnprintf_args()
2076 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); in syscall__scnprintf_args()
2084 !trace->show_zeros && in syscall__scnprintf_args()
2085 !(sc->arg_fmt && in syscall__scnprintf_args()
2086 (sc->arg_fmt[arg.idx].show_zero || in syscall__scnprintf_args()
2087 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY || in syscall__scnprintf_args()
2088 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) && in syscall__scnprintf_args()
2089 sc->arg_fmt[arg.idx].parm)) in syscall__scnprintf_args()
2092 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); in syscall__scnprintf_args()
2094 if (trace->show_arg_names) in syscall__scnprintf_args()
2095 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); in syscall__scnprintf_args()
2097 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], in syscall__scnprintf_args()
2098 bf + printed, size - printed, &arg, val); in syscall__scnprintf_args()
2100 } else if (IS_ERR(sc->tp_format)) { in syscall__scnprintf_args()
2106 while (arg.idx < sc->nr_args) { in syscall__scnprintf_args()
2111 printed += scnprintf(bf + printed, size - printed, ", "); in syscall__scnprintf_args()
2112 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); in syscall__scnprintf_args()
2113 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); in syscall__scnprintf_args()
2140 * grep "NR -1 " /t/trace_pipe
2146 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2152 err = -EINVAL;
2155 if (id > trace->sctbl->syscalls.max_id) {
2157 if (id >= trace->sctbl->syscalls.max_id) {
2169 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2173 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2176 return &trace->syscalls.table[id];
2181 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2182 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2183 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2184 fputs(" information\n", trace->output);
2203 inode = intlist__findnew(ttrace->syscall_stats, id);
2207 stats = inode->priv;
2213 init_stats(&stats->stats);
2214 inode->priv = stats;
2217 if (ttrace->entry_time && sample->time > ttrace->entry_time)
2218 duration = sample->time - ttrace->entry_time;
2220 update_stats(&stats->stats, duration);
2223 ++stats->nr_failures;
2228 err = -err;
2229 if (err > stats->max_errno) {
2230 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2233 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2241 stats->errnos = new_errnos;
2242 stats->max_errno = err;
2245 ++stats->errnos[err - 1];
2255 if (trace->failure_only || trace->current == NULL)
2258 ttrace = thread__priv(trace->current);
2260 if (!ttrace->entry_pending)
2263 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2264 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2266 if (len < trace->args_alignment - 4)
2267 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2269 printed += fprintf(trace->output, " ...\n");
2271 ttrace->entry_pending = false;
2272 ++trace->nr_events_printed;
2282 if (trace->print_sample) {
2283 double ts = (double)sample->time / NSEC_PER_MSEC;
2285 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2288 sample->pid, sample->tid, sample->cpu);
2302 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
2305 * We'll revisit this later to pass s->args_size to the BPF augmenter
2311 int args_size = raw_augmented_args_size ?: sc->args_size;
2313 *augmented_args_size = sample->raw_size - args_size;
2315 augmented_args = sample->raw_data + args_size;
2325 zfree(&sc->arg_fmt);
2336 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2343 return -1;
2345 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2346 ttrace = thread__trace(thread, trace->output);
2354 if (ttrace->entry_str == NULL) {
2355 ttrace->entry_str = malloc(trace__entry_str_size);
2356 if (!ttrace->entry_str)
2360 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2366 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
2369 * thinking that the extra 2 u64 args are the augmented filename, so just check
2372 if (evsel != trace->syscalls.events.sys_enter)
2373 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2374 ttrace->entry_time = sample->time;
2375 msg = ttrace->entry_str;
2376 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2378 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2381 if (sc->is_exit) {
2382 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2385 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2386 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2387 if (trace->args_alignment > printed)
2388 alignment = trace->args_alignment - printed;
2389 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2392 ttrace->entry_pending = true;
2394 ttrace->filename.pending_open = false;
2397 if (trace->current != thread) {
2398 thread__put(trace->current);
2399 trace->current = thread__get(thread);
2412 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2419 return -1;
2421 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2422 ttrace = thread__trace(thread, trace->output);
2431 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2433 fprintf(trace->output, "%s", msg);
2445 int max_stack = evsel->core.attr.sample_max_stack ?
2446 evsel->core.attr.sample_max_stack :
2447 trace->max_stack;
2448 int err = -1;
2451 if (machine__resolve(trace->host, &al, sample) < 0)
2462 /* TODO: user-configurable print_opts */
2467 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2485 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2486 int alignment = trace->args_alignment;
2491 return -1;
2493 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2494 ttrace = thread__trace(thread, trace->output);
2502 if (trace->summary)
2503 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2505 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2506 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2507 ttrace->filename.pending_open = false;
2508 ++trace->stats.vfs_getname;
2511 if (ttrace->entry_time) {
2512 duration = sample->time - ttrace->entry_time;
2516 } else if (trace->duration_filter)
2519 if (sample->callchain) {
2524 if (cursor->nr < trace->min_stack)
2530 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2533 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2535 if (ttrace->entry_pending) {
2536 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2538 printed += fprintf(trace->output, " ... [");
2539 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2541 printed += fprintf(trace->output, "]: %s()", sc->name);
2547 alignment -= printed;
2551 fprintf(trace->output, ")%*s= ", alignment, " ");
2553 if (sc->fmt == NULL) {
2557 fprintf(trace->output, "%ld", ret);
2561 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2562 *e = errno_to_name(evsel, -ret);
2564 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2566 } else if (ret == 0 && sc->fmt->timeout)
2567 fprintf(trace->output, "0 (Timeout)");
2568 else if (ttrace->ret_scnprintf) {
2575 ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2576 ttrace->ret_scnprintf = NULL;
2577 fprintf(trace->output, "%s", bf);
2578 } else if (sc->fmt->hexret)
2579 fprintf(trace->output, "%#lx", ret);
2580 else if (sc->fmt->errpid) {
2581 struct thread *child = machine__find_thread(trace->host, ret, ret);
2584 fprintf(trace->output, "%ld", ret);
2586 fprintf(trace->output, " (%s)", thread__comm_str(child));
2592 fputc('\n', trace->output);
2595 * We only consider an 'event' for the sake of --max-events a non-filtered
2598 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2606 ttrace->entry_pending = false;
2617 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2635 if (ttrace->filename.namelen < filename_len) {
2636 char *f = realloc(ttrace->filename.name, filename_len + 1);
2641 ttrace->filename.namelen = filename_len;
2642 ttrace->filename.name = f;
2645 strcpy(ttrace->filename.name, filename);
2646 ttrace->filename.pending_open = true;
2648 if (!ttrace->filename.ptr)
2651 entry_str_len = strlen(ttrace->entry_str);
2652 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2657 filename += filename_len - remaining_space;
2661 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2662 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2666 ttrace->filename.ptr = 0;
2667 ttrace->filename.entry_str_pos = 0;
2680 struct thread *thread = machine__findnew_thread(trace->host,
2681 sample->pid,
2682 sample->tid);
2683 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2688 ttrace->runtime_ms += runtime_ms;
2689 trace->runtime_ms += runtime_ms;
2695 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2696 evsel->name,
2705 unsigned int val, void *extra __maybe_unused, FILE *fp)
2731 binary__fprintf(sample->raw_data, sample->raw_size, 8,
2732 bpf_output__printer, NULL, trace->output);
2733 ++trace->nr_events_printed;
2741 struct tep_format_field *field = evsel->tp_format->format.fields;
2755 .show_string_prefix = trace->show_string_prefix,
2758 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
2764 if (field->flags & TEP_FIELD_IS_ARRAY) {
2765 int offset = field->offset;
2767 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2768 offset = format_field__intval(field, sample, evsel->needs_swap);
2771 if (tep_field_is_relative(field->flags))
2772 offset += field->offset + field->size;
2775 val = (uintptr_t)(sample->raw_data + offset);
2777 val = format_field__intval(field, sample, evsel->needs_swap);
2790 !trace->show_zeros &&
2791 !((arg->show_zero ||
2792 arg->scnprintf == SCA_STRARRAY ||
2793 arg->scnprintf == SCA_STRARRAYS) &&
2794 arg->parm))
2797 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2799 if (trace->show_arg_names)
2800 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2802 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
2805 return printed + fprintf(trace->output, "%s", bf);
2820 if (evsel->disabled)
2823 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2825 if (sample->callchain) {
2830 if (cursor->nr < trace->min_stack)
2837 trace__fprintf_tstamp(trace, sample->time, trace->output);
2839 if (trace->trace_syscalls && trace->show_duration)
2840 fprintf(trace->output, "( ): ");
2843 trace__fprintf_comm_tid(trace, thread, trace->output);
2845 if (evsel == trace->syscalls.events.bpf_output) {
2850 fprintf(trace->output, "%s(", sc->name);
2852 fputc(')', trace->output);
2863 fprintf(trace->output, "%s(", evsel->name);
2867 } else if (evsel->tp_format) {
2868 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2870 if (trace->libtraceevent_print) {
2871 event_format__fprintf(evsel->tp_format, sample->cpu,
2872 sample->raw_data, sample->raw_size,
2873 trace->output);
2881 fprintf(trace->output, ")\n");
2888 ++trace->nr_events_printed;
2890 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2904 if ((verbose > 0 || print_dso) && al->map)
2905 fprintf(f, "%s@", map__dso(al->map)->long_name);
2907 if ((verbose > 0 || print_sym) && al->sym)
2908 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2909 al->addr - al->sym->start);
2910 else if (al->map)
2911 fprintf(f, "0x%" PRIx64, al->addr);
2913 fprintf(f, "0x%" PRIx64, sample->addr);
2925 int err = -1;
2929 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2931 if (sample->callchain) {
2936 if (cursor->nr < trace->min_stack)
2942 ttrace = thread__trace(thread, trace->output);
2946 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2947 ttrace->pfmaj++;
2949 ttrace->pfmin++;
2951 if (trace->summary_only)
2954 thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2956 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2958 fprintf(trace->output, "%sfault [",
2959 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2962 print_location(trace->output, sample, &al, false, true);
2964 fprintf(trace->output, "] => ");
2966 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2969 thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2977 print_location(trace->output, sample, &al, true, false);
2979 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2986 ++trace->nr_events_printed;
3001 * and don't use sample->time unconditionally, we may end up having
3007 if (trace->base_time == 0 && !trace->full_time &&
3008 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
3009 trace->base_time = sample->time;
3022 tracepoint_handler handler = evsel->handler;
3024 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3031 ++trace->nr_events;
3045 "-R",
3046 "-m", "1024",
3047 "-c", "1",
3051 const char * const sc_args[] = { "-e", };
3053 const char * const majpf_args[] = { "-e", "major-faults" };
3055 const char * const minpf_args[] = { "-e", "minor-faults" };
3057 int err = -1;
3071 if (trace->trace_syscalls) {
3075 /* event string may be different for older kernels - e.g., RHEL6 */
3086 rec_argv[j++] = "--filter";
3089 if (trace->trace_pgfaults & TRACE_PFMAJ)
3093 if (trace->trace_pgfaults & TRACE_PFMIN)
3127 evsel->handler = trace__vfs_getname;
3132 list_del_init(&evsel->core.node);
3133 evsel->evlist = NULL;
3155 evsel->handler = trace__pgfault;
3165 evsel_trace__delete(evsel->priv);
3166 evsel->priv = NULL;
3172 const u32 type = event->header.type;
3176 trace__process_event(trace, trace->host, event, sample);
3180 evsel = evlist__id2evsel(trace->evlist, sample->id);
3182 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3186 if (evswitch__discard(&trace->evswitch, evsel))
3191 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3192 sample->raw_data == NULL) {
3193 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3194 evsel__name(evsel), sample->tid,
3195 sample->cpu, sample->raw_size);
3197 tracepoint_handler handler = evsel->handler;
3201 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3207 int ret = -1;
3208 struct evlist *evlist = trace->evlist;
3225 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3226 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3231 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3235 * debugging reasons using --kernel_syscall_callchains
3237 sys_exit->core.attr.exclude_callchain_kernel = 1;
3240 trace->syscalls.events.sys_enter = sys_enter;
3241 trace->syscalls.events.sys_exit = sys_exit;
3256 int err = -1;
3258 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3259 trace->ev_qualifier_ids.nr,
3260 trace->ev_qualifier_ids.entries);
3265 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3266 sys_exit = trace->syscalls.events.sys_exit;
3284 if (trace->skel->obj == NULL)
3287 bpf_object__for_each_program(pos, trace->skel->obj) {
3305 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
3309 if (sc->fmt && sc->fmt->alias) {
3310 scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
3326 prog_name, type, sc->name);
3328 return trace->skel->progs.syscall_unaugmented;
3338 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3339 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3345 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3351 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3362 for (field = sc->args; field; field = field->next) {
3363 if (field->flags & TEP_FIELD_IS_POINTER)
3370 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3376 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
3379 for (field = sc->args, candidate_field = pair->args;
3380 field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3381 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3382 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3397 if (strcmp(field->type, candidate_field->type))
3405 if (strcmp(field->type, "const char *") == 0 &&
3406 !(strstr(field->name, "name") ||
3407 strstr(field->name, "path") ||
3408 strstr(field->name, "file") ||
3409 strstr(field->name, "root") ||
3410 strstr(field->name, "description")))
3425 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3426 if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3430 pair_prog = pair->bpf_prog.sys_enter;
3435 * program for a filtered syscall on a non-filtered one.
3441 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3442 if (pair_prog == trace->skel->progs.syscall_unaugmented)
3446 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3457 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3458 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
3461 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3483 * syscall with an augmenter so that we can auto-reuse it.
3508 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3513 if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3520 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
3531 sc->bpf_prog.sys_enter = pair_prog;
3537 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3549 if (trace->syscalls.events.sys_enter)
3578 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3581 struct thread *parent = machine__find_thread(trace->host,
3589 strstarts(thread__comm_str(parent), "gnome-terminal")) {
3596 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3597 if (!err && trace->filter_pids.map)
3598 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3612 if (trace->filter_pids.nr > 0) {
3613 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3614 trace->filter_pids.entries);
3615 if (!err && trace->filter_pids.map) {
3616 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3617 trace->filter_pids.entries);
3619 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3628 struct evlist *evlist = trace->evlist;
3633 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3642 u64 first = ordered_events__first_time(&trace->oe.data);
3643 u64 flush = trace->oe.last - NSEC_PER_SEC;
3647 return ordered_events__flush_time(&trace->oe.data, flush);
3654 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3661 if (!trace->sort_events)
3664 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3665 if (err && err != -1)
3668 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
3675 static int ordered_events__deliver_event(struct ordered_events *oe, argument
3678 struct trace *trace = container_of(oe, struct trace, oe.data);
3680 return __trace__deliver_event(trace, event->event);
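The trace->oe.data / trace->oe.last pairing above implements a reordering window: events are queued with their timestamps and only flushed once they are at least NSEC_PER_SEC older than the newest timestamp seen. A rough standalone sketch of that windowed flush (illustration only; a fixed array stands in for the perf ordered_events machinery):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct queued_event { uint64_t time; int id; };

    static int by_time(const void *a, const void *b)
    {
            const struct queued_event *x = a, *y = b;

            return (x->time > y->time) - (x->time < y->time);
    }

    /* Deliver (here: print) every queued event older than 'last - 1s' in time
     * order and keep the rest, mirroring what ordered_events__flush_time() does
     * for __trace__flush_events(). Returns how many events stay queued. */
    static size_t flush(struct queued_event *q, size_t nr, uint64_t last)
    {
            uint64_t limit = last - NSEC_PER_SEC;
            size_t i, kept = 0;

            qsort(q, nr, sizeof(*q), by_time);
            for (i = 0; i < nr; i++) {
                    if (q[i].time < limit)
                            printf("deliver id=%d t=%llu\n", q[i].id, (unsigned long long)q[i].time);
                    else
                            q[kept++] = q[i];
            }
            return kept;
    }

    int main(void)
    {
            struct queued_event q[] = {
                    { 3 * NSEC_PER_SEC, 1 }, { 1 * NSEC_PER_SEC, 2 }, { 5 * NSEC_PER_SEC, 3 },
            };
            size_t kept = flush(q, 3, 5 * NSEC_PER_SEC);    /* delivers id=2 then id=1 */

            printf("still queued: %zu\n", kept);
            return 0;
    }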
3688 if (evsel->tp_format == NULL || fmt == NULL)
3691 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
3692 if (strcmp(field->name, arg) == 0)
3700 char *tok, *left = evsel->filter, *new_filter = evsel->filter;
3729 int left_size = tok - left,
3730 right_size = right_end - right;
3733 while (isspace(left[left_size - 1]))
3734 --left_size;
3741 arg, evsel->name, evsel->filter);
3742 return -1;
3745 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
3746 arg, (int)(right - tok), tok, right_size, right);
3748 if (fmt->strtoul) {
3751 .parm = fmt->parm,
3754 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
3757 int expansion_offset = right - new_filter;
3764 return -1;
3766 if (new_filter != evsel->filter)
3772 right_size, right, arg, evsel->name, evsel->filter);
3773 return -1;
3777 arg, evsel->name, evsel->filter);
3778 return -1;
3787 if (new_filter != evsel->filter) {
3788 pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
3798 struct evlist *evlist = trace->evlist;
3802 if (evsel->filter == NULL)
3807 return -1;
3816 struct evlist *evlist = trace->evlist;
3818 int err = -1, i;
3823 trace->live = true;
3825 if (!trace->raw_augmented_syscalls) {
3826 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3829 if (trace->trace_syscalls)
3830 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3833 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3837 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3841 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3845 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3849 /* Enable ignoring missing threads when -u/-p option is defined. */
3850 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
3852 if (trace->sched &&
3859 * trace -G A -e sched:*switch
3864 * trace -e sched:*switch -G A
3872 * trace -G A -e sched:*switch -G B
3878 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3880 if (trace->cgroup)
3881 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3883 err = evlist__create_maps(evlist, &trace->opts.target);
3885 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3891 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3895 evlist__config(evlist, &trace->opts, &callchain_param);
3898 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
3900 fprintf(trace->output, "Couldn't run the workload!\n");
3903 workload_pid = evlist->workload.pid;
3910 if (trace->syscalls.events.bpf_output) {
3915 * CPU the bpf-output event's file descriptor.
3917 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
3918 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
3920 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
3931 if (trace->skel && trace->skel->progs.sys_enter)
3935 if (trace->ev_qualifier_ids.nr > 0) {
3940 if (trace->syscalls.events.sys_exit) {
3942 trace->syscalls.events.sys_exit->filter);
3949 * fd->pathname table and were ending up showing the last value set by
3957 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3966 err = evlist__mmap(evlist, trace->opts.mmap_pages);
3970 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
3976 if (trace->opts.target.initial_delay) {
3977 usleep(trace->opts.target.initial_delay * 1000);
3981 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3982 perf_thread_map__nr(evlist->core.threads) > 1 ||
3983 evlist__first(evlist)->core.attr.inherit;
3986 * Now that we already used evsel->core.attr to ask the kernel to setup the
3987 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
3988 * trace__resolve_callchain(), allowing per-event max-stack settings
3989 * to override an explicitly set --max-stack global setting.
3993 evsel->core.attr.sample_max_stack == 0)
3994 evsel->core.attr.sample_max_stack = trace->max_stack;
3997 before = trace->nr_events;
3999 for (i = 0; i < evlist->core.nr_mmaps; i++) {
4003 md = &evlist->mmap[i];
4004 if (perf_mmap__read_init(&md->core) < 0)
4007 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4008 ++trace->nr_events;
4014 perf_mmap__consume(&md->core);
4024 perf_mmap__read_done(&md->core);
4027 if (trace->nr_events == before) {
4028 int timeout = done ? 100 : -1;
4044 thread__zput(trace->current);
4048 if (trace->sort_events)
4049 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4052 if (trace->summary)
4053 trace__fprintf_thread_summary(trace, trace->output);
4055 if (trace->show_tool_stats) {
4056 fprintf(trace->output, "Stats:\n "
4059 trace->stats.vfs_getname,
4060 trace->stats.proc_getname);
4068 cgroup__put(trace->cgroup);
4069 trace->evlist = NULL;
4070 trace->live = false;
4091 fprintf(trace->output, "%s\n", errbuf);
4095 fprintf(trace->output,
4097 evsel->filter, evsel__name(evsel), errno,
4102 fprintf(trace->output, "Not enough memory to run!\n");
4106 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4118 .force = trace->force,
4122 int err = -1;
4124 trace->tool.sample = trace__process_sample;
4125 trace->tool.mmap = perf_event__process_mmap;
4126 trace->tool.mmap2 = perf_event__process_mmap2;
4127 trace->tool.comm = perf_event__process_comm;
4128 trace->tool.exit = perf_event__process_exit;
4129 trace->tool.fork = perf_event__process_fork;
4130 trace->tool.attr = perf_event__process_attr;
4131 trace->tool.tracing_data = perf_event__process_tracing_data;
4132 trace->tool.build_id = perf_event__process_build_id;
4133 trace->tool.namespaces = perf_event__process_namespaces;
4135 trace->tool.ordered_events = true;
4136 trace->tool.ordering_requires_timestamps = true;
4139 trace->multiple_threads = true;
4141 session = perf_session__new(&data, &trace->tool);
4145 if (trace->opts.target.pid)
4146 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4148 if (trace->opts.target.tid)
4149 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4151 if (symbol__init(&session->header.env) < 0)
4154 trace->host = &session->machines.host;
4160 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
4161 trace->syscalls.events.sys_enter = evsel;
4164 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
4173 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
4174 trace->syscalls.events.sys_exit = evsel;
4176 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
4184 evlist__for_each_entry(session->evlist, evsel) {
4185 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
4186 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
4187 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
4188 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
4189 evsel->handler = trace__pgfault;
4198 else if (trace->summary)
4199 trace__fprintf_thread_summary(trace, trace->output);
4216 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4223 struct syscall_stats *stats = source->priv;
4225 entry->syscall = source->i;
4226 entry->stats = stats;
4227 entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
4236 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
4245 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- ---------     --------\n");
4248 struct syscall_stats *stats = syscall_stats_entry->stats;
4250 double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
4251 double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
4252 double avg = avg_stats(&stats->stats);
4254 u64 n = (u64)stats->stats.n;
4256 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
4259 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4260 printed += fprintf(fp, " %-15s", sc->name);
4262 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
4265 if (trace->errno_summary && stats->nr_failures) {
4268 for (e = 0; e < stats->max_errno; ++e) {
4269 if (stats->errnos[e] != 0)
4270 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
4291 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4294 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4296 if (ttrace->pfmaj)
4297 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4298 if (ttrace->pfmin)
4299 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4300 if (trace->sched)
4301 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4312 return ttrace ? ttrace->nr_events : 0;
4316 (thread__nr_events(thread__priv(a->thread)) <
4317 thread__nr_events(thread__priv(b->thread))),
4321 entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
4331 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4339 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4349 struct trace *trace = opt->value;
4351 trace->duration_filter = atof(str);
4358 int ret = -1;
4360 struct trace *trace = opt->value;
4368 return -1;
4370 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4371 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4373 if (trace->filter_pids.entries == NULL)
4376 trace->filter_pids.entries[0] = getpid();
4378 for (i = 1; i < trace->filter_pids.nr; ++i)
4379 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4399 trace->output = fopen(filename, "w");
4401 return trace->output == NULL ? -errno : 0;
4407 int *trace_pgfaults = opt->value;
4416 return -1;
4426 if (evsel->handler == NULL)
4427 evsel->handler = handler;
4441 if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
4442 strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
4445 memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
4455 if (evsel->priv || !evsel->tp_format)
4458 if (strcmp(evsel->tp_format->system, "syscalls")) {
4464 return -1;
4466 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
4469 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
4470 return -1;
4472 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
4473 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
4476 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
4477 return -1;
4479 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
4487 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4489 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4497 struct trace *trace = (struct trace *)opt->value;
4500 int len = strlen(str) + 1, err = -1, list, idx;
4506 return -1;
4510 trace->not_ev_qualifier = true;
4518 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4519 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4527 s = fmt->name;
4555 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4556 if (trace->ev_qualifier == NULL) {
4557 fputs("Not enough memory to parse event qualifier", trace->output);
4563 trace->trace_syscalls = true;
4570 .evlistp = &trace->evlist,
4589 struct trace *trace = opt->value;
4591 if (!list_empty(&trace->evlist->core.entries)) {
4593 .value = &trace->evlist,
4597 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4608 trace->perfconfig_events = strdup(value);
4609 if (trace->perfconfig_events == NULL) {
4611 return -1;
4614 trace->show_tstamp = perf_config_bool(var, value);
4616 trace->show_duration = perf_config_bool(var, value);
4618 trace->show_arg_names = perf_config_bool(var, value);
4619 if (!trace->show_arg_names)
4620 trace->show_zeros = true;
4623 if (!trace->show_arg_names && !new_show_zeros) {
4627 trace->show_zeros = new_show_zeros;
4629 trace->show_string_prefix = perf_config_bool(var, value);
4631 trace->opts.no_inherit = perf_config_bool(var, value);
4635 trace->args_alignment = args_alignment;
4638 trace->libtraceevent_print = true;
4640 trace->libtraceevent_print = false;
4650 strlist__delete(trace->ev_qualifier);
4651 zfree(&trace->ev_qualifier_ids.entries);
4652 if (trace->syscalls.table) {
4653 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4654 syscall__exit(&trace->syscalls.table[i]);
4655 zfree(&trace->syscalls.table);
4657 syscalltbl__delete(trace->sctbl);
4658 zfree(&trace->perfconfig_events);
4664 int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
4667 pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
4677 "perf trace [<options>] -- <command> [<options>]",
4679 "perf trace record [<options>] -- <command> [<options>]",
4722 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4724 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4725 "system-wide collection from all CPUs"),
4728 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4730 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4745 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4747 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4748 "Show errno stats per syscall, use with -s or -S"),
4753 OPT_CALLBACK(0, "call-graph", &trace.opts,
4758 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4760 OPT_ULONG(0, "max-events", &trace.max_events,
4762 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4765 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4769 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4771 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4773 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4777 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
4787 int err = -1;
4805 err = -ENOMEM;
4813 * global setting. If it fails we'll get something in 'perf trace -v'
4827 * already figured out if -e syscall_name, if not but if --event
4829 * tracepoint events, not in the strace-like syscall-name-based mode.
4831 * This is important because we need to check if strace-like mode is
4837 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4841 * Now that we have --verbose figured out, lets see if we need to parse
4843 * BPF program fails, then we'll be able to use --verbose to see what went
4860 "cgroup monitoring only available in system-wide mode");
4877 bpf_object__for_each_program(prog, trace.skel->obj) {
4878 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
4903 err = -1;
4931 if (trace.evlist->core.nr_entries > 0) {
4940 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4941 ordered_events__set_copy_on_queue(&trace.oe.data, true);
4964 if (trace.syscalls.events.bpf_output->priv == NULL &&
4975 augmented->handler = trace__sys_enter;
4985 evsel->handler = trace__sys_enter;
5000 * don't look after the sc->args_size but
5005 * s->args_size to the BPF augmenter (now
5014 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5016 evsel->handler = trace__sys_exit;
5022 return trace__record(&trace, argc-1, &argv[1]);
5024 /* Using just --errno-summary will trigger --summary */