Lines Matching +full:record +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
3 * builtin-record.c
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
11 #include "util/build-id.h"
12 #include <subcmd/parse-options.h>
14 #include "util/parse-events.h"
30 #include "util/record.h"
37 #include "util/parse-branch-options.h"
38 #include "util/parse-regs-options.h"
41 #include "util/perf-hooks.h"
42 #include "util/cpu-set-sched.h"
43 #include "util/synthetic-events.h"
44 #include "util/time-utils.h"
46 #include "util/bpf-event.h"
53 #include "util/bpf-filter.h"
87 unsigned long size; member
113 struct record *rec;
152 struct record { struct
203 static int record__threads_enabled(struct record *rec) in record__threads_enabled() argument
205 return rec->opts.threads_spec; in record__threads_enabled()
208 static bool switch_output_signal(struct record *rec) in switch_output_signal()
210 return rec->switch_output.signal && in switch_output_signal()
214 static bool switch_output_size(struct record *rec) in switch_output_size()
216 return rec->switch_output.size && in switch_output_size()
218 (rec->bytes_written >= rec->switch_output.size); in switch_output_size()
221 static bool switch_output_time(struct record *rec) in switch_output_time()
223 return rec->switch_output.time && in switch_output_time()
227 static u64 record__bytes_written(struct record *rec) in record__bytes_written()
229 return rec->bytes_written + rec->thread_bytes_written; in record__bytes_written()
232 static bool record__output_max_size_exceeded(struct record *rec) in record__output_max_size_exceeded()
234 return rec->output_max_size && in record__output_max_size_exceeded()
235 (record__bytes_written(rec) >= rec->output_max_size); in record__output_max_size_exceeded()
238 static int record__write(struct record *rec, struct mmap *map __maybe_unused, in record__write()
239 void *bf, size_t size) in record__write() argument
241 struct perf_data_file *file = &rec->session->data->file; in record__write()
243 if (map && map->file) in record__write()
244 file = map->file; in record__write()
246 if (perf_data_file__write(file, bf, size) < 0) { in record__write()
248 return -1; in record__write()
251 if (map && map->file) { in record__write()
252 thread->bytes_written += size; in record__write()
253 rec->thread_bytes_written += size; in record__write()
255 rec->bytes_written += size; in record__write()
259 fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB)," in record__write()
271 static int record__aio_enabled(struct record *rec);
272 static int record__comp_enabled(struct record *rec);
278 void *buf, size_t size, off_t off) in record__aio_write() argument
282 cblock->aio_fildes = trace_fd; in record__aio_write()
283 cblock->aio_buf = buf; in record__aio_write()
284 cblock->aio_nbytes = size; in record__aio_write()
285 cblock->aio_offset = off; in record__aio_write()
286 cblock->aio_sigevent.sigev_notify = SIGEV_NONE; in record__aio_write()
293 cblock->aio_fildes = -1; in record__aio_write()
321 rem_size = cblock->aio_nbytes - written; in record__aio_complete()
324 cblock->aio_fildes = -1; in record__aio_complete()
326 * md->refcount is incremented in record__aio_pushfn() for in record__aio_complete()
330 perf_mmap__put(&md->core); in record__aio_complete()
338 rem_off = cblock->aio_offset + written; in record__aio_complete()
339 rem_buf = (void *)(cblock->aio_buf + written); in record__aio_complete()
340 record__aio_write(cblock, cblock->aio_fildes, in record__aio_complete()
350 struct aiocb **aiocb = md->aio.aiocb; in record__aio_sync()
351 struct aiocb *cblocks = md->aio.cblocks; in record__aio_sync()
357 for (i = 0; i < md->aio.nr_cblocks; ++i) { in record__aio_sync()
358 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) { in record__aio_sync()
374 return -1; in record__aio_sync()
376 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) { in record__aio_sync()
384 struct record *rec;
386 size_t size; member
389 static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size) in record__aio_pushfn() argument
394 * map->core.base data pointed by buf is copied into free map->aio.data[] buffer in record__aio_pushfn()
399 * the kernel buffer earlier than other per-cpu kernel buffers are handled. in record__aio_pushfn()
403 * part of data from map->start till the upper bound and then the reminder in record__aio_pushfn()
407 if (record__comp_enabled(aio->rec)) { in record__aio_pushfn()
408 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, in record__aio_pushfn()
409 mmap__mmap_len(map) - aio->size, in record__aio_pushfn()
410 buf, size); in record__aio_pushfn()
414 size = compressed; in record__aio_pushfn()
416 memcpy(aio->data + aio->size, buf, size); in record__aio_pushfn()
419 if (!aio->size) { in record__aio_pushfn()
421 * Increment map->refcount to guard map->aio.data[] buffer in record__aio_pushfn()
424 * map->aio.data[] buffer is complete. in record__aio_pushfn()
430 perf_mmap__get(&map->core); in record__aio_pushfn()
433 aio->size += size; in record__aio_pushfn()
435 return size; in record__aio_pushfn()
438 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off) in record__aio_push()
441 int trace_fd = rec->session->data->file.fd; in record__aio_push()
442 struct record_aio aio = { .rec = rec, .size = 0 }; in record__aio_push()
445 * Call record__aio_sync() to wait till map->aio.data[] buffer in record__aio_push()
450 aio.data = map->aio.data[idx]; in record__aio_push()
452 if (ret != 0) /* ret > 0 - no data, ret < 0 - error */ in record__aio_push()
455 rec->samples++; in record__aio_push()
456 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off); in record__aio_push()
458 *off += aio.size; in record__aio_push()
459 rec->bytes_written += aio.size; in record__aio_push()
464 * Decrement map->refcount incremented in record__aio_pushfn() in record__aio_push()
466 * map->refcount is decremented in record__aio_complete() after in record__aio_push()
469 perf_mmap__put(&map->core); in record__aio_push()
485 static void record__aio_mmap_read_sync(struct record *rec) in record__aio_mmap_read_sync()
488 struct evlist *evlist = rec->evlist; in record__aio_mmap_read_sync()
489 struct mmap *maps = evlist->mmap; in record__aio_mmap_read_sync()
494 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__aio_mmap_read_sync()
497 if (map->core.base) in record__aio_mmap_read_sync()
509 struct record_opts *opts = (struct record_opts *)opt->value; in record__aio_parse()
512 opts->nr_cblocks = 0; in record__aio_parse()
515 opts->nr_cblocks = strtol(str, NULL, 0); in record__aio_parse()
516 if (!opts->nr_cblocks) in record__aio_parse()
517 opts->nr_cblocks = nr_cblocks_default; in record__aio_parse()
525 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused, in record__aio_push()
528 return -1; in record__aio_push()
533 return -1; in record__aio_get_pos()
540 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused) in record__aio_mmap_read_sync()
545 static int record__aio_enabled(struct record *rec) in record__aio_enabled()
547 return rec->opts.nr_cblocks > 0; in record__aio_enabled()
556 struct record_opts *opts = (struct record_opts *)opt->value; in record__mmap_flush_parse()
569 opts->mmap_flush = parse_tag_value(str, tags); in record__mmap_flush_parse()
570 if (opts->mmap_flush == (int)-1) in record__mmap_flush_parse()
571 opts->mmap_flush = strtol(str, NULL, 0); in record__mmap_flush_parse()
574 if (!opts->mmap_flush) in record__mmap_flush_parse()
575 opts->mmap_flush = MMAP_FLUSH_DEFAULT; in record__mmap_flush_parse()
577 flush_max = evlist__mmap_size(opts->mmap_pages); in record__mmap_flush_parse()
579 if (opts->mmap_flush > flush_max) in record__mmap_flush_parse()
580 opts->mmap_flush = flush_max; in record__mmap_flush_parse()
590 struct record_opts *opts = opt->value; in record__parse_comp_level()
593 opts->comp_level = 0; in record__parse_comp_level()
596 opts->comp_level = strtol(str, NULL, 0); in record__parse_comp_level()
597 if (!opts->comp_level) in record__parse_comp_level()
598 opts->comp_level = comp_level_default; in record__parse_comp_level()
606 static int record__comp_enabled(struct record *rec) in record__comp_enabled()
608 return rec->opts.comp_level > 0; in record__comp_enabled()
616 struct record *rec = container_of(tool, struct record, tool); in process_synthesized_event()
617 return record__write(rec, NULL, event, event->header.size); in process_synthesized_event()
635 static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size) in record__pushfn() argument
637 struct record *rec = to; in record__pushfn()
640 ssize_t compressed = zstd_compress(rec->session, map, map->data, in record__pushfn()
641 mmap__mmap_len(map), bf, size); in record__pushfn()
646 size = compressed; in record__pushfn()
647 bf = map->data; in record__pushfn()
650 thread->samples++; in record__pushfn()
651 return record__write(rec, map, bf, size); in record__pushfn()
654 static volatile sig_atomic_t signr = -1;
657 static volatile sig_atomic_t done_fd = -1;
698 if (signr == -1) in record__sig_exit()
712 struct record *rec = container_of(tool, struct record, tool); in record__process_auxtrace()
713 struct perf_data *data = &rec->data; in record__process_auxtrace()
723 if (file_offset == -1) in record__process_auxtrace()
724 return -1; in record__process_auxtrace()
725 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index, in record__process_auxtrace()
731 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */ in record__process_auxtrace()
734 padding = 8 - padding; in record__process_auxtrace()
736 record__write(rec, map, event, event->header.size); in record__process_auxtrace()
745 static int record__auxtrace_mmap_read(struct record *rec, in record__auxtrace_mmap_read()
750 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read()
756 rec->samples++; in record__auxtrace_mmap_read()
761 static int record__auxtrace_mmap_read_snapshot(struct record *rec, in record__auxtrace_mmap_read_snapshot()
766 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read_snapshot()
768 rec->opts.auxtrace_snapshot_size); in record__auxtrace_mmap_read_snapshot()
773 rec->samples++; in record__auxtrace_mmap_read_snapshot()
778 static int record__auxtrace_read_snapshot_all(struct record *rec) in record__auxtrace_read_snapshot_all()
783 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) { in record__auxtrace_read_snapshot_all()
784 struct mmap *map = &rec->evlist->mmap[i]; in record__auxtrace_read_snapshot_all()
786 if (!map->auxtrace_mmap.base) in record__auxtrace_read_snapshot_all()
790 rc = -1; in record__auxtrace_read_snapshot_all()
798 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit) in record__read_auxtrace_snapshot()
804 if (auxtrace_record__snapshot_finish(rec->itr, on_exit)) in record__read_auxtrace_snapshot()
811 static int record__auxtrace_snapshot_exit(struct record *rec) in record__auxtrace_snapshot_exit()
817 auxtrace_record__snapshot_start(rec->itr)) in record__auxtrace_snapshot_exit()
818 return -1; in record__auxtrace_snapshot_exit()
822 return -1; in record__auxtrace_snapshot_exit()
827 static int record__auxtrace_init(struct record *rec) in record__auxtrace_init()
831 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts) in record__auxtrace_init()
834 return -EINVAL; in record__auxtrace_init()
837 if (!rec->itr) { in record__auxtrace_init()
838 rec->itr = auxtrace_record__init(rec->evlist, &err); in record__auxtrace_init()
843 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts, in record__auxtrace_init()
844 rec->opts.auxtrace_snapshot_opts); in record__auxtrace_init()
848 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts, in record__auxtrace_init()
849 rec->opts.auxtrace_sample_opts); in record__auxtrace_init()
853 auxtrace_regroup_aux_output(rec->evlist); in record__auxtrace_init()
855 return auxtrace_parse_filters(rec->evlist); in record__auxtrace_init()
861 int record__auxtrace_mmap_read(struct record *rec __maybe_unused, in record__auxtrace_mmap_read()
868 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused, in record__read_auxtrace_snapshot()
880 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused) in record__auxtrace_snapshot_exit()
885 static int record__auxtrace_init(struct record *rec __maybe_unused) in record__auxtrace_init()
898 if (evsel->core.attr.text_poke) in record__config_text_poke()
904 return -ENOMEM; in record__config_text_poke()
906 evsel->core.attr.text_poke = 1; in record__config_text_poke()
907 evsel->core.attr.ksymbol = 1; in record__config_text_poke()
908 evsel->immediate = true; in record__config_text_poke()
914 static int record__config_off_cpu(struct record *rec) in record__config_off_cpu()
916 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); in record__config_off_cpu()
919 static bool record__tracking_system_wide(struct record *rec) in record__tracking_system_wide()
921 struct evlist *evlist = rec->evlist; in record__tracking_system_wide()
925 * If non-dummy evsel exists, system_wide sideband is need to in record__tracking_system_wide()
938 static int record__config_tracking_events(struct record *rec) in record__config_tracking_events()
940 struct record_opts *opts = &rec->opts; in record__config_tracking_events()
941 struct evlist *evlist = rec->evlist; in record__config_tracking_events()
950 if (opts->target.initial_delay || target__has_cpu(&opts->target) || in record__config_tracking_events()
957 if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) in record__config_tracking_events()
962 return -ENOMEM; in record__config_tracking_events()
968 if (opts->target.initial_delay && !evsel->immediate && in record__config_tracking_events()
969 !target__has_cpu(&opts->target)) in record__config_tracking_events()
970 evsel->core.attr.enable_on_exec = 1; in record__config_tracking_events()
972 evsel->immediate = 1; in record__config_tracking_events()
983 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir); in record__kcore_readable()
1000 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir); in record__kcore_copy()
1011 thread_data->pipes.msg[0] = -1; in record__thread_data_init_pipes()
1012 thread_data->pipes.msg[1] = -1; in record__thread_data_init_pipes()
1013 thread_data->pipes.ack[0] = -1; in record__thread_data_init_pipes()
1014 thread_data->pipes.ack[1] = -1; in record__thread_data_init_pipes()
1019 if (pipe(thread_data->pipes.msg)) in record__thread_data_open_pipes()
1020 return -EINVAL; in record__thread_data_open_pipes()
1022 if (pipe(thread_data->pipes.ack)) { in record__thread_data_open_pipes()
1023 close(thread_data->pipes.msg[0]); in record__thread_data_open_pipes()
1024 thread_data->pipes.msg[0] = -1; in record__thread_data_open_pipes()
1025 close(thread_data->pipes.msg[1]); in record__thread_data_open_pipes()
1026 thread_data->pipes.msg[1] = -1; in record__thread_data_open_pipes()
1027 return -EINVAL; in record__thread_data_open_pipes()
1031 thread_data->pipes.msg[0], thread_data->pipes.msg[1], in record__thread_data_open_pipes()
1032 thread_data->pipes.ack[0], thread_data->pipes.ack[1]); in record__thread_data_open_pipes()
1039 if (thread_data->pipes.msg[0] != -1) { in record__thread_data_close_pipes()
1040 close(thread_data->pipes.msg[0]); in record__thread_data_close_pipes()
1041 thread_data->pipes.msg[0] = -1; in record__thread_data_close_pipes()
1043 if (thread_data->pipes.msg[1] != -1) { in record__thread_data_close_pipes()
1044 close(thread_data->pipes.msg[1]); in record__thread_data_close_pipes()
1045 thread_data->pipes.msg[1] = -1; in record__thread_data_close_pipes()
1047 if (thread_data->pipes.ack[0] != -1) { in record__thread_data_close_pipes()
1048 close(thread_data->pipes.ack[0]); in record__thread_data_close_pipes()
1049 thread_data->pipes.ack[0] = -1; in record__thread_data_close_pipes()
1051 if (thread_data->pipes.ack[1] != -1) { in record__thread_data_close_pipes()
1052 close(thread_data->pipes.ack[1]); in record__thread_data_close_pipes()
1053 thread_data->pipes.ack[1] = -1; in record__thread_data_close_pipes()
1059 return cpu_map__is_dummy(evlist->core.user_requested_cpus); in evlist__per_thread()
1064 int m, tm, nr_mmaps = evlist->core.nr_mmaps; in record__thread_data_init_maps()
1065 struct mmap *mmap = evlist->mmap; in record__thread_data_init_maps()
1066 struct mmap *overwrite_mmap = evlist->overwrite_mmap; in record__thread_data_init_maps()
1067 struct perf_cpu_map *cpus = evlist->core.all_cpus; in record__thread_data_init_maps()
1071 thread_data->nr_mmaps = nr_mmaps; in record__thread_data_init_maps()
1073 thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits, in record__thread_data_init_maps()
1074 thread_data->mask->maps.nbits); in record__thread_data_init_maps()
1076 thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1077 if (!thread_data->maps) in record__thread_data_init_maps()
1078 return -ENOMEM; in record__thread_data_init_maps()
1081 thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1082 if (!thread_data->overwrite_maps) { in record__thread_data_init_maps()
1083 zfree(&thread_data->maps); in record__thread_data_init_maps()
1084 return -ENOMEM; in record__thread_data_init_maps()
1088 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps); in record__thread_data_init_maps()
1090 for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) { in record__thread_data_init_maps()
1092 test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) { in record__thread_data_init_maps()
1093 if (thread_data->maps) { in record__thread_data_init_maps()
1094 thread_data->maps[tm] = &mmap[m]; in record__thread_data_init_maps()
1095 pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n", in record__thread_data_init_maps()
1098 if (thread_data->overwrite_maps) { in record__thread_data_init_maps()
1099 thread_data->overwrite_maps[tm] = &overwrite_mmap[m]; in record__thread_data_init_maps()
1100 pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n", in record__thread_data_init_maps()
1115 fdarray__init(&thread_data->pollfd, 64); in record__thread_data_init_pollfd()
1117 for (tm = 0; tm < thread_data->nr_mmaps; tm++) { in record__thread_data_init_pollfd()
1118 map = thread_data->maps ? thread_data->maps[tm] : NULL; in record__thread_data_init_pollfd()
1119 overwrite_map = thread_data->overwrite_maps ? in record__thread_data_init_pollfd()
1120 thread_data->overwrite_maps[tm] : NULL; in record__thread_data_init_pollfd()
1122 for (f = 0; f < evlist->core.pollfd.nr; f++) { in record__thread_data_init_pollfd()
1123 void *ptr = evlist->core.pollfd.priv[f].ptr; in record__thread_data_init_pollfd()
1126 pos = fdarray__dup_entry_from(&thread_data->pollfd, f, in record__thread_data_init_pollfd()
1127 &evlist->core.pollfd); in record__thread_data_init_pollfd()
1130 pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n", in record__thread_data_init_pollfd()
1131 thread_data, pos, evlist->core.pollfd.entries[f].fd); in record__thread_data_init_pollfd()
1139 static void record__free_thread_data(struct record *rec) in record__free_thread_data()
1142 struct record_thread *thread_data = rec->thread_data; in record__free_thread_data()
1147 for (t = 0; t < rec->nr_threads; t++) { in record__free_thread_data()
1154 zfree(&rec->thread_data); in record__free_thread_data()
1157 static int record__map_thread_evlist_pollfd_indexes(struct record *rec, in record__map_thread_evlist_pollfd_indexes()
1161 size_t x = rec->index_map_cnt; in record__map_thread_evlist_pollfd_indexes()
1163 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL)) in record__map_thread_evlist_pollfd_indexes()
1164 return -ENOMEM; in record__map_thread_evlist_pollfd_indexes()
1165 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1166 rec->index_map[x].thread_pollfd_index = thread_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1167 rec->index_map_cnt += 1; in record__map_thread_evlist_pollfd_indexes()
1171 static int record__update_evlist_pollfd_from_thread(struct record *rec, in record__update_evlist_pollfd_from_thread()
1175 struct pollfd *e_entries = evlist->core.pollfd.entries; in record__update_evlist_pollfd_from_thread()
1176 struct pollfd *t_entries = thread_data->pollfd.entries; in record__update_evlist_pollfd_from_thread()
1180 for (i = 0; i < rec->index_map_cnt; i++) { in record__update_evlist_pollfd_from_thread()
1181 int e_pos = rec->index_map[i].evlist_pollfd_index; in record__update_evlist_pollfd_from_thread()
1182 int t_pos = rec->index_map[i].thread_pollfd_index; in record__update_evlist_pollfd_from_thread()
1187 err = -EINVAL; in record__update_evlist_pollfd_from_thread()
1195 static int record__dup_non_perf_events(struct record *rec, in record__dup_non_perf_events()
1199 struct fdarray *fda = &evlist->core.pollfd; in record__dup_non_perf_events()
1202 for (i = 0; i < fda->nr; i++) { in record__dup_non_perf_events()
1203 if (!(fda->priv[i].flags & fdarray_flag__non_perf_event)) in record__dup_non_perf_events()
1205 ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda); in record__dup_non_perf_events()
1210 pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n", in record__dup_non_perf_events()
1211 thread_data, ret, fda->entries[i].fd); in record__dup_non_perf_events()
1221 static int record__alloc_thread_data(struct record *rec, struct evlist *evlist) in record__alloc_thread_data()
1226 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data))); in record__alloc_thread_data()
1227 if (!rec->thread_data) { in record__alloc_thread_data()
1229 return -ENOMEM; in record__alloc_thread_data()
1231 thread_data = rec->thread_data; in record__alloc_thread_data()
1233 for (t = 0; t < rec->nr_threads; t++) in record__alloc_thread_data()
1236 for (t = 0; t < rec->nr_threads; t++) { in record__alloc_thread_data()
1238 thread_data[t].mask = &rec->thread_masks[t]; in record__alloc_thread_data()
1250 thread_data[t].tid = -1; in record__alloc_thread_data()
1263 pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n", in record__alloc_thread_data()
1273 thread_data[t].ctlfd_pos = -1; /* Not used */ in record__alloc_thread_data()
1285 static int record__mmap_evlist(struct record *rec, in record__mmap_evlist()
1289 struct record_opts *opts = &rec->opts; in record__mmap_evlist()
1290 bool auxtrace_overwrite = opts->auxtrace_snapshot_mode || in record__mmap_evlist()
1291 opts->auxtrace_sample_mode; in record__mmap_evlist()
1294 if (opts->affinity != PERF_AFFINITY_SYS) in record__mmap_evlist()
1297 if (evlist__mmap_ex(evlist, opts->mmap_pages, in record__mmap_evlist()
1298 opts->auxtrace_mmap_pages, in record__mmap_evlist()
1300 opts->nr_cblocks, opts->affinity, in record__mmap_evlist()
1301 opts->mmap_flush, opts->comp_level) < 0) { in record__mmap_evlist()
1306 "or try again with a smaller value of -m/--mmap_pages.\n" in record__mmap_evlist()
1308 opts->mmap_pages, opts->auxtrace_mmap_pages); in record__mmap_evlist()
1309 return -errno; in record__mmap_evlist()
1314 return -errno; in record__mmap_evlist()
1316 return -EINVAL; in record__mmap_evlist()
1320 if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack)) in record__mmap_evlist()
1321 return -1; in record__mmap_evlist()
1328 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps); in record__mmap_evlist()
1330 pr_err("Failed to create data directory: %s\n", strerror(-ret)); in record__mmap_evlist()
1333 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__mmap_evlist()
1334 if (evlist->mmap) in record__mmap_evlist()
1335 evlist->mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1336 if (evlist->overwrite_mmap) in record__mmap_evlist()
1337 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1344 static int record__mmap(struct record *rec) in record__mmap()
1346 return record__mmap_evlist(rec, rec->evlist); in record__mmap()
1349 static int record__open(struct record *rec) in record__open()
1353 struct evlist *evlist = rec->evlist; in record__open()
1354 struct perf_session *session = rec->session; in record__open()
1355 struct record_opts *opts = &rec->opts; in record__open()
1362 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) { in record__open()
1363 if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) { in record__open()
1369 pos->core.leader != &pos->core && in record__open()
1370 pos->weak_group) { in record__open()
1374 rc = -errno; in record__open()
1375 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg)); in record__open()
1380 pos->supported = true; in record__open()
1396 pos->filter ?: "BPF", evsel__name(pos), errno, in record__open()
1398 rc = -1; in record__open()
1406 session->evlist = evlist; in record__open()
1412 static void set_timestamp_boundary(struct record *rec, u64 sample_time) in set_timestamp_boundary()
1414 if (rec->evlist->first_sample_time == 0) in set_timestamp_boundary()
1415 rec->evlist->first_sample_time = sample_time; in set_timestamp_boundary()
1418 rec->evlist->last_sample_time = sample_time; in set_timestamp_boundary()
1427 struct record *rec = container_of(tool, struct record, tool); in process_sample_event()
1429 set_timestamp_boundary(rec, sample->time); in process_sample_event()
1431 if (rec->buildid_all) in process_sample_event()
1434 rec->samples++; in process_sample_event()
1438 static int process_buildids(struct record *rec) in process_buildids()
1440 struct perf_session *session = rec->session; in process_buildids()
1442 if (perf_data__size(&rec->data) == 0) in process_buildids()
1447 * dso->long_name to a real pathname it found. In this case in process_buildids()
1451 * rather than build-id path (in debug directory). in process_buildids()
1452 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551 in process_buildids()
1457 * If --buildid-all is given, it marks all DSO regardless of hits, in process_buildids()
1462 if (rec->buildid_all && !rec->timestamp_boundary) in process_buildids()
1463 rec->tool.sample = NULL; in process_buildids()
1473 *As for guest kernel when processing subcommand record&report, in perf_event__synthesize_guest_os()
1483 pr_err("Couldn't record guest kernel [%d]'s reference" in perf_event__synthesize_guest_os()
1484 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1493 pr_err("Couldn't record guest kernel [%d]'s reference" in perf_event__synthesize_guest_os()
1494 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1498 .size = sizeof(struct perf_event_header),
1503 .size = sizeof(struct perf_event_header),
1507 static void record__adjust_affinity(struct record *rec, struct mmap *map) in record__adjust_affinity()
1509 if (rec->opts.affinity != PERF_AFFINITY_SYS && in record__adjust_affinity()
1510 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits, in record__adjust_affinity()
1511 thread->mask->affinity.nbits)) { in record__adjust_affinity()
1512 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1513 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits, in record__adjust_affinity()
1514 map->affinity_mask.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1515 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__adjust_affinity()
1516 (cpu_set_t *)thread->mask->affinity.bits); in record__adjust_affinity()
1518 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu()); in record__adjust_affinity()
1519 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity"); in record__adjust_affinity()
1524 static size_t process_comp_header(void *record, size_t increment) in process_comp_header() argument
1526 struct perf_record_compressed *event = record; in process_comp_header()
1527 size_t size = sizeof(*event); in process_comp_header() local
1530 event->header.size += increment; in process_comp_header()
1534 event->header.type = PERF_RECORD_COMPRESSED; in process_comp_header()
1535 event->header.size = size; in process_comp_header()
1537 return size; in process_comp_header()
1544 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1; in zstd_compress()
1545 struct zstd_data *zstd_data = &session->zstd_data; in zstd_compress()
1547 if (map && map->file) in zstd_compress()
1548 zstd_data = &map->zstd_data; in zstd_compress()
1555 if (map && map->file) { in zstd_compress()
1556 thread->bytes_transferred += src_size; in zstd_compress()
1557 thread->bytes_compressed += compressed; in zstd_compress()
1559 session->bytes_transferred += src_size; in zstd_compress()
1560 session->bytes_compressed += compressed; in zstd_compress()
1566 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist, in record__mmap_read_evlist()
1569 u64 bytes_written = rec->bytes_written; in record__mmap_read_evlist()
1574 int trace_fd = rec->data.file.fd; in record__mmap_read_evlist()
1580 nr_mmaps = thread->nr_mmaps; in record__mmap_read_evlist()
1581 maps = overwrite ? thread->overwrite_maps : thread->maps; in record__mmap_read_evlist()
1586 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING) in record__mmap_read_evlist()
1596 if (map->core.base) { in record__mmap_read_evlist()
1599 flush = map->core.flush; in record__mmap_read_evlist()
1600 map->core.flush = 1; in record__mmap_read_evlist()
1605 map->core.flush = flush; in record__mmap_read_evlist()
1606 rc = -1; in record__mmap_read_evlist()
1613 map->core.flush = flush; in record__mmap_read_evlist()
1614 rc = -1; in record__mmap_read_evlist()
1619 map->core.flush = flush; in record__mmap_read_evlist()
1622 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && in record__mmap_read_evlist()
1623 !rec->opts.auxtrace_sample_mode && in record__mmap_read_evlist()
1625 rc = -1; in record__mmap_read_evlist()
1638 * because per-cpu maps and files have data in record__mmap_read_evlist()
1641 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written) in record__mmap_read_evlist()
1650 static int record__mmap_read_all(struct record *rec, bool synch) in record__mmap_read_all()
1654 err = record__mmap_read_evlist(rec, rec->evlist, false, synch); in record__mmap_read_all()
1658 return record__mmap_read_evlist(rec, rec->evlist, true, synch); in record__mmap_read_all()
1664 struct perf_mmap *map = fda->priv[fd].ptr; in record__thread_munmap_filtered()
1678 thread->tid = gettid(); in record__thread()
1680 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1681 if (err == -1) in record__thread()
1683 thread->tid, strerror(errno)); in record__thread()
1685 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__thread()
1687 pollfd = &thread->pollfd; in record__thread()
1688 ctlfd_pos = thread->ctlfd_pos; in record__thread()
1691 unsigned long long hits = thread->samples; in record__thread()
1693 if (record__mmap_read_all(thread->rec, false) < 0 || terminate) in record__thread()
1696 if (hits == thread->samples) { in record__thread()
1698 err = fdarray__poll(pollfd, -1); in record__thread()
1705 thread->waking++; in record__thread()
1712 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) { in record__thread()
1714 close(thread->pipes.msg[0]); in record__thread()
1715 thread->pipes.msg[0] = -1; in record__thread()
1716 pollfd->entries[ctlfd_pos].fd = -1; in record__thread()
1717 pollfd->entries[ctlfd_pos].events = 0; in record__thread()
1720 pollfd->entries[ctlfd_pos].revents = 0; in record__thread()
1722 record__mmap_read_all(thread->rec, true); in record__thread()
1724 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1725 if (err == -1) in record__thread()
1727 thread->tid, strerror(errno)); in record__thread()
1732 static void record__init_features(struct record *rec) in record__init_features()
1734 struct perf_session *session = rec->session; in record__init_features()
1738 perf_header__set_feat(&session->header, feat); in record__init_features()
1740 if (rec->no_buildid) in record__init_features()
1741 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); in record__init_features()
1744 if (!have_tracepoints(&rec->evlist->core.entries)) in record__init_features()
1745 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); in record__init_features()
1748 if (!rec->opts.branch_stack) in record__init_features()
1749 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); in record__init_features()
1751 if (!rec->opts.full_auxtrace) in record__init_features()
1752 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); in record__init_features()
1754 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) in record__init_features()
1755 perf_header__clear_feat(&session->header, HEADER_CLOCKID); in record__init_features()
1757 if (!rec->opts.use_clockid) in record__init_features()
1758 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA); in record__init_features()
1761 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); in record__init_features()
1764 perf_header__clear_feat(&session->header, HEADER_COMPRESSED); in record__init_features()
1766 perf_header__clear_feat(&session->header, HEADER_STAT); in record__init_features()
1770 record__finish_output(struct record *rec) in record__finish_output()
1773 struct perf_data *data = &rec->data; in record__finish_output()
1776 if (data->is_pipe) in record__finish_output()
1779 rec->session->header.data_size += rec->bytes_written; in record__finish_output()
1780 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR); in record__finish_output()
1782 for (i = 0; i < data->dir.nr; i++) in record__finish_output()
1783 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR); in record__finish_output()
1786 if (!rec->no_buildid) { in record__finish_output()
1789 if (rec->buildid_all) in record__finish_output()
1790 dsos__hit_all(rec->session); in record__finish_output()
1792 perf_session__write_header(rec->session, rec->evlist, fd, true); in record__finish_output()
1797 static int record__synthesize_workload(struct record *rec, bool tail) in record__synthesize_workload()
1801 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize_workload()
1803 if (rec->opts.tail_synthesize != tail) in record__synthesize_workload()
1806 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid); in record__synthesize_workload()
1808 return -1; in record__synthesize_workload()
1810 err = perf_event__synthesize_thread_map(&rec->tool, thread_map, in record__synthesize_workload()
1812 &rec->session->machines.host, in record__synthesize_workload()
1814 rec->opts.sample_address); in record__synthesize_workload()
1819 static int write_finished_init(struct record *rec, bool tail) in write_finished_init()
1821 if (rec->opts.tail_synthesize != tail) in write_finished_init()
1827 static int record__synthesize(struct record *rec, bool tail);
1830 record__switch_output(struct record *rec, bool at_exit) in record__switch_output()
1832 struct perf_data *data = &rec->data; in record__switch_output()
1836 /* Same Size: "2015122520103046"*/ in record__switch_output()
1844 if (target__none(&rec->opts.target)) in record__switch_output()
1847 rec->samples = 0; in record__switch_output()
1852 return -EINVAL; in record__switch_output()
1856 rec->session->header.data_offset, in record__switch_output()
1859 rec->bytes_written = 0; in record__switch_output()
1860 rec->session->header.data_size = 0; in record__switch_output()
1864 fprintf(stderr, "[ perf record: Dump %s.%s ]\n", in record__switch_output()
1865 data->path, timestamp); in record__switch_output()
1867 if (rec->switch_output.num_files) { in record__switch_output()
1868 int n = rec->switch_output.cur_file + 1; in record__switch_output()
1870 if (n >= rec->switch_output.num_files) in record__switch_output()
1872 rec->switch_output.cur_file = n; in record__switch_output()
1873 if (rec->switch_output.filenames[n]) { in record__switch_output()
1874 remove(rec->switch_output.filenames[n]); in record__switch_output()
1875 zfree(&rec->switch_output.filenames[n]); in record__switch_output()
1877 rec->switch_output.filenames[n] = new_filename; in record__switch_output()
1887 * In 'perf record --switch-output' without -a, in record__switch_output()
1895 if (target__none(&rec->opts.target)) in record__switch_output()
1902 static void __record__save_lost_samples(struct record *rec, struct evsel *evsel, in __record__save_lost_samples()
1911 lost->lost = lost_count; in __record__save_lost_samples()
1912 if (evsel->core.ids) { in __record__save_lost_samples()
1913 sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx); in __record__save_lost_samples()
1914 sample.id = sid->id; in __record__save_lost_samples()
1918 evsel->core.attr.sample_type, &sample); in __record__save_lost_samples()
1919 lost->header.size = sizeof(*lost) + id_hdr_size; in __record__save_lost_samples()
1920 lost->header.misc = misc_flag; in __record__save_lost_samples()
1921 record__write(rec, NULL, lost, lost->header.size); in __record__save_lost_samples()
1924 static void record__read_lost_samples(struct record *rec) in record__read_lost_samples()
1926 struct perf_session *session = rec->session; in record__read_lost_samples()
1931 if (session->evlist == NULL) in record__read_lost_samples()
1934 evlist__for_each_entry(session->evlist, evsel) { in record__read_lost_samples()
1935 struct xyarray *xy = evsel->core.sample_id; in record__read_lost_samples()
1938 if (xy == NULL || evsel->core.fd == NULL) in record__read_lost_samples()
1940 if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) || in record__read_lost_samples()
1941 xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) { in record__read_lost_samples()
1950 if (perf_evsel__read(&evsel->core, x, y, &count) < 0) { in record__read_lost_samples()
1958 session->machines.host.id_hdr_size); in record__read_lost_samples()
1963 lost->header.type = PERF_RECORD_LOST_SAMPLES; in record__read_lost_samples()
1975 session->machines.host.id_hdr_size); in record__read_lost_samples()
1980 lost->header.type = PERF_RECORD_LOST_SAMPLES; in record__read_lost_samples()
2001 workload_exec_errno = info->si_value.sival_int; in workload_exec_failed_signal()
2012 if (evlist->mmap && evlist->mmap[0].core.base) in evlist__pick_pc()
2013 return evlist->mmap[0].core.base; in evlist__pick_pc()
2014 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base) in evlist__pick_pc()
2015 return evlist->overwrite_mmap[0].core.base; in evlist__pick_pc()
2020 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec) in record__pick_pc()
2022 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist); in record__pick_pc()
2028 static int record__synthesize(struct record *rec, bool tail) in record__synthesize()
2030 struct perf_session *session = rec->session; in record__synthesize()
2031 struct machine *machine = &session->machines.host; in record__synthesize()
2032 struct perf_data *data = &rec->data; in record__synthesize()
2033 struct record_opts *opts = &rec->opts; in record__synthesize()
2034 struct perf_tool *tool = &rec->tool; in record__synthesize()
2038 if (rec->opts.tail_synthesize != tail) in record__synthesize()
2041 if (data->is_pipe) { in record__synthesize()
2047 rec->bytes_written += err; in record__synthesize()
2058 session->evlist, machine); in record__synthesize()
2062 if (rec->opts.full_auxtrace) { in record__synthesize()
2063 err = perf_event__synthesize_auxtrace_info(rec->itr, tool, in record__synthesize()
2069 if (!evlist__exclude_kernel(rec->evlist)) { in record__synthesize()
2072 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" in record__synthesize()
2078 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" in record__synthesize()
2084 machines__process_guests(&session->machines, in record__synthesize()
2088 err = perf_event__synthesize_extra_attr(&rec->tool, in record__synthesize()
2089 rec->evlist, in record__synthesize()
2091 data->is_pipe); in record__synthesize()
2095 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads, in record__synthesize()
2103 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus, in record__synthesize()
2117 if (rec->opts.synth & PERF_SYNTH_CGROUP) { in record__synthesize()
2126 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2132 if (rec->opts.synth & PERF_SYNTH_TASK) { in record__synthesize()
2133 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize()
2135 err = __machine__synthesize_threads(machine, tool, &opts->target, in record__synthesize()
2136 rec->evlist->core.threads, in record__synthesize()
2137 f, needs_mmap, opts->sample_address, in record__synthesize()
2138 rec->opts.nr_threads_synthesize); in record__synthesize()
2141 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2152 struct record *rec = data; in record__process_signal_event()
2153 pthread_kill(rec->thread_id, SIGUSR2); in record__process_signal_event()
2157 static int record__setup_sb_evlist(struct record *rec) in record__setup_sb_evlist()
2159 struct record_opts *opts = &rec->opts; in record__setup_sb_evlist()
2161 if (rec->sb_evlist != NULL) { in record__setup_sb_evlist()
2163 * We get here if --switch-output-event populated the in record__setup_sb_evlist()
2167 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec); in record__setup_sb_evlist()
2168 rec->thread_id = pthread_self(); in record__setup_sb_evlist()
2171 if (!opts->no_bpf_event) { in record__setup_sb_evlist()
2172 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2173 rec->sb_evlist = evlist__new(); in record__setup_sb_evlist()
2175 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2177 return -1; in record__setup_sb_evlist()
2181 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) { in record__setup_sb_evlist()
2183 return -1; in record__setup_sb_evlist()
2187 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) { in record__setup_sb_evlist()
2189 opts->no_bpf_event = true; in record__setup_sb_evlist()
2195 static int record__init_clock(struct record *rec) in record__init_clock()
2197 struct perf_session *session = rec->session; in record__init_clock()
2202 if (!rec->opts.use_clockid) in record__init_clock()
2205 if (rec->opts.use_clockid && rec->opts.clockid_res_ns) in record__init_clock()
2206 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns; in record__init_clock()
2208 session->header.env.clock.clockid = rec->opts.clockid; in record__init_clock()
2212 return -1; in record__init_clock()
2215 if (clock_gettime(rec->opts.clockid, &ref_clockid)) { in record__init_clock()
2217 return -1; in record__init_clock()
2223 session->header.env.clock.tod_ns = ref; in record__init_clock()
2228 session->header.env.clock.clockid_ns = ref; in record__init_clock()
2232 static void hit_auxtrace_snapshot_trigger(struct record *rec) in hit_auxtrace_snapshot_trigger()
2237 if (auxtrace_record__snapshot_start(rec->itr)) in hit_auxtrace_snapshot_trigger()
2246 pid_t tid = thread_data->tid; in record__terminate_thread()
2248 close(thread_data->pipes.msg[1]); in record__terminate_thread()
2249 thread_data->pipes.msg[1] = -1; in record__terminate_thread()
2250 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack)); in record__terminate_thread()
2255 thread->tid, tid); in record__terminate_thread()
2260 static int record__start_threads(struct record *rec) in record__start_threads()
2262 int t, tt, err, ret = 0, nr_threads = rec->nr_threads; in record__start_threads()
2263 struct record_thread *thread_data = rec->thread_data; in record__start_threads()
2276 return -1; in record__start_threads()
2287 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)), in record__start_threads()
2288 (cpu_set_t *)(thread_data[t].mask->affinity.bits)); in record__start_threads()
2294 ret = -1; in record__start_threads()
2300 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid, in record__start_threads()
2304 thread->tid, rec->thread_data[t].tid); in record__start_threads()
2307 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__start_threads()
2308 (cpu_set_t *)thread->mask->affinity.bits); in record__start_threads()
2310 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__start_threads()
2317 ret = -1; in record__start_threads()
2323 static int record__stop_threads(struct record *rec) in record__stop_threads()
2326 struct record_thread *thread_data = rec->thread_data; in record__stop_threads()
2328 for (t = 1; t < rec->nr_threads; t++) in record__stop_threads()
2331 for (t = 0; t < rec->nr_threads; t++) { in record__stop_threads()
2332 rec->samples += thread_data[t].samples; in record__stop_threads()
2335 rec->session->bytes_transferred += thread_data[t].bytes_transferred; in record__stop_threads()
2336 rec->session->bytes_compressed += thread_data[t].bytes_compressed; in record__stop_threads()
2349 static unsigned long record__waking(struct record *rec) in record__waking()
2353 struct record_thread *thread_data = rec->thread_data; in record__waking()
2355 for (t = 0; t < rec->nr_threads; t++) in record__waking()
2361 static int __cmd_record(struct record *rec, int argc, const char **argv) in __cmd_record()
2366 struct perf_tool *tool = &rec->tool; in __cmd_record()
2367 struct record_opts *opts = &rec->opts; in __cmd_record()
2368 struct perf_data *data = &rec->data; in __cmd_record()
2381 if (rec->opts.record_namespaces) in __cmd_record()
2382 tool->namespace_events = true; in __cmd_record()
2384 if (rec->opts.record_cgroup) { in __cmd_record()
2386 tool->cgroup_events = true; in __cmd_record()
2389 return -1; in __cmd_record()
2393 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) { in __cmd_record()
2395 if (rec->opts.auxtrace_snapshot_mode) in __cmd_record()
2397 if (rec->switch_output.enabled) in __cmd_record()
2410 if (perf_data__is_pipe(&rec->data)) { in __cmd_record()
2412 return -1; in __cmd_record()
2414 if (rec->opts.full_auxtrace) { in __cmd_record()
2416 return -1; in __cmd_record()
2421 rec->session = session; in __cmd_record()
2423 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) { in __cmd_record()
2425 return -1; in __cmd_record()
2431 status = -1; in __cmd_record()
2434 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd); in __cmd_record()
2442 session->header.env.comp_type = PERF_COMP_ZSTD; in __cmd_record()
2443 session->header.env.comp_level = rec->opts.comp_level; in __cmd_record()
2445 if (rec->opts.kcore && in __cmd_record()
2446 !record__kcore_readable(&session->machines.host)) { in __cmd_record()
2448 return -1; in __cmd_record()
2452 return -1; in __cmd_record()
2457 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe, in __cmd_record()
2472 if (data->is_pipe && rec->evlist->core.nr_entries == 1) in __cmd_record()
2473 rec->opts.sample_id = true; in __cmd_record()
2475 evlist__uniquify_name(rec->evlist); in __cmd_record()
2478 pr_debug3("perf record opening and mmapping events\n"); in __cmd_record()
2480 err = -1; in __cmd_record()
2484 pr_debug3("perf record done opening and mmapping events\n"); in __cmd_record()
2485 session->header.env.comp_mmap_len = session->evlist->core.mmap_len; in __cmd_record()
2487 if (rec->opts.kcore) { in __cmd_record()
2488 err = record__kcore_copy(&session->machines.host, data); in __cmd_record()
2499 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) { in __cmd_record()
2501 rec->tool.ordered_events = false; in __cmd_record()
2504 if (evlist__nr_groups(rec->evlist) == 0) in __cmd_record()
2505 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); in __cmd_record()
2507 if (data->is_pipe) { in __cmd_record()
2512 err = perf_session__write_header(session, rec->evlist, fd, false); in __cmd_record()
2517 err = -1; in __cmd_record()
2518 if (!rec->no_buildid in __cmd_record()
2519 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { in __cmd_record()
2521 "Use --no-buildid to profile anyway.\n"); in __cmd_record()
2533 if (rec->realtime_prio) { in __cmd_record()
2536 param.sched_priority = rec->realtime_prio; in __cmd_record()
2539 err = -1; in __cmd_record()
2552 if (!target__none(&opts->target) && !opts->target.initial_delay) in __cmd_record()
2553 evlist__enable(rec->evlist); in __cmd_record()
2559 struct machine *machine = &session->machines.host; in __cmd_record()
2563 event = malloc(sizeof(event->comm) + machine->id_hdr_size); in __cmd_record()
2565 err = -ENOMEM; in __cmd_record()
2576 rec->evlist->workload.pid, in __cmd_record()
2581 if (tgid == -1) in __cmd_record()
2584 event = malloc(sizeof(event->namespaces) + in __cmd_record()
2586 machine->id_hdr_size); in __cmd_record()
2588 err = -ENOMEM; in __cmd_record()
2596 rec->evlist->workload.pid, in __cmd_record()
2601 evlist__start_workload(rec->evlist); in __cmd_record()
2604 if (opts->target.initial_delay) { in __cmd_record()
2606 if (opts->target.initial_delay > 0) { in __cmd_record()
2607 usleep(opts->target.initial_delay * USEC_PER_MSEC); in __cmd_record()
2608 evlist__enable(rec->evlist); in __cmd_record()
2613 err = event_enable_timer__start(rec->evlist->eet); in __cmd_record()
2618 pr_debug3("perf record has started\n"); in __cmd_record()
2634 unsigned long long hits = thread->samples; in __cmd_record()
2637 * rec->evlist->bkw_mmap_state is possible to be in __cmd_record()
2639 * hits != rec->samples in previous round. in __cmd_record()
2645 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING); in __cmd_record()
2650 err = -1; in __cmd_record()
2660 err = -1; in __cmd_record()
2675 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING) in __cmd_record()
2684 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING); in __cmd_record()
2687 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n", in __cmd_record()
2689 thread->waking = 0; in __cmd_record()
2698 /* re-arm the alarm */ in __cmd_record()
2699 if (rec->switch_output.time) in __cmd_record()
2700 alarm(rec->switch_output.time); in __cmd_record()
2703 if (hits == thread->samples) { in __cmd_record()
2706 err = fdarray__poll(&thread->pollfd, -1); in __cmd_record()
2713 thread->waking++; in __cmd_record()
2715 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP, in __cmd_record()
2719 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread); in __cmd_record()
2724 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) { in __cmd_record()
2728 evlist__ctlfd_ack(rec->evlist); in __cmd_record()
2744 err = event_enable_timer__process(rec->evlist->eet); in __cmd_record()
2757 if (done && !disabled && !target__none(&opts->target)) { in __cmd_record()
2759 evlist__disable(rec->evlist); in __cmd_record()
2767 if (opts->auxtrace_snapshot_on_exit) in __cmd_record()
2774 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels); in __cmd_record()
2778 err = -1; in __cmd_record()
2783 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", in __cmd_record()
2788 if (target__none(&rec->opts.target)) in __cmd_record()
2796 evlist__finalize_ctlfd(rec->evlist); in __cmd_record()
2799 if (rec->session->bytes_transferred && rec->session->bytes_compressed) { in __cmd_record()
2800 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed; in __cmd_record()
2801 session->header.env.comp_ratio = ratio + 0.5; in __cmd_record()
2808 kill(rec->evlist->workload.pid, SIGTERM); in __cmd_record()
2821 if (rec->off_cpu) in __cmd_record()
2822 rec->bytes_written += off_cpu_write(rec->session); in __cmd_record()
2827 rec->samples = 0; in __cmd_record()
2830 if (!rec->timestamp_filename) { in __cmd_record()
2845 const char *postfix = rec->timestamp_filename ? in __cmd_record()
2848 if (rec->samples && !rec->opts.full_auxtrace) in __cmd_record()
2850 " (%" PRIu64 " samples)", rec->samples); in __cmd_record()
2854 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s", in __cmd_record()
2856 data->path, postfix, samples); in __cmd_record()
2859 rec->session->bytes_transferred / 1024.0 / 1024.0, in __cmd_record()
2869 done_fd = -1; in __cmd_record()
2874 zstd_fini(&session->zstd_data); in __cmd_record()
2877 if (!opts->no_bpf_event) in __cmd_record()
2878 evlist__stop_sb_thread(rec->sb_evlist); in __cmd_record()
2886 pr_debug("callchain: type %s\n", str[callchain->record_mode]); in callchain_debug()
2888 if (callchain->record_mode == CALLCHAIN_DWARF) in callchain_debug()
2889 pr_debug("callchain: stack dump size %d\n", in callchain_debug()
2890 callchain->dump_size); in callchain_debug()
2893 int record_opts__parse_callchain(struct record_opts *record, in record_opts__parse_callchain() argument
2898 callchain->enabled = !unset; in record_opts__parse_callchain()
2900 /* --no-call-graph */ in record_opts__parse_callchain()
2902 callchain->record_mode = CALLCHAIN_NONE; in record_opts__parse_callchain()
2910 if (callchain->record_mode == CALLCHAIN_DWARF) in record_opts__parse_callchain()
2911 record->sample_address = true; in record_opts__parse_callchain()
2922 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset); in record_parse_callchain_opt()
2929 struct callchain_param *callchain = opt->value; in record_callchain_opt()
2931 callchain->enabled = true; in record_callchain_opt()
2933 if (callchain->record_mode == CALLCHAIN_NONE) in record_callchain_opt()
2934 callchain->record_mode = CALLCHAIN_FP; in record_callchain_opt()
2942 struct record *rec = cb; in perf_record_config()
2944 if (!strcmp(var, "record.build-id")) { in perf_record_config()
2946 rec->no_buildid_cache = false; in perf_record_config()
2947 else if (!strcmp(value, "no-cache")) in perf_record_config()
2948 rec->no_buildid_cache = true; in perf_record_config()
2950 rec->no_buildid = true; in perf_record_config()
2952 rec->buildid_mmap = true; in perf_record_config()
2954 return -1; in perf_record_config()
2957 if (!strcmp(var, "record.call-graph")) { in perf_record_config()
2958 var = "call-graph.record-mode"; in perf_record_config()
2962 if (!strcmp(var, "record.aio")) { in perf_record_config()
2963 rec->opts.nr_cblocks = strtol(value, NULL, 0); in perf_record_config()
2964 if (!rec->opts.nr_cblocks) in perf_record_config()
2965 rec->opts.nr_cblocks = nr_cblocks_default; in perf_record_config()
2968 if (!strcmp(var, "record.debuginfod")) { in perf_record_config()
2969 rec->debuginfod.urls = strdup(value); in perf_record_config()
2970 if (!rec->debuginfod.urls) in perf_record_config()
2971 return -ENOMEM; in perf_record_config()
2972 rec->debuginfod.set = true; in perf_record_config()
2980 struct record *rec = (struct record *)opt->value; in record__parse_event_enable_time()
2982 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset); in record__parse_event_enable_time()
2987 struct record_opts *opts = (struct record_opts *)opt->value; in record__parse_affinity()
2993 opts->affinity = PERF_AFFINITY_NODE; in record__parse_affinity()
2995 opts->affinity = PERF_AFFINITY_CPU; in record__parse_affinity()
3002 mask->nbits = nr_bits; in record__mmap_cpu_mask_alloc()
3003 mask->bits = bitmap_zalloc(mask->nbits); in record__mmap_cpu_mask_alloc()
3004 if (!mask->bits) in record__mmap_cpu_mask_alloc()
3005 return -ENOMEM; in record__mmap_cpu_mask_alloc()
3012 bitmap_free(mask->bits); in record__mmap_cpu_mask_free()
3013 mask->nbits = 0; in record__mmap_cpu_mask_free()
3020 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits); in record__thread_mask_alloc()
3022 mask->affinity.bits = NULL; in record__thread_mask_alloc()
3026 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits); in record__thread_mask_alloc()
3028 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_alloc()
3029 mask->maps.bits = NULL; in record__thread_mask_alloc()
3037 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_free()
3038 record__mmap_cpu_mask_free(&mask->affinity); in record__thread_mask_free()
3044 struct record_opts *opts = opt->value; in record__parse_threads()
3047 opts->threads_spec = THREAD_SPEC__CPU; in record__parse_threads()
3051 opts->threads_user_spec = strdup(str); in record__parse_threads()
3052 if (!opts->threads_user_spec) in record__parse_threads()
3053 return -ENOMEM; in record__parse_threads()
3054 opts->threads_spec = THREAD_SPEC__USER; in record__parse_threads()
3058 opts->threads_spec = s; in record__parse_threads()
3064 if (opts->threads_spec == THREAD_SPEC__USER) in record__parse_threads()
3065 pr_debug("threads_spec: %s\n", opts->threads_user_spec); in record__parse_threads()
3067 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]); in record__parse_threads()
3075 unsigned long *s = (unsigned long *)opt->value; in parse_output_max_size()
3091 if (val != (unsigned long) -1) { in parse_output_max_size()
3096 return -1; in parse_output_max_size()
3103 struct record_opts *opts = opt->value; in record__parse_mmap_pages()
3109 return -EINVAL; in record__parse_mmap_pages()
3113 return -ENOMEM; in record__parse_mmap_pages()
3123 opts->mmap_pages = mmap_pages; in record__parse_mmap_pages()
3135 opts->auxtrace_mmap_pages = mmap_pages; in record__parse_mmap_pages()
3150 struct record_opts *opts = opt->value; in parse_control_option()
3152 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close); in parse_control_option()
3155 static void switch_output_size_warn(struct record *rec) in switch_output_size_warn()
3157 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages); in switch_output_size_warn()
3158 struct switch_output *s = &rec->switch_output; in switch_output_size_warn()
3162 if (s->size < wakeup_size) { in switch_output_size_warn()
3166 pr_warning("WARNING: switch-output data size lower than " in switch_output_size_warn()
3167 "wakeup kernel buffer size (%s) " in switch_output_size_warn()
3172 static int switch_output_setup(struct record *rec) in switch_output_setup()
3174 struct switch_output *s = &rec->switch_output; in switch_output_setup()
3192 * If we're using --switch-output-events, then we imply its in switch_output_setup()
3193 * --switch-output=signal, as we'll send a SIGUSR2 from the side band in switch_output_setup()
3196 if (rec->switch_output_event_set) { in switch_output_setup()
3198 …pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n"); in switch_output_setup()
3204 if (!s->set) in switch_output_setup()
3208 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n"); in switch_output_setup()
3212 if (!strcmp(s->str, "signal")) { in switch_output_setup()
3214 s->signal = true; in switch_output_setup()
3215 pr_debug("switch-output with SIGUSR2 signal\n"); in switch_output_setup()
3219 val = parse_tag_value(s->str, tags_size); in switch_output_setup()
3220 if (val != (unsigned long) -1) { in switch_output_setup()
3221 s->size = val; in switch_output_setup()
3222 pr_debug("switch-output with %s size threshold\n", s->str); in switch_output_setup()
3226 val = parse_tag_value(s->str, tags_time); in switch_output_setup()
3227 if (val != (unsigned long) -1) { in switch_output_setup()
3228 s->time = val; in switch_output_setup()
3229 pr_debug("switch-output with %s time threshold (%lu seconds)\n", in switch_output_setup()
3230 s->str, s->time); in switch_output_setup()
3234 return -1; in switch_output_setup()
3237 rec->timestamp_filename = true; in switch_output_setup()
3238 s->enabled = true; in switch_output_setup()
3240 if (s->size && !rec->opts.no_buffering) in switch_output_setup()
3247 "perf record [<options>] [<command>]",
3248 "perf record [<options>] -- <command> [<options>]",
3260 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap()
3272 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap2()
3283 struct record *rec = container_of(tool, struct record, tool); in process_timestamp_boundary()
3285 set_timestamp_boundary(rec, sample->time); in process_timestamp_boundary()
3293 struct record_opts *opts = opt->value; in parse_record_synth_option()
3297 return -1; in parse_record_synth_option()
3299 opts->synth = parse_synth_opt(p); in parse_record_synth_option()
3302 if (opts->synth < 0) { in parse_record_synth_option()
3304 return -1; in parse_record_synth_option()
3313 * builtin-script, leave it here.
3319 static struct record record = { variable
3332 .ctl_fd = -1,
3333 .ctl_fd_ack = -1,
3356 .evlistp = &record.evlist,
3360 .evlistp = &record.sb_evlist,
3364 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3366 * from builtin-record.c, i.e. use record_opts,
3367 * evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
3374 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
3376 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3377 NULL, "don't record events from perf itself",
3379 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
3380 "record events on existing process id"),
3381 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
3382 "record events on existing thread id"),
3383 OPT_INTEGER('r', "realtime", &record.realtime_prio,
3385 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
3387 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
3389 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
3390 "system-wide collection from all CPUs"),
3391 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
3393 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
3394 OPT_STRING('o', "output", &record.data.path, "file",
3396 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3397 &record.opts.no_inherit_set,
3399 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3400 "synthesize non-sample events at the end of output"),
3401 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
3402 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
3403 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3405 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
3408 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3411 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3415 NULL, "enables call-graph recording" ,
3417 OPT_CALLBACK(0, "call-graph", &record.opts,
3423 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
3425 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
3426 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3427 "Record the sample physical addresses"),
3428 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3429 "Record the sampled data address data page size"),
3430 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3431 "Record the sampled code address (ip) page size"),
3432 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3433 OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3434 "Record the sample identifier"),
3435 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
3436 &record.opts.sample_time_set,
3437 "Record the sample timestamps"),
3438 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
3439 "Record the sample period"),
3440 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
3442 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3443 &record.no_buildid_cache_set,
3445 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3446 &record.no_buildid_set,
3448 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
3451 OPT_CALLBACK('D', "delay", &record, "ms",
3452 … "ms to wait before starting measurement after program start (-1: start with events disabled), "
3453 "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3455 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
3456 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
3459 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3463 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3466 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
3468 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
3470 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3471 "use per-thread mmaps"),
3472 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3474 " use '-I?' to list register names", parse_intr_regs),
3475 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3477 " use '--user-regs=?' to list register names", parse_user_regs),
3478 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3479 "Record running/enabled time of read (:S) events"),
3480 OPT_CALLBACK('k', "clockid", &record.opts,
3483 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
3485 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3487 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3489 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
3490 "Record namespaces events"),
3491 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3492 "Record cgroup events"),
3493 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3494 &record.opts.record_switch_events_set,
3495 "Record context switch events"),
3496 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3499 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3502 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3504 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3508 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3509 "Record build-id of all DSOs regardless of hits"),
3510 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3511 "Record build-id in map events"),
3512 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3514 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3515 "Record timestamp boundary (time of first/last samples)"),
3516 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
3517 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
3518 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
3520 OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3521 &record.switch_output_event_set, "switch output event",
3524 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3526 OPT_BOOLEAN(0, "dry-run", &dry_run,
3529 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
3533 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3537 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3538 …"Compress records using specified level (default: 1 - fastest compression, 22 - greatest compressi…
3541 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3542 "size", "Limit the maximum size of the output file", parse_output_max_size),
3543 OPT_UINTEGER(0, "num-thread-synthesize",
3544 &record.opts.nr_threads_synthesize,
3547 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3551 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
3552 …"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable…
3554 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3555 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
3557 OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
3558 "Fine-tune event synthesis: default=all", parse_record_synth_option),
3559 OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
3560 &record.debuginfod.set, "debuginfod urls",
3563 OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
3566 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
3582 if ((unsigned long)cpu.cpu > mask->nbits) in record__mmap_cpu_mask_init()
3583 return -ENODEV; in record__mmap_cpu_mask_init()
3584 __set_bit(cpu.cpu, mask->bits); in record__mmap_cpu_mask_init()
3596 return -ENOMEM; in record__mmap_cpu_mask_init_spec()
3598 bitmap_zero(mask->bits, mask->nbits); in record__mmap_cpu_mask_init_spec()
3600 return -ENODEV; in record__mmap_cpu_mask_init_spec()
3607 static void record__free_thread_masks(struct record *rec, int nr_threads) in record__free_thread_masks()
3611 if (rec->thread_masks) in record__free_thread_masks()
3613 record__thread_mask_free(&rec->thread_masks[t]); in record__free_thread_masks()
3615 zfree(&rec->thread_masks); in record__free_thread_masks()
3618 static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits) in record__alloc_thread_masks()
3622 rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks))); in record__alloc_thread_masks()
3623 if (!rec->thread_masks) { in record__alloc_thread_masks()
3625 return -ENOMEM; in record__alloc_thread_masks()
3629 ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits); in record__alloc_thread_masks()
3644 static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus) in record__init_thread_cpu_masks()
3652 rec->nr_threads = nr_cpus; in record__init_thread_cpu_masks()
3653 pr_debug("nr_threads: %d\n", rec->nr_threads); in record__init_thread_cpu_masks()
3655 for (t = 0; t < rec->nr_threads; t++) { in record__init_thread_cpu_masks()
3656 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits); in record__init_thread_cpu_masks()
3657 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits); in record__init_thread_cpu_masks()
3660 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps"); in record__init_thread_cpu_masks()
3662 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity"); in record__init_thread_cpu_masks()
3669 static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus, in record__init_thread_masks_spec()
3718 ret = -EINVAL; in record__init_thread_masks_spec()
3724 ret = -EINVAL; in record__init_thread_masks_spec()
3732 ret = -EINVAL; in record__init_thread_masks_spec()
3738 ret = -EINVAL; in record__init_thread_masks_spec()
3747 thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask)); in record__init_thread_masks_spec()
3750 ret = -ENOMEM; in record__init_thread_masks_spec()
3753 rec->thread_masks = thread_masks; in record__init_thread_masks_spec()
3754 rec->thread_masks[t] = thread_mask; in record__init_thread_masks_spec()
3757 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps"); in record__init_thread_masks_spec()
3759 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity"); in record__init_thread_masks_spec()
3768 rec->nr_threads = t; in record__init_thread_masks_spec()
3769 pr_debug("nr_threads: %d\n", rec->nr_threads); in record__init_thread_masks_spec()
3770 if (!rec->nr_threads) in record__init_thread_masks_spec()
3771 ret = -EINVAL; in record__init_thread_masks_spec()
3783 static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus) in record__init_thread_core_masks()
3791 return -ENOMEM; in record__init_thread_core_masks()
3794 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list, in record__init_thread_core_masks()
3795 topo->core_cpus_list, topo->core_cpus_lists); in record__init_thread_core_masks()
3801 static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus) in record__init_thread_package_masks()
3809 return -ENOMEM; in record__init_thread_package_masks()
3812 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list, in record__init_thread_package_masks()
3813 topo->package_cpus_list, topo->package_cpus_lists); in record__init_thread_package_masks()
3819 static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus) in record__init_thread_numa_masks()
3829 return -ENOMEM; in record__init_thread_numa_masks()
3832 spec = zalloc(topo->nr * sizeof(char *)); in record__init_thread_numa_masks()
3835 ret = -ENOMEM; in record__init_thread_numa_masks()
3838 for (s = 0; s < topo->nr; s++) in record__init_thread_numa_masks()
3839 spec[s] = topo->nodes[s].cpus; in record__init_thread_numa_masks()
3841 ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr); in record__init_thread_numa_masks()
3851 static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus) in record__init_thread_user_masks()
3858 for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) { in record__init_thread_user_masks()
3870 ret = -ENOMEM; in record__init_thread_user_masks()
3877 ret = -ENOMEM; in record__init_thread_user_masks()
3883 ret = -EINVAL; in record__init_thread_user_masks()
3890 ret = -ENOMEM; in record__init_thread_user_masks()
3897 ret = -ENOMEM; in record__init_thread_user_masks()
3921 static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus) in record__init_thread_default_masks()
3929 if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus)) in record__init_thread_default_masks()
3930 return -ENODEV; in record__init_thread_default_masks()
3932 rec->nr_threads = 1; in record__init_thread_default_masks()
3937 static int record__init_thread_masks(struct record *rec) in record__init_thread_masks()
3940 struct perf_cpu_map *cpus = rec->evlist->core.all_cpus; in record__init_thread_masks()
3945 if (evlist__per_thread(rec->evlist)) { in record__init_thread_masks()
3946 pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n"); in record__init_thread_masks()
3947 return -EINVAL; in record__init_thread_masks()
3950 switch (rec->opts.threads_spec) { in record__init_thread_masks()
3976 struct record *rec = &record; in cmd_record()
3983 set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true); in cmd_record()
3987 /* Disable eager loading of kernel symbols that adds overhead to perf record. */ in cmd_record()
3989 rec->opts.affinity = PERF_AFFINITY_SYS; in cmd_record()
3991 rec->evlist = evlist__new(); in cmd_record()
3992 if (rec->evlist == NULL) in cmd_record()
3993 return -ENOMEM; in cmd_record()
4008 perf_debuginfod_setup(&record.debuginfod); in cmd_record()
4010 /* Make system wide (-a) the default target. */ in cmd_record()
4011 if (!argc && target__none(&rec->opts.target)) in cmd_record()
4012 rec->opts.target.system_wide = true; in cmd_record()
4014 if (nr_cgroups && !rec->opts.target.system_wide) { in cmd_record()
4016 "cgroup monitoring only available in system-wide mode"); in cmd_record()
4020 if (rec->buildid_mmap) { in cmd_record()
4022 pr_err("Failed: no support to record build id in mmap events, update your kernel.\n"); in cmd_record()
4023 err = -EINVAL; in cmd_record()
4030 rec->opts.build_id = true; in cmd_record()
4032 rec->no_buildid = true; in cmd_record()
4035 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) { in cmd_record()
4037 err = -EINVAL; in cmd_record()
4041 if (rec->opts.kcore) in cmd_record()
4042 rec->opts.text_poke = true; in cmd_record()
4044 if (rec->opts.kcore || record__threads_enabled(rec)) in cmd_record()
4045 rec->data.is_dir = true; in cmd_record()
4048 if (rec->opts.affinity != PERF_AFFINITY_SYS) { in cmd_record()
4049 pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n"); in cmd_record()
4053 … pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n"); in cmd_record()
4058 if (rec->opts.comp_level != 0) { in cmd_record()
4060 rec->no_buildid = true; in cmd_record()
4063 if (rec->opts.record_switch_events && in cmd_record()
4066 parse_options_usage(record_usage, record_options, "switch-events", 0); in cmd_record()
4067 err = -EINVAL; in cmd_record()
4072 parse_options_usage(record_usage, record_options, "switch-output", 0); in cmd_record()
4073 err = -EINVAL; in cmd_record()
4077 if (rec->switch_output.time) { in cmd_record()
4079 alarm(rec->switch_output.time); in cmd_record()
4082 if (rec->switch_output.num_files) { in cmd_record()
4083 rec->switch_output.filenames = calloc(rec->switch_output.num_files, in cmd_record()
4085 if (!rec->switch_output.filenames) { in cmd_record()
4086 err = -EINVAL; in cmd_record()
4091 if (rec->timestamp_filename && record__threads_enabled(rec)) { in cmd_record()
4092 rec->timestamp_filename = false; in cmd_record()
4093 pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n"); in cmd_record()
4111 err = -ENOMEM; in cmd_record()
4113 if (rec->no_buildid_cache || rec->no_buildid) { in cmd_record()
4115 } else if (rec->switch_output.enabled) { in cmd_record()
4117 * In 'perf record --switch-output', disable buildid in cmd_record()
4122 * perf record --switch-output --no-no-buildid \ in cmd_record()
4123 * --no-no-buildid-cache in cmd_record()
4127 * if ((rec->no_buildid || !rec->no_buildid_set) && in cmd_record()
4128 * (rec->no_buildid_cache || !rec->no_buildid_cache_set)) in cmd_record()
4133 if (rec->no_buildid_set && !rec->no_buildid) in cmd_record()
4135 if (rec->no_buildid_cache_set && !rec->no_buildid_cache) in cmd_record()
4138 rec->no_buildid = true; in cmd_record()
4139 rec->no_buildid_cache = true; in cmd_record()
4144 if (record.opts.overwrite) in cmd_record()
4145 record.opts.tail_synthesize = true; in cmd_record()
4147 if (rec->evlist->core.nr_entries == 0) { in cmd_record()
4150 err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu"); in cmd_record()
4155 if (rec->opts.target.tid && !rec->opts.no_inherit_set) in cmd_record()
4156 rec->opts.no_inherit = true; in cmd_record()
4158 err = target__validate(&rec->opts.target); in cmd_record()
4160 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); in cmd_record()
4164 err = target__parse_uid(&rec->opts.target); in cmd_record()
4168 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); in cmd_record()
4171 err = -saved_errno; in cmd_record()
4175 /* Enable ignoring missing threads when -u/-p option is defined. */ in cmd_record()
4176 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid; in cmd_record()
4178 evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list); in cmd_record()
4181 arch__add_leaf_frame_record_opts(&rec->opts); in cmd_record()
4183 err = -ENOMEM; in cmd_record()
4184 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) { in cmd_record()
4185 if (rec->opts.target.pid != NULL) { in cmd_record()
4194 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts); in cmd_record()
4203 if (rec->opts.full_auxtrace) in cmd_record()
4204 rec->buildid_all = true; in cmd_record()
4206 if (rec->opts.text_poke) { in cmd_record()
4207 err = record__config_text_poke(rec->evlist); in cmd_record()
4214 if (rec->off_cpu) { in cmd_record()
4222 if (record_opts__config(&rec->opts)) { in cmd_record()
4223 err = -EINVAL; in cmd_record()
4239 if (rec->opts.nr_cblocks > nr_cblocks_max) in cmd_record()
4240 rec->opts.nr_cblocks = nr_cblocks_max; in cmd_record()
4241 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks); in cmd_record()
4243 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]); in cmd_record()
4244 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush); in cmd_record()
4246 if (rec->opts.comp_level > comp_level_max) in cmd_record()
4247 rec->opts.comp_level = comp_level_max; in cmd_record()
4248 pr_debug("comp level: %d\n", rec->opts.comp_level); in cmd_record()
4250 err = __cmd_record(&record, argc, argv); in cmd_record()
4252 evlist__delete(rec->evlist); in cmd_record()
4254 auxtrace_record__free(rec->itr); in cmd_record()
4256 record__free_thread_masks(rec, rec->nr_threads); in cmd_record()
4257 rec->nr_threads = 0; in cmd_record()
4258 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close); in cmd_record()
4264 struct record *rec = &record; in snapshot_sig_handler()
4274 struct record *rec = &record; in alarm_sig_handler()