Lines Matching +full:cpu +full:- +full:centric
1 // SPDX-License-Identifier: GPL-2.0
3 * builtin-record.c
6 * (or a CPU, or a PID) into the perf.data output file - for
11 #include "util/build-id.h"
12 #include <subcmd/parse-options.h>
14 #include "util/parse-events.h"
37 #include "util/parse-branch-options.h"
38 #include "util/parse-regs-options.h"
41 #include "util/perf-hooks.h"
42 #include "util/cpu-set-sched.h"
43 #include "util/synthetic-events.h"
44 #include "util/time-utils.h"
46 #include "util/bpf-event.h"
53 #include "util/bpf-filter.h"
144 "undefined", "cpu", "core", "package", "numa", "user"
195 "SYS", "NODE", "CPU"
216 return rec->opts.threads_spec; in record__threads_enabled()
221 return rec->switch_output.signal && in switch_output_signal()
227 return rec->switch_output.size && in switch_output_size()
229 (rec->bytes_written >= rec->switch_output.size); in switch_output_size()
234 return rec->switch_output.time && in switch_output_time()
240 return rec->bytes_written + rec->thread_bytes_written; in record__bytes_written()
245 return rec->output_max_size && in record__output_max_size_exceeded()
246 (record__bytes_written(rec) >= rec->output_max_size); in record__output_max_size_exceeded()
252 struct perf_data_file *file = &rec->session->data->file; in record__write()
254 if (map && map->file) in record__write()
255 file = map->file; in record__write()
259 return -1; in record__write()
262 if (map && map->file) { in record__write()
263 thread->bytes_written += size; in record__write()
264 rec->thread_bytes_written += size; in record__write()
266 rec->bytes_written += size; in record__write()
293 cblock->aio_fildes = trace_fd; in record__aio_write()
294 cblock->aio_buf = buf; in record__aio_write()
295 cblock->aio_nbytes = size; in record__aio_write()
296 cblock->aio_offset = off; in record__aio_write()
297 cblock->aio_sigevent.sigev_notify = SIGEV_NONE; in record__aio_write()
304 cblock->aio_fildes = -1; in record__aio_write()
332 rem_size = cblock->aio_nbytes - written; in record__aio_complete()
335 cblock->aio_fildes = -1; in record__aio_complete()
337 * md->refcount is incremented in record__aio_pushfn() for in record__aio_complete()
341 perf_mmap__put(&md->core); in record__aio_complete()
349 rem_off = cblock->aio_offset + written; in record__aio_complete()
350 rem_buf = (void *)(cblock->aio_buf + written); in record__aio_complete()
351 record__aio_write(cblock, cblock->aio_fildes, in record__aio_complete()
361 struct aiocb **aiocb = md->aio.aiocb; in record__aio_sync()
362 struct aiocb *cblocks = md->aio.cblocks; in record__aio_sync()
368 for (i = 0; i < md->aio.nr_cblocks; ++i) { in record__aio_sync()
369 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) { in record__aio_sync()
385 return -1; in record__aio_sync()
387 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) { in record__aio_sync()
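The record__aio_write() / record__aio_complete() / record__aio_sync() fragments above implement write-behind with POSIX AIO. Below is a minimal sketch of the same submit/suspend/reap pattern, with illustrative names and a single control block (the real code keeps nr_cblocks of them in flight):

#include <aio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

/* Write 'size' bytes at 'off' with one aiocb, polling for completion
 * the way record__aio_sync() does: block in aio_suspend(), then reap
 * with aio_error()/aio_return(); short writes are resubmitted. */
static int aio_write_all(int fd, const void *buf, size_t size, off_t off)
{
	const char *p = buf;
	struct aiocb cb;

	while (size) {
		memset(&cb, 0, sizeof(cb));
		cb.aio_fildes = fd;
		cb.aio_buf = (void *)p;
		cb.aio_nbytes = size;
		cb.aio_offset = off;
		cb.aio_sigevent.sigev_notify = SIGEV_NONE; /* poll, don't signal */

		if (aio_write(&cb))
			return -1;

		const struct aiocb *list[1] = { &cb };

		while (aio_error(&cb) == EINPROGRESS) {
			if (aio_suspend(list, 1, NULL) && errno != EINTR)
				return -1;
		}
		if (aio_error(&cb))
			return -1;

		ssize_t written = aio_return(&cb);

		p += written;		/* short write: resubmit the rest */
		off += written;
		size -= (size_t)written;
	}
	return 0;
}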
405 * map->core.base data pointed to by buf is copied into free map->aio.data[] buffer in record__aio_pushfn()
410 * the kernel buffer earlier than other per-cpu kernel buffers are handled. in record__aio_pushfn()
414 * part of data from map->start till the upper bound and then the remainder in record__aio_pushfn()
418 if (record__comp_enabled(aio->rec)) { in record__aio_pushfn()
419 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, in record__aio_pushfn()
420 mmap__mmap_len(map) - aio->size, in record__aio_pushfn()
427 memcpy(aio->data + aio->size, buf, size); in record__aio_pushfn()
430 if (!aio->size) { in record__aio_pushfn()
432 * Increment map->refcount to guard map->aio.data[] buffer in record__aio_pushfn()
435 * map->aio.data[] buffer is complete. in record__aio_pushfn()
441 perf_mmap__get(&map->core); in record__aio_pushfn()
444 aio->size += size; in record__aio_pushfn()
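The comment above describes copying out of the kernel's circular mmap buffer in up to two pieces when the region wraps past its upper bound. A standalone sketch of that two-chunk copy, with hypothetical names:

#include <string.h>

static void ring_copy(void *dst, const void *ring, size_t ring_size,
		      size_t head, size_t len)
{
	size_t first = ring_size - head;	/* bytes until the upper bound */

	if (len <= first) {
		memcpy(dst, (const char *)ring + head, len);
	} else {
		/* part of data from 'head' till the upper bound ... */
		memcpy(dst, (const char *)ring + head, first);
		/* ... then the remainder from the buffer start */
		memcpy((char *)dst + first, ring, len - first);
	}
}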
452 int trace_fd = rec->session->data->file.fd; in record__aio_push()
456 * Call record__aio_sync() to wait till map->aio.data[] buffer in record__aio_push()
461 aio.data = map->aio.data[idx]; in record__aio_push()
463 if (ret != 0) /* ret > 0 - no data, ret < 0 - error */ in record__aio_push()
466 rec->samples++; in record__aio_push()
467 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off); in record__aio_push()
470 rec->bytes_written += aio.size; in record__aio_push()
475 * Decrement map->refcount incremented in record__aio_pushfn() in record__aio_push()
477 * map->refcount is decremented in record__aio_complete() after in record__aio_push()
480 perf_mmap__put(&map->core); in record__aio_push()
499 struct evlist *evlist = rec->evlist; in record__aio_mmap_read_sync()
500 struct mmap *maps = evlist->mmap; in record__aio_mmap_read_sync()
505 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__aio_mmap_read_sync()
508 if (map->core.base) in record__aio_mmap_read_sync()
520 struct record_opts *opts = (struct record_opts *)opt->value; in record__aio_parse()
523 opts->nr_cblocks = 0; in record__aio_parse()
526 opts->nr_cblocks = strtol(str, NULL, 0); in record__aio_parse()
527 if (!opts->nr_cblocks) in record__aio_parse()
528 opts->nr_cblocks = nr_cblocks_default; in record__aio_parse()
539 return -1; in record__aio_push()
544 return -1; in record__aio_get_pos()
558 return rec->opts.nr_cblocks > 0; in record__aio_enabled()
567 struct record_opts *opts = (struct record_opts *)opt->value; in record__mmap_flush_parse()
580 opts->mmap_flush = parse_tag_value(str, tags); in record__mmap_flush_parse()
581 if (opts->mmap_flush == (int)-1) in record__mmap_flush_parse()
582 opts->mmap_flush = strtol(str, NULL, 0); in record__mmap_flush_parse()
585 if (!opts->mmap_flush) in record__mmap_flush_parse()
586 opts->mmap_flush = MMAP_FLUSH_DEFAULT; in record__mmap_flush_parse()
588 flush_max = evlist__mmap_size(opts->mmap_pages); in record__mmap_flush_parse()
590 if (opts->mmap_flush > flush_max) in record__mmap_flush_parse()
591 opts->mmap_flush = flush_max; in record__mmap_flush_parse()
601 struct record_opts *opts = opt->value; in record__parse_comp_level()
604 opts->comp_level = 0; in record__parse_comp_level()
607 opts->comp_level = strtol(str, NULL, 0); in record__parse_comp_level()
608 if (!opts->comp_level) in record__parse_comp_level()
609 opts->comp_level = comp_level_default; in record__parse_comp_level()
619 return rec->opts.comp_level > 0; in record__comp_enabled()
628 return record__write(rec, NULL, event, event->header.size); in process_synthesized_event()
651 ssize_t compressed = zstd_compress(rec->session, map, map->data, in record__pushfn()
658 bf = map->data; in record__pushfn()
661 thread->samples++; in record__pushfn()
665 static volatile sig_atomic_t signr = -1;
668 static volatile sig_atomic_t done_fd = -1;
709 if (signr == -1) in record__sig_exit()
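signr and done_fd above follow the usual async-signal-safe shutdown pattern: the handler only sets a volatile sig_atomic_t flag and pokes a wakeup fd so a blocked poll() returns. A hedged sketch (the eventfd choice and helper name are illustrative):

#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

static volatile sig_atomic_t done;
static int done_fd = -1;

static void sig_handler(int sig)
{
	(void)sig;
	done = 1;
	if (done_fd >= 0) {
		uint64_t one = 1;
		ssize_t unused = write(done_fd, &one, sizeof(one));

		(void)unused;	/* best effort; write() is async-signal-safe */
	}
}

static int install_done_handler(void)
{
	struct sigaction sa;

	done_fd = eventfd(0, EFD_NONBLOCK);
	if (done_fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sig_handler;
	sigaction(SIGINT, &sa, NULL);
	sigaction(SIGTERM, &sa, NULL);
	return done_fd;	/* poll this fd alongside the mmap fds */
}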
724 struct perf_data *data = &rec->data; in record__process_auxtrace()
734 if (file_offset == -1) in record__process_auxtrace()
735 return -1; in record__process_auxtrace()
736 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index, in record__process_auxtrace()
745 padding = 8 - padding; in record__process_auxtrace()
747 record__write(rec, map, event, event->header.size); in record__process_auxtrace()
761 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read()
767 rec->samples++; in record__auxtrace_mmap_read()
777 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read_snapshot()
779 rec->opts.auxtrace_snapshot_size); in record__auxtrace_mmap_read_snapshot()
784 rec->samples++; in record__auxtrace_mmap_read_snapshot()
794 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) { in record__auxtrace_read_snapshot_all()
795 struct mmap *map = &rec->evlist->mmap[i]; in record__auxtrace_read_snapshot_all()
797 if (!map->auxtrace_mmap.base) in record__auxtrace_read_snapshot_all()
801 rc = -1; in record__auxtrace_read_snapshot_all()
815 if (auxtrace_record__snapshot_finish(rec->itr, on_exit)) in record__read_auxtrace_snapshot()
828 auxtrace_record__snapshot_start(rec->itr)) in record__auxtrace_snapshot_exit()
829 return -1; in record__auxtrace_snapshot_exit()
833 return -1; in record__auxtrace_snapshot_exit()
842 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts) in record__auxtrace_init()
845 return -EINVAL; in record__auxtrace_init()
848 if (!rec->itr) { in record__auxtrace_init()
849 rec->itr = auxtrace_record__init(rec->evlist, &err); in record__auxtrace_init()
854 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts, in record__auxtrace_init()
855 rec->opts.auxtrace_snapshot_opts); in record__auxtrace_init()
859 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts, in record__auxtrace_init()
860 rec->opts.auxtrace_sample_opts); in record__auxtrace_init()
864 err = auxtrace_parse_aux_action(rec->evlist); in record__auxtrace_init()
868 return auxtrace_parse_filters(rec->evlist); in record__auxtrace_init()
911 if (evsel->core.attr.text_poke) in record__config_text_poke()
917 return -ENOMEM; in record__config_text_poke()
919 evsel->core.attr.text_poke = 1; in record__config_text_poke()
920 evsel->core.attr.ksymbol = 1; in record__config_text_poke()
921 evsel->immediate = true; in record__config_text_poke()
929 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); in record__config_off_cpu()
934 struct evlist *evlist = rec->evlist; in record__tracking_system_wide()
938 * If non-dummy evsel exists, system_wide sideband is needed to in record__tracking_system_wide()
953 struct record_opts *opts = &rec->opts; in record__config_tracking_events()
954 struct evlist *evlist = rec->evlist; in record__config_tracking_events()
963 if (opts->target.initial_delay || target__has_cpu(&opts->target) || in record__config_tracking_events()
970 if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) in record__config_tracking_events()
975 return -ENOMEM; in record__config_tracking_events()
981 if (opts->target.initial_delay && !evsel->immediate && in record__config_tracking_events()
982 !target__has_cpu(&opts->target)) in record__config_tracking_events()
983 evsel->core.attr.enable_on_exec = 1; in record__config_tracking_events()
985 evsel->immediate = 1; in record__config_tracking_events()
996 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir); in record__kcore_readable()
1013 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir); in record__kcore_copy()
1024 thread_data->pipes.msg[0] = -1; in record__thread_data_init_pipes()
1025 thread_data->pipes.msg[1] = -1; in record__thread_data_init_pipes()
1026 thread_data->pipes.ack[0] = -1; in record__thread_data_init_pipes()
1027 thread_data->pipes.ack[1] = -1; in record__thread_data_init_pipes()
1032 if (pipe(thread_data->pipes.msg)) in record__thread_data_open_pipes()
1033 return -EINVAL; in record__thread_data_open_pipes()
1035 if (pipe(thread_data->pipes.ack)) { in record__thread_data_open_pipes()
1036 close(thread_data->pipes.msg[0]); in record__thread_data_open_pipes()
1037 thread_data->pipes.msg[0] = -1; in record__thread_data_open_pipes()
1038 close(thread_data->pipes.msg[1]); in record__thread_data_open_pipes()
1039 thread_data->pipes.msg[1] = -1; in record__thread_data_open_pipes()
1040 return -EINVAL; in record__thread_data_open_pipes()
1044 thread_data->pipes.msg[0], thread_data->pipes.msg[1], in record__thread_data_open_pipes()
1045 thread_data->pipes.ack[0], thread_data->pipes.ack[1]); in record__thread_data_open_pipes()
1052 if (thread_data->pipes.msg[0] != -1) { in record__thread_data_close_pipes()
1053 close(thread_data->pipes.msg[0]); in record__thread_data_close_pipes()
1054 thread_data->pipes.msg[0] = -1; in record__thread_data_close_pipes()
1056 if (thread_data->pipes.msg[1] != -1) { in record__thread_data_close_pipes()
1057 close(thread_data->pipes.msg[1]); in record__thread_data_close_pipes()
1058 thread_data->pipes.msg[1] = -1; in record__thread_data_close_pipes()
1060 if (thread_data->pipes.ack[0] != -1) { in record__thread_data_close_pipes()
1061 close(thread_data->pipes.ack[0]); in record__thread_data_close_pipes()
1062 thread_data->pipes.ack[0] = -1; in record__thread_data_close_pipes()
1064 if (thread_data->pipes.ack[1] != -1) { in record__thread_data_close_pipes()
1065 close(thread_data->pipes.ack[1]); in record__thread_data_close_pipes()
1066 thread_data->pipes.ack[1] = -1; in record__thread_data_close_pipes()
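The msg/ack pipe pairs above give each worker thread a command channel and an acknowledgement channel. A simplified sketch of that handshake, with hypothetical names and trimmed error handling:

#include <unistd.h>

struct thread_pipes {
	int msg[2];	/* parent -> worker commands */
	int ack[2];	/* worker -> parent acknowledgements */
};

static int pipes_open(struct thread_pipes *p)
{
	if (pipe(p->msg))
		return -1;
	if (pipe(p->ack)) {
		close(p->msg[0]);
		close(p->msg[1]);
		return -1;
	}
	return 0;
}

/* Parent side: issue a command and block until the worker acks it. */
static int send_cmd_wait_ack(struct thread_pipes *p, char cmd)
{
	char ack;

	if (write(p->msg[1], &cmd, 1) != 1)
		return -1;
	return read(p->ack[0], &ack, 1) == 1 ? 0 : -1;
}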
1072 return cpu_map__is_dummy(evlist->core.user_requested_cpus); in evlist__per_thread()
1077 int m, tm, nr_mmaps = evlist->core.nr_mmaps; in record__thread_data_init_maps()
1078 struct mmap *mmap = evlist->mmap; in record__thread_data_init_maps()
1079 struct mmap *overwrite_mmap = evlist->overwrite_mmap; in record__thread_data_init_maps()
1080 struct perf_cpu_map *cpus = evlist->core.all_cpus; in record__thread_data_init_maps()
1084 thread_data->nr_mmaps = nr_mmaps; in record__thread_data_init_maps()
1086 thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits, in record__thread_data_init_maps()
1087 thread_data->mask->maps.nbits); in record__thread_data_init_maps()
1089 thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1090 if (!thread_data->maps) in record__thread_data_init_maps()
1091 return -ENOMEM; in record__thread_data_init_maps()
1094 thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *)); in record__thread_data_init_maps()
1095 if (!thread_data->overwrite_maps) { in record__thread_data_init_maps()
1096 zfree(&thread_data->maps); in record__thread_data_init_maps()
1097 return -ENOMEM; in record__thread_data_init_maps()
1101 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps); in record__thread_data_init_maps()
1103 for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) { in record__thread_data_init_maps()
1105 test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) { in record__thread_data_init_maps()
1106 if (thread_data->maps) { in record__thread_data_init_maps()
1107 thread_data->maps[tm] = &mmap[m]; in record__thread_data_init_maps()
1108 pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n", in record__thread_data_init_maps()
1109 thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m); in record__thread_data_init_maps()
1111 if (thread_data->overwrite_maps) { in record__thread_data_init_maps()
1112 thread_data->overwrite_maps[tm] = &overwrite_mmap[m]; in record__thread_data_init_maps()
1113 pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n", in record__thread_data_init_maps()
1114 thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m); in record__thread_data_init_maps()
1128 fdarray__init(&thread_data->pollfd, 64); in record__thread_data_init_pollfd()
1130 for (tm = 0; tm < thread_data->nr_mmaps; tm++) { in record__thread_data_init_pollfd()
1131 map = thread_data->maps ? thread_data->maps[tm] : NULL; in record__thread_data_init_pollfd()
1132 overwrite_map = thread_data->overwrite_maps ? in record__thread_data_init_pollfd()
1133 thread_data->overwrite_maps[tm] : NULL; in record__thread_data_init_pollfd()
1135 for (f = 0; f < evlist->core.pollfd.nr; f++) { in record__thread_data_init_pollfd()
1136 void *ptr = evlist->core.pollfd.priv[f].ptr; in record__thread_data_init_pollfd()
1139 pos = fdarray__dup_entry_from(&thread_data->pollfd, f, in record__thread_data_init_pollfd()
1140 &evlist->core.pollfd); in record__thread_data_init_pollfd()
1143 pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n", in record__thread_data_init_pollfd()
1144 thread_data, pos, evlist->core.pollfd.entries[f].fd); in record__thread_data_init_pollfd()
1155 struct record_thread *thread_data = rec->thread_data; in record__free_thread_data()
1160 for (t = 0; t < rec->nr_threads; t++) { in record__free_thread_data()
1167 zfree(&rec->thread_data); in record__free_thread_data()
1174 size_t x = rec->index_map_cnt; in record__map_thread_evlist_pollfd_indexes()
1176 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL)) in record__map_thread_evlist_pollfd_indexes()
1177 return -ENOMEM; in record__map_thread_evlist_pollfd_indexes()
1178 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1179 rec->index_map[x].thread_pollfd_index = thread_pollfd_index; in record__map_thread_evlist_pollfd_indexes()
1180 rec->index_map_cnt += 1; in record__map_thread_evlist_pollfd_indexes()
1188 struct pollfd *e_entries = evlist->core.pollfd.entries; in record__update_evlist_pollfd_from_thread()
1189 struct pollfd *t_entries = thread_data->pollfd.entries; in record__update_evlist_pollfd_from_thread()
1193 for (i = 0; i < rec->index_map_cnt; i++) { in record__update_evlist_pollfd_from_thread()
1194 int e_pos = rec->index_map[i].evlist_pollfd_index; in record__update_evlist_pollfd_from_thread()
1195 int t_pos = rec->index_map[i].thread_pollfd_index; in record__update_evlist_pollfd_from_thread()
1200 err = -EINVAL; in record__update_evlist_pollfd_from_thread()
1212 struct fdarray *fda = &evlist->core.pollfd; in record__dup_non_perf_events()
1215 for (i = 0; i < fda->nr; i++) { in record__dup_non_perf_events()
1216 if (!(fda->priv[i].flags & fdarray_flag__non_perf_event)) in record__dup_non_perf_events()
1218 ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda); in record__dup_non_perf_events()
1223 pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n", in record__dup_non_perf_events()
1224 thread_data, ret, fda->entries[i].fd); in record__dup_non_perf_events()
1239 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data))); in record__alloc_thread_data()
1240 if (!rec->thread_data) { in record__alloc_thread_data()
1242 return -ENOMEM; in record__alloc_thread_data()
1244 thread_data = rec->thread_data; in record__alloc_thread_data()
1246 for (t = 0; t < rec->nr_threads; t++) in record__alloc_thread_data()
1249 for (t = 0; t < rec->nr_threads; t++) { in record__alloc_thread_data()
1251 thread_data[t].mask = &rec->thread_masks[t]; in record__alloc_thread_data()
1263 thread_data[t].tid = -1; in record__alloc_thread_data()
1276 pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n", in record__alloc_thread_data()
1286 thread_data[t].ctlfd_pos = -1; /* Not used */ in record__alloc_thread_data()
1302 struct record_opts *opts = &rec->opts; in record__mmap_evlist()
1303 bool auxtrace_overwrite = opts->auxtrace_snapshot_mode || in record__mmap_evlist()
1304 opts->auxtrace_sample_mode; in record__mmap_evlist()
1307 if (opts->affinity != PERF_AFFINITY_SYS) in record__mmap_evlist()
1310 if (evlist__mmap_ex(evlist, opts->mmap_pages, in record__mmap_evlist()
1311 opts->auxtrace_mmap_pages, in record__mmap_evlist()
1313 opts->nr_cblocks, opts->affinity, in record__mmap_evlist()
1314 opts->mmap_flush, opts->comp_level) < 0) { in record__mmap_evlist()
1319 "or try again with a smaller value of -m/--mmap_pages.\n" in record__mmap_evlist()
1321 opts->mmap_pages, opts->auxtrace_mmap_pages); in record__mmap_evlist()
1322 return -errno; in record__mmap_evlist()
1327 return -errno; in record__mmap_evlist()
1329 return -EINVAL; in record__mmap_evlist()
1333 if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack)) in record__mmap_evlist()
1334 return -1; in record__mmap_evlist()
1341 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps); in record__mmap_evlist()
1343 pr_err("Failed to create data directory: %s\n", strerror(-ret)); in record__mmap_evlist()
1346 for (i = 0; i < evlist->core.nr_mmaps; i++) { in record__mmap_evlist()
1347 if (evlist->mmap) in record__mmap_evlist()
1348 evlist->mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1349 if (evlist->overwrite_mmap) in record__mmap_evlist()
1350 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i]; in record__mmap_evlist()
1359 return record__mmap_evlist(rec, rec->evlist); in record__mmap()
1366 struct evlist *evlist = rec->evlist; in record__open()
1367 struct perf_session *session = rec->session; in record__open()
1368 struct record_opts *opts = &rec->opts; in record__open()
1373 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) { in record__open()
1374 if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) { in record__open()
1380 pos->core.leader != &pos->core && in record__open()
1381 pos->weak_group) { in record__open()
1385 rc = -errno; in record__open()
1386 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg)); in record__open()
1391 pos->supported = true; in record__open()
1405 if (evlist__apply_filters(evlist, &pos, &opts->target)) { in record__open()
1407 pos->filter ?: "BPF", evsel__name(pos), errno, in record__open()
1409 rc = -1; in record__open()
1417 session->evlist = evlist; in record__open()
1425 if (rec->evlist->first_sample_time == 0) in set_timestamp_boundary()
1426 rec->evlist->first_sample_time = sample_time; in set_timestamp_boundary()
1429 rec->evlist->last_sample_time = sample_time; in set_timestamp_boundary()
1440 set_timestamp_boundary(rec, sample->time); in process_sample_event()
1442 if (rec->buildid_all) in process_sample_event()
1445 rec->samples++; in process_sample_event()
1451 struct perf_session *session = rec->session; in process_buildids()
1453 if (perf_data__size(&rec->data) == 0) in process_buildids()
1458 * dso->long_name to a real pathname it found. In this case in process_buildids()
1462 * rather than build-id path (in debug directory). in process_buildids()
1463 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551 in process_buildids()
1468 * If --buildid-all is given, it marks all DSOs regardless of hits, in process_buildids()
1473 if (rec->buildid_all && !rec->timestamp_boundary) in process_buildids()
1474 rec->tool.sample = process_event_sample_stub; in process_buildids()
1495 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1505 " relocation symbol.\n", machine->pid); in perf_event__synthesize_guest_os()
1520 if (rec->opts.affinity != PERF_AFFINITY_SYS && in record__adjust_affinity()
1521 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits, in record__adjust_affinity()
1522 thread->mask->affinity.nbits)) { in record__adjust_affinity()
1523 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1524 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits, in record__adjust_affinity()
1525 map->affinity_mask.bits, thread->mask->affinity.nbits); in record__adjust_affinity()
1526 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__adjust_affinity()
1527 (cpu_set_t *)thread->mask->affinity.bits); in record__adjust_affinity()
1529 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu()); in record__adjust_affinity()
1530 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity"); in record__adjust_affinity()
1541 event->header.size += increment; in process_comp_header()
1545 event->header.type = PERF_RECORD_COMPRESSED; in process_comp_header()
1546 event->header.size = size; in process_comp_header()
1555 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1; in zstd_compress()
1556 struct zstd_data *zstd_data = &session->zstd_data; in zstd_compress()
1558 if (map && map->file) in zstd_compress()
1559 zstd_data = &map->zstd_data; in zstd_compress()
1566 if (map && map->file) { in zstd_compress()
1567 thread->bytes_transferred += src_size; in zstd_compress()
1568 thread->bytes_compressed += compressed; in zstd_compress()
1570 session->bytes_transferred += src_size; in zstd_compress()
1571 session->bytes_compressed += compressed; in zstd_compress()
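perf's zstd_compress() wraps libzstd's streaming interface and frames its output as PERF_RECORD_COMPRESSED events; the 16-bit header size field is why max_record_size is capped below PERF_SAMPLE_MAX_SIZE above. A sketch of the same framing using libzstd's one-shot API; struct comp_header and the hard-coded type value are illustrative stand-ins for perf's definitions:

#include <stdint.h>
#include <sys/types.h>
#include <zstd.h>

/* Event-style framing: a small header followed by the compressed payload. */
struct comp_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

static ssize_t compress_record(void *dst, size_t dst_cap,
			       const void *src, size_t src_size, int level)
{
	struct comp_header *hdr = dst;
	size_t compressed;

	if (dst_cap <= sizeof(*hdr))
		return -1;

	compressed = ZSTD_compress(hdr + 1, dst_cap - sizeof(*hdr),
				   src, src_size, level);
	if (ZSTD_isError(compressed) ||
	    sizeof(*hdr) + compressed > UINT16_MAX)
		return -1;	/* size dst with ZSTD_compressBound() */

	hdr->type = 81;	/* PERF_RECORD_COMPRESSED's value in perf's event.h */
	hdr->misc = 0;
	hdr->size = (uint16_t)(sizeof(*hdr) + compressed);
	return (ssize_t)hdr->size;
}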
1580 u64 bytes_written = rec->bytes_written; in record__mmap_read_evlist()
1585 int trace_fd = rec->data.file.fd; in record__mmap_read_evlist()
1591 nr_mmaps = thread->nr_mmaps; in record__mmap_read_evlist()
1592 maps = overwrite ? thread->overwrite_maps : thread->maps; in record__mmap_read_evlist()
1597 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING) in record__mmap_read_evlist()
1607 if (map->core.base) { in record__mmap_read_evlist()
1610 flush = map->core.flush; in record__mmap_read_evlist()
1611 map->core.flush = 1; in record__mmap_read_evlist()
1616 map->core.flush = flush; in record__mmap_read_evlist()
1617 rc = -1; in record__mmap_read_evlist()
1624 map->core.flush = flush; in record__mmap_read_evlist()
1625 rc = -1; in record__mmap_read_evlist()
1630 map->core.flush = flush; in record__mmap_read_evlist()
1633 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && in record__mmap_read_evlist()
1634 !rec->opts.auxtrace_sample_mode && in record__mmap_read_evlist()
1636 rc = -1; in record__mmap_read_evlist()
1649 * because per-cpu maps and files have data in record__mmap_read_evlist()
1652 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written) in record__mmap_read_evlist()
1665 err = record__mmap_read_evlist(rec, rec->evlist, false, synch); in record__mmap_read_all()
1669 return record__mmap_read_evlist(rec, rec->evlist, true, synch); in record__mmap_read_all()
1675 struct perf_mmap *map = fda->priv[fd].ptr; in record__thread_munmap_filtered()
1689 thread->tid = gettid(); in record__thread()
1691 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1692 if (err == -1) in record__thread()
1694 thread->tid, strerror(errno)); in record__thread()
1696 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__thread()
1698 pollfd = &thread->pollfd; in record__thread()
1699 ctlfd_pos = thread->ctlfd_pos; in record__thread()
1702 unsigned long long hits = thread->samples; in record__thread()
1704 if (record__mmap_read_all(thread->rec, false) < 0 || terminate) in record__thread()
1707 if (hits == thread->samples) { in record__thread()
1709 err = fdarray__poll(pollfd, -1); in record__thread()
1716 thread->waking++; in record__thread()
1723 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) { in record__thread()
1725 close(thread->pipes.msg[0]); in record__thread()
1726 thread->pipes.msg[0] = -1; in record__thread()
1727 pollfd->entries[ctlfd_pos].fd = -1; in record__thread()
1728 pollfd->entries[ctlfd_pos].events = 0; in record__thread()
1731 pollfd->entries[ctlfd_pos].revents = 0; in record__thread()
1733 record__mmap_read_all(thread->rec, true); in record__thread()
1735 err = write(thread->pipes.ack[1], &msg, sizeof(msg)); in record__thread()
1736 if (err == -1) in record__thread()
1738 thread->tid, strerror(errno)); in record__thread()
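record__thread() above is the per-thread reader loop: drain all mmaps, block in poll() only when a pass produced no new samples, and treat POLLHUP on the control pipe as the parent's request to exit. The shape of that loop, sketched with generic names:

#include <poll.h>

/* 'drain' consumes whatever is ready and returns how many events it
 * read; ctl_idx is the control pipe's slot in pfd[]. */
static void reader_loop(struct pollfd *pfd, int nfds, int ctl_idx,
			int (*drain)(void *), void *ctx)
{
	unsigned long long samples = 0, hits;

	for (;;) {
		hits = samples;
		samples += drain(ctx);

		if (samples == hits) {		/* nothing new: block */
			if (poll(pfd, nfds, -1) < 0)
				break;
			if (pfd[ctl_idx].revents & POLLHUP)
				break;		/* parent closed the msg pipe */
			pfd[ctl_idx].revents = 0;
		}
	}
	drain(ctx);	/* final synchronized flush before exiting */
}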
1745 struct perf_session *session = rec->session; in record__init_features()
1749 perf_header__set_feat(&session->header, feat); in record__init_features()
1751 if (rec->no_buildid) in record__init_features()
1752 perf_header__clear_feat(&session->header, HEADER_BUILD_ID); in record__init_features()
1754 if (!have_tracepoints(&rec->evlist->core.entries)) in record__init_features()
1755 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); in record__init_features()
1757 if (!rec->opts.branch_stack) in record__init_features()
1758 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); in record__init_features()
1760 if (!rec->opts.full_auxtrace) in record__init_features()
1761 perf_header__clear_feat(&session->header, HEADER_AUXTRACE); in record__init_features()
1763 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) in record__init_features()
1764 perf_header__clear_feat(&session->header, HEADER_CLOCKID); in record__init_features()
1766 if (!rec->opts.use_clockid) in record__init_features()
1767 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA); in record__init_features()
1770 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); in record__init_features()
1773 perf_header__clear_feat(&session->header, HEADER_COMPRESSED); in record__init_features()
1775 perf_header__clear_feat(&session->header, HEADER_STAT); in record__init_features()
1782 struct perf_data *data = &rec->data; in record__finish_output()
1785 if (data->is_pipe) { in record__finish_output()
1787 data->file.size = rec->bytes_written; in record__finish_output()
1791 rec->session->header.data_size += rec->bytes_written; in record__finish_output()
1792 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR); in record__finish_output()
1794 for (i = 0; i < data->dir.nr; i++) in record__finish_output()
1795 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR); in record__finish_output()
1798 if (!rec->no_buildid) { in record__finish_output()
1801 if (rec->buildid_all) in record__finish_output()
1802 perf_session__dsos_hit_all(rec->session); in record__finish_output()
1804 perf_session__write_header(rec->session, rec->evlist, fd, true); in record__finish_output()
1813 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize_workload()
1815 if (rec->opts.tail_synthesize != tail) in record__synthesize_workload()
1818 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid); in record__synthesize_workload()
1820 return -1; in record__synthesize_workload()
1822 err = perf_event__synthesize_thread_map(&rec->tool, thread_map, in record__synthesize_workload()
1824 &rec->session->machines.host, in record__synthesize_workload()
1826 rec->opts.sample_address); in record__synthesize_workload()
1833 if (rec->opts.tail_synthesize != tail) in write_finished_init()
1844 struct perf_data *data = &rec->data; in record__switch_output()
1856 if (target__none(&rec->opts.target)) in record__switch_output()
1859 rec->samples = 0; in record__switch_output()
1864 return -EINVAL; in record__switch_output()
1868 rec->session->header.data_offset, in record__switch_output()
1871 rec->bytes_written = 0; in record__switch_output()
1872 rec->session->header.data_size = 0; in record__switch_output()
1877 data->path, timestamp); in record__switch_output()
1880 if (rec->switch_output.num_files) { in record__switch_output()
1881 int n = rec->switch_output.cur_file + 1; in record__switch_output()
1883 if (n >= rec->switch_output.num_files) in record__switch_output()
1885 rec->switch_output.cur_file = n; in record__switch_output()
1886 if (rec->switch_output.filenames[n]) { in record__switch_output()
1887 remove(rec->switch_output.filenames[n]); in record__switch_output()
1888 zfree(&rec->switch_output.filenames[n]); in record__switch_output()
1890 rec->switch_output.filenames[n] = new_filename; in record__switch_output()
1900 * In 'perf record --switch-output' without -a, in record__switch_output()
1908 if (target__none(&rec->opts.target)) in record__switch_output()
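The fragments above rotate perf.data outputs through a fixed-size ring when --switch-max-files is set, deleting the file about to be reused. A standalone sketch of that ring; field names mirror the fragments but the code is simplified:

#include <stdio.h>
#include <stdlib.h>

struct switch_output_ring {
	int num_files;
	int cur_file;
	char **filenames;	/* num_files slots, NULL when unused */
};

static void ring_store(struct switch_output_ring *s, char *new_filename)
{
	int n = s->cur_file + 1;

	if (n >= s->num_files)
		n = 0;			/* wrap around */
	s->cur_file = n;
	if (s->filenames[n]) {
		remove(s->filenames[n]);	/* evict the oldest file */
		free(s->filenames[n]);
	}
	s->filenames[n] = new_filename;		/* takes ownership */
}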
1925 lost->lost = lost_count; in __record__save_lost_samples()
1926 if (evsel->core.ids) { in __record__save_lost_samples()
1927 sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx); in __record__save_lost_samples()
1928 sample.id = sid->id; in __record__save_lost_samples()
1932 evsel->core.attr.sample_type, &sample); in __record__save_lost_samples()
1933 lost->header.size = sizeof(*lost) + id_hdr_size; in __record__save_lost_samples()
1934 lost->header.misc = misc_flag; in __record__save_lost_samples()
1935 record__write(rec, NULL, lost, lost->header.size); in __record__save_lost_samples()
1941 struct perf_session *session = rec->session; in record__read_lost_samples()
1946 if (session->evlist == NULL) in record__read_lost_samples()
1949 evlist__for_each_entry(session->evlist, evsel) { in record__read_lost_samples()
1950 struct xyarray *xy = evsel->core.sample_id; in record__read_lost_samples()
1953 if (xy == NULL || evsel->core.fd == NULL) in record__read_lost_samples()
1955 if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) || in record__read_lost_samples()
1956 xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) { in record__read_lost_samples()
1965 if (perf_evsel__read(&evsel->core, x, y, &count) < 0) { in record__read_lost_samples()
2000 workload_exec_errno = info->si_value.sival_int; in workload_exec_failed_signal()
2011 if (evlist->mmap && evlist->mmap[0].core.base) in evlist__pick_pc()
2012 return evlist->mmap[0].core.base; in evlist__pick_pc()
2013 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base) in evlist__pick_pc()
2014 return evlist->overwrite_mmap[0].core.base; in evlist__pick_pc()
2021 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist); in record__pick_pc()
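The perf_event_mmap_page picked above is the ring buffer's control page. A hedged sketch of how a reader typically uses it: load data_head with acquire semantics, consume records, then publish data_tail with release semantics so the kernel can reuse the space (helper names are illustrative):

#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t ring_begin_read(struct perf_event_mmap_page *pc)
{
	/* pairs with the kernel's store-release of data_head */
	return __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
}

static void ring_end_read(struct perf_event_mmap_page *pc, uint64_t tail)
{
	/* tell the kernel everything up to 'tail' was consumed */
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}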
2029 struct perf_session *session = rec->session; in record__synthesize()
2030 struct machine *machine = &session->machines.host; in record__synthesize()
2031 struct perf_data *data = &rec->data; in record__synthesize()
2032 struct record_opts *opts = &rec->opts; in record__synthesize()
2033 struct perf_tool *tool = &rec->tool; in record__synthesize()
2037 if (rec->opts.tail_synthesize != tail) in record__synthesize()
2040 if (data->is_pipe) { in record__synthesize()
2046 rec->bytes_written += err; in record__synthesize()
2057 session->evlist, machine); in record__synthesize()
2061 if (rec->opts.full_auxtrace) { in record__synthesize()
2062 err = perf_event__synthesize_auxtrace_info(rec->itr, tool, in record__synthesize()
2068 if (!evlist__exclude_kernel(rec->evlist)) { in record__synthesize()
2083 machines__process_guests(&session->machines, in record__synthesize()
2087 err = perf_event__synthesize_extra_attr(&rec->tool, in record__synthesize()
2088 rec->evlist, in record__synthesize()
2090 data->is_pipe); in record__synthesize()
2094 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads, in record__synthesize()
2102 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus, in record__synthesize()
2105 pr_err("Couldn't synthesize cpu map.\n"); in record__synthesize()
2116 if (rec->opts.synth & PERF_SYNTH_CGROUP) { in record__synthesize()
2125 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2131 if (rec->opts.synth & PERF_SYNTH_TASK) { in record__synthesize()
2132 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize()
2134 err = __machine__synthesize_threads(machine, tool, &opts->target, in record__synthesize()
2135 rec->evlist->core.threads, in record__synthesize()
2136 f, needs_mmap, opts->sample_address, in record__synthesize()
2137 rec->opts.nr_threads_synthesize); in record__synthesize()
2140 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
2152 pthread_kill(rec->thread_id, SIGUSR2); in record__process_signal_event()
2158 struct record_opts *opts = &rec->opts; in record__setup_sb_evlist()
2160 if (rec->sb_evlist != NULL) { in record__setup_sb_evlist()
2162 * We get here if --switch-output-event populated the in record__setup_sb_evlist()
2166 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec); in record__setup_sb_evlist()
2167 rec->thread_id = pthread_self(); in record__setup_sb_evlist()
2170 if (!opts->no_bpf_event) { in record__setup_sb_evlist()
2171 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2172 rec->sb_evlist = evlist__new(); in record__setup_sb_evlist()
2174 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
2176 return -1; in record__setup_sb_evlist()
2180 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) { in record__setup_sb_evlist()
2182 return -1; in record__setup_sb_evlist()
2186 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) { in record__setup_sb_evlist()
2188 opts->no_bpf_event = true; in record__setup_sb_evlist()
2196 struct perf_session *session = rec->session; in record__init_clock()
2201 if (!rec->opts.use_clockid) in record__init_clock()
2204 if (rec->opts.use_clockid && rec->opts.clockid_res_ns) in record__init_clock()
2205 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns; in record__init_clock()
2207 session->header.env.clock.clockid = rec->opts.clockid; in record__init_clock()
2211 return -1; in record__init_clock()
2214 if (clock_gettime(rec->opts.clockid, &ref_clockid)) { in record__init_clock()
2216 return -1; in record__init_clock()
2222 session->header.env.clock.tod_ns = ref; in record__init_clock()
2227 session->header.env.clock.clockid_ns = ref; in record__init_clock()
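record__init_clock() stores paired time-of-day and session-clock references so timestamps can later be converted between the two domains. A sketch of that pairing; the real code reads the TOD side with gettimeofday(), for which CLOCK_REALTIME is equivalent here:

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static int clock_reference(clockid_t clockid, uint64_t *tod_ns, uint64_t *clk_ns)
{
	struct timespec tod, clk;

	/* sample both clocks back to back to minimize skew */
	if (clock_gettime(CLOCK_REALTIME, &tod) || clock_gettime(clockid, &clk))
		return -1;

	*tod_ns = (uint64_t)tod.tv_sec * NSEC_PER_SEC + tod.tv_nsec;
	*clk_ns = (uint64_t)clk.tv_sec * NSEC_PER_SEC + clk.tv_nsec;
	return 0;
}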
2236 if (auxtrace_record__snapshot_start(rec->itr)) in hit_auxtrace_snapshot_trigger()
2245 pid_t tid = thread_data->tid; in record__terminate_thread()
2247 close(thread_data->pipes.msg[1]); in record__terminate_thread()
2248 thread_data->pipes.msg[1] = -1; in record__terminate_thread()
2249 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack)); in record__terminate_thread()
2254 thread->tid, tid); in record__terminate_thread()
2261 int t, tt, err, ret = 0, nr_threads = rec->nr_threads; in record__start_threads()
2262 struct record_thread *thread_data = rec->thread_data; in record__start_threads()
2275 return -1; in record__start_threads()
2286 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)), in record__start_threads()
2287 (cpu_set_t *)(thread_data[t].mask->affinity.bits)); in record__start_threads()
2293 ret = -1; in record__start_threads()
2299 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid, in record__start_threads()
2303 thread->tid, rec->thread_data[t].tid); in record__start_threads()
2306 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity), in record__start_threads()
2307 (cpu_set_t *)thread->mask->affinity.bits); in record__start_threads()
2309 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu()); in record__start_threads()
2316 ret = -1; in record__start_threads()
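The affinity fragments above cast perf's own bitmaps to cpu_set_t for sched_setaffinity(). The same pinning expressed with the plain cpu_set_t macros, as a sketch:

#define _GNU_SOURCE
#include <sched.h>

static int pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* pid 0 == the calling thread, as in the fragments above */
	return sched_setaffinity(0, sizeof(set), &set);
}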
2325 struct record_thread *thread_data = rec->thread_data; in record__stop_threads()
2327 for (t = 1; t < rec->nr_threads; t++) in record__stop_threads()
2330 for (t = 0; t < rec->nr_threads; t++) { in record__stop_threads()
2331 rec->samples += thread_data[t].samples; in record__stop_threads()
2334 rec->session->bytes_transferred += thread_data[t].bytes_transferred; in record__stop_threads()
2335 rec->session->bytes_compressed += thread_data[t].bytes_compressed; in record__stop_threads()
2352 struct record_thread *thread_data = rec->thread_data; in record__waking()
2354 for (t = 0; t < rec->nr_threads; t++) in record__waking()
2365 struct perf_tool *tool = &rec->tool; in __cmd_record()
2366 struct record_opts *opts = &rec->opts; in __cmd_record()
2367 struct perf_data *data = &rec->data; in __cmd_record()
2380 if (rec->opts.record_cgroup) { in __cmd_record()
2383 return -1; in __cmd_record()
2387 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) { in __cmd_record()
2389 if (rec->opts.auxtrace_snapshot_mode) in __cmd_record()
2391 if (rec->switch_output.enabled) in __cmd_record()
2398 tool->sample = process_sample_event; in __cmd_record()
2399 tool->fork = perf_event__process_fork; in __cmd_record()
2400 tool->exit = perf_event__process_exit; in __cmd_record()
2401 tool->comm = perf_event__process_comm; in __cmd_record()
2402 tool->namespaces = perf_event__process_namespaces; in __cmd_record()
2403 tool->mmap = build_id__process_mmap; in __cmd_record()
2404 tool->mmap2 = build_id__process_mmap2; in __cmd_record()
2405 tool->itrace_start = process_timestamp_boundary; in __cmd_record()
2406 tool->aux = process_timestamp_boundary; in __cmd_record()
2407 tool->namespace_events = rec->opts.record_namespaces; in __cmd_record()
2408 tool->cgroup_events = rec->opts.record_cgroup; in __cmd_record()
2416 if (perf_data__is_pipe(&rec->data)) { in __cmd_record()
2418 return -1; in __cmd_record()
2420 if (rec->opts.full_auxtrace) { in __cmd_record()
2422 return -1; in __cmd_record()
2427 rec->session = session; in __cmd_record()
2429 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) { in __cmd_record()
2431 return -1; in __cmd_record()
2437 status = -1; in __cmd_record()
2440 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd); in __cmd_record()
2448 session->header.env.comp_type = PERF_COMP_ZSTD; in __cmd_record()
2449 session->header.env.comp_level = rec->opts.comp_level; in __cmd_record()
2451 if (rec->opts.kcore && in __cmd_record()
2452 !record__kcore_readable(&session->machines.host)) { in __cmd_record()
2454 return -1; in __cmd_record()
2458 return -1; in __cmd_record()
2463 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe, in __cmd_record()
2478 if (data->is_pipe && rec->evlist->core.nr_entries == 1) in __cmd_record()
2479 rec->opts.sample_id = true; in __cmd_record()
2481 if (rec->timestamp_filename && perf_data__is_pipe(data)) { in __cmd_record()
2482 rec->timestamp_filename = false; in __cmd_record()
2483 pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n"); in __cmd_record()
2486 evlist__uniquify_name(rec->evlist); in __cmd_record()
2488 evlist__config(rec->evlist, opts, &callchain_param); in __cmd_record()
2493 err = -1; in __cmd_record()
2498 session->header.env.comp_mmap_len = session->evlist->core.mmap_len; in __cmd_record()
2500 if (rec->opts.kcore) { in __cmd_record()
2501 err = record__kcore_copy(&session->machines.host, data); in __cmd_record()
2512 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) { in __cmd_record()
2514 rec->tool.ordered_events = false; in __cmd_record()
2517 if (evlist__nr_groups(rec->evlist) == 0) in __cmd_record()
2518 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); in __cmd_record()
2520 if (data->is_pipe) { in __cmd_record()
2525 err = perf_session__write_header(session, rec->evlist, fd, false); in __cmd_record()
2530 err = -1; in __cmd_record()
2531 if (!rec->no_buildid in __cmd_record()
2532 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { in __cmd_record()
2534 "Use --no-buildid to profile anyway.\n"); in __cmd_record()
2538 if (!evlist__needs_bpf_sb_event(rec->evlist)) in __cmd_record()
2539 opts->no_bpf_event = true; in __cmd_record()
2549 if (rec->realtime_prio) { in __cmd_record()
2552 param.sched_priority = rec->realtime_prio; in __cmd_record()
2555 err = -1; in __cmd_record()
2568 if (!target__none(&opts->target) && !opts->target.initial_delay) in __cmd_record()
2569 evlist__enable(rec->evlist); in __cmd_record()
2575 struct machine *machine = &session->machines.host; in __cmd_record()
2579 event = malloc(sizeof(event->comm) + machine->id_hdr_size); in __cmd_record()
2581 err = -ENOMEM; in __cmd_record()
2592 rec->evlist->workload.pid, in __cmd_record()
2597 if (tgid == -1) in __cmd_record()
2600 event = malloc(sizeof(event->namespaces) + in __cmd_record()
2602 machine->id_hdr_size); in __cmd_record()
2604 err = -ENOMEM; in __cmd_record()
2612 rec->evlist->workload.pid, in __cmd_record()
2617 evlist__start_workload(rec->evlist); in __cmd_record()
2620 if (opts->target.initial_delay) { in __cmd_record()
2622 if (opts->target.initial_delay > 0) { in __cmd_record()
2623 usleep(opts->target.initial_delay * USEC_PER_MSEC); in __cmd_record()
2624 evlist__enable(rec->evlist); in __cmd_record()
2629 err = event_enable_timer__start(rec->evlist->eet); in __cmd_record()
2650 unsigned long long hits = thread->samples; in __cmd_record()
2653 * rec->evlist->bkw_mmap_state may be in __cmd_record()
2655 * hits != rec->samples in previous round. in __cmd_record()
2661 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING); in __cmd_record()
2666 err = -1; in __cmd_record()
2676 err = -1; in __cmd_record()
2691 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING) in __cmd_record()
2700 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING); in __cmd_record()
2705 thread->waking = 0; in __cmd_record()
2714 /* re-arm the alarm */ in __cmd_record()
2715 if (rec->switch_output.time) in __cmd_record()
2716 alarm(rec->switch_output.time); in __cmd_record()
2719 if (hits == thread->samples) { in __cmd_record()
2722 err = fdarray__poll(&thread->pollfd, -1); in __cmd_record()
2729 thread->waking++; in __cmd_record()
2731 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP, in __cmd_record()
2735 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread); in __cmd_record()
2740 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) { in __cmd_record()
2744 evlist__ctlfd_ack(rec->evlist); in __cmd_record()
2760 err = event_enable_timer__process(rec->evlist->eet); in __cmd_record()
2773 if (done && !disabled && !target__none(&opts->target)) { in __cmd_record()
2775 evlist__disable(rec->evlist); in __cmd_record()
2783 if (opts->auxtrace_snapshot_on_exit) in __cmd_record()
2790 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels); in __cmd_record()
2794 err = -1; in __cmd_record()
2804 if (target__none(&rec->opts.target)) in __cmd_record()
2812 evlist__finalize_ctlfd(rec->evlist); in __cmd_record()
2815 if (rec->session->bytes_transferred && rec->session->bytes_compressed) { in __cmd_record()
2816 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed; in __cmd_record()
2817 session->header.env.comp_ratio = ratio + 0.5; in __cmd_record()
2824 kill(rec->evlist->workload.pid, SIGTERM); in __cmd_record()
2837 if (rec->off_cpu) in __cmd_record()
2838 rec->bytes_written += off_cpu_write(rec->session); in __cmd_record()
2843 rec->samples = 0; in __cmd_record()
2846 if (!rec->timestamp_filename) { in __cmd_record()
2861 const char *postfix = rec->timestamp_filename ? in __cmd_record()
2864 if (rec->samples && !rec->opts.full_auxtrace) in __cmd_record()
2866 " (%" PRIu64 " samples)", rec->samples); in __cmd_record()
2872 data->path, postfix, samples); in __cmd_record()
2875 rec->session->bytes_transferred / 1024.0 / 1024.0, in __cmd_record()
2885 done_fd = -1; in __cmd_record()
2890 zstd_fini(&session->zstd_data); in __cmd_record()
2891 if (!opts->no_bpf_event) in __cmd_record()
2892 evlist__stop_sb_thread(rec->sb_evlist); in __cmd_record()
2902 pr_debug("callchain: type %s\n", str[callchain->record_mode]); in callchain_debug()
2904 if (callchain->record_mode == CALLCHAIN_DWARF) in callchain_debug()
2906 callchain->dump_size); in callchain_debug()
2914 callchain->enabled = !unset; in record_opts__parse_callchain()
2916 /* --no-call-graph */ in record_opts__parse_callchain()
2918 callchain->record_mode = CALLCHAIN_NONE; in record_opts__parse_callchain()
2926 if (callchain->record_mode == CALLCHAIN_DWARF) in record_opts__parse_callchain()
2927 record->sample_address = true; in record_opts__parse_callchain()
2938 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset); in record_parse_callchain_opt()
2945 struct callchain_param *callchain = opt->value; in record_callchain_opt()
2947 callchain->enabled = true; in record_callchain_opt()
2949 if (callchain->record_mode == CALLCHAIN_NONE) in record_callchain_opt()
2950 callchain->record_mode = CALLCHAIN_FP; in record_callchain_opt()
2960 if (!strcmp(var, "record.build-id")) { in perf_record_config()
2962 rec->no_buildid_cache = false; in perf_record_config()
2963 else if (!strcmp(value, "no-cache")) in perf_record_config()
2964 rec->no_buildid_cache = true; in perf_record_config()
2966 rec->no_buildid = true; in perf_record_config()
2968 rec->buildid_mmap = true; in perf_record_config()
2970 return -1; in perf_record_config()
2973 if (!strcmp(var, "record.call-graph")) { in perf_record_config()
2974 var = "call-graph.record-mode"; in perf_record_config()
2979 rec->opts.nr_cblocks = strtol(value, NULL, 0); in perf_record_config()
2980 if (!rec->opts.nr_cblocks) in perf_record_config()
2981 rec->opts.nr_cblocks = nr_cblocks_default; in perf_record_config()
2985 rec->debuginfod.urls = strdup(value); in perf_record_config()
2986 if (!rec->debuginfod.urls) in perf_record_config()
2987 return -ENOMEM; in perf_record_config()
2988 rec->debuginfod.set = true; in perf_record_config()
2996 struct record *rec = (struct record *)opt->value; in record__parse_event_enable_time()
2998 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset); in record__parse_event_enable_time()
3003 struct record_opts *opts = (struct record_opts *)opt->value; in record__parse_affinity()
3009 opts->affinity = PERF_AFFINITY_NODE; in record__parse_affinity()
3010 else if (!strcasecmp(str, "cpu")) in record__parse_affinity()
3011 opts->affinity = PERF_AFFINITY_CPU; in record__parse_affinity()
3018 mask->nbits = nr_bits; in record__mmap_cpu_mask_alloc()
3019 mask->bits = bitmap_zalloc(mask->nbits); in record__mmap_cpu_mask_alloc()
3020 if (!mask->bits) in record__mmap_cpu_mask_alloc()
3021 return -ENOMEM; in record__mmap_cpu_mask_alloc()
3028 bitmap_free(mask->bits); in record__mmap_cpu_mask_free()
3029 mask->nbits = 0; in record__mmap_cpu_mask_free()
3036 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits); in record__thread_mask_alloc()
3038 mask->affinity.bits = NULL; in record__thread_mask_alloc()
3042 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits); in record__thread_mask_alloc()
3044 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_alloc()
3045 mask->maps.bits = NULL; in record__thread_mask_alloc()
3053 record__mmap_cpu_mask_free(&mask->maps); in record__thread_mask_free()
3054 record__mmap_cpu_mask_free(&mask->affinity); in record__thread_mask_free()
3060 struct record_opts *opts = opt->value; in record__parse_threads()
3063 opts->threads_spec = THREAD_SPEC__CPU; in record__parse_threads()
3067 opts->threads_user_spec = strdup(str); in record__parse_threads()
3068 if (!opts->threads_user_spec) in record__parse_threads()
3069 return -ENOMEM; in record__parse_threads()
3070 opts->threads_spec = THREAD_SPEC__USER; in record__parse_threads()
3074 opts->threads_spec = s; in record__parse_threads()
3080 if (opts->threads_spec == THREAD_SPEC__USER) in record__parse_threads()
3081 pr_debug("threads_spec: %s\n", opts->threads_user_spec); in record__parse_threads()
3083 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]); in record__parse_threads()
3091 unsigned long *s = (unsigned long *)opt->value; in parse_output_max_size()
3107 if (val != (unsigned long) -1) { in parse_output_max_size()
3112 return -1; in parse_output_max_size()
3119 struct record_opts *opts = opt->value; in record__parse_mmap_pages()
3125 return -EINVAL; in record__parse_mmap_pages()
3129 return -ENOMEM; in record__parse_mmap_pages()
3139 opts->mmap_pages = mmap_pages; in record__parse_mmap_pages()
3151 opts->auxtrace_mmap_pages = mmap_pages; in record__parse_mmap_pages()
3166 struct record_opts *opts = opt->value; in parse_control_option()
3168 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close); in parse_control_option()
3173 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages); in switch_output_size_warn()
3174 struct switch_output *s = &rec->switch_output; in switch_output_size_warn()
3178 if (s->size < wakeup_size) { in switch_output_size_warn()
3182 pr_warning("WARNING: switch-output data size lower than " in switch_output_size_warn()
3190 struct switch_output *s = &rec->switch_output; in switch_output_setup()
3208 * If we're using --switch-output-events, then we imply in switch_output_setup()
3209 * --switch-output=signal, as we'll send a SIGUSR2 from the side band in switch_output_setup()
3212 if (rec->switch_output_event_set) { in switch_output_setup()
3214 pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n"); in switch_output_setup()
3220 if (!s->set) in switch_output_setup()
3224 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n"); in switch_output_setup()
3228 if (!strcmp(s->str, "signal")) { in switch_output_setup()
3230 s->signal = true; in switch_output_setup()
3231 pr_debug("switch-output with SIGUSR2 signal\n"); in switch_output_setup()
3235 val = parse_tag_value(s->str, tags_size); in switch_output_setup()
3236 if (val != (unsigned long) -1) { in switch_output_setup()
3237 s->size = val; in switch_output_setup()
3238 pr_debug("switch-output with %s size threshold\n", s->str); in switch_output_setup()
3242 val = parse_tag_value(s->str, tags_time); in switch_output_setup()
3243 if (val != (unsigned long) -1) { in switch_output_setup()
3244 s->time = val; in switch_output_setup()
3245 pr_debug("switch-output with %s time threshold (%lu seconds)\n", in switch_output_setup()
3246 s->str, s->time); in switch_output_setup()
3250 return -1; in switch_output_setup()
3253 rec->timestamp_filename = true; in switch_output_setup()
3254 s->enabled = true; in switch_output_setup()
3256 if (s->size && !rec->opts.no_buffering) in switch_output_setup()
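switch_output_setup() above tries parse_tag_value() twice, once with size tags ("10M") and once with time tags ("5s"), treating (unsigned long)-1 as "no tag matched". A sketch of that scheme; the tag tables here are illustrative, not perf's exact ones:

#include <stdlib.h>
#include <strings.h>

struct tag { const char *suffix; unsigned long mult; };

static unsigned long parse_tag_value(const char *str, const struct tag *tags)
{
	char *end;
	unsigned long val = strtoul(str, &end, 10);

	for (; tags->suffix; tags++)
		if (!strcasecmp(end, tags->suffix))
			return val * tags->mult;
	return (unsigned long)-1;	/* sentinel checked by the callers */
}

static const struct tag tags_size[] = {
	{ "K", 1UL << 10 }, { "M", 1UL << 20 }, { "G", 1UL << 30 }, { NULL, 0 }
};
static const struct tag tags_time[] = {
	{ "s", 1 }, { "m", 60 }, { "hr", 60 * 60 }, { NULL, 0 }
};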
3264 "perf record [<options>] -- <command> [<options>]",
3276 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap()
3288 if (!(event->header.misc & PERF_RECORD_MISC_USER)) in build_id__process_mmap2()
3301 set_timestamp_boundary(rec, sample->time); in process_timestamp_boundary()
3309 struct record_opts *opts = opt->value; in parse_record_synth_option()
3313 return -1; in parse_record_synth_option()
3315 opts->synth = parse_synth_opt(p); in parse_record_synth_option()
3318 if (opts->synth < 0) { in parse_record_synth_option()
3320 return -1; in parse_record_synth_option()
3329 * builtin-script, leave it here.
3348 .ctl_fd = -1,
3349 .ctl_fd_ack = -1,
3368 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3370 * from builtin-record.c, i.e. use record_opts,
3382 "\t\t\t Use perf report --latency for latency-centric profile."),
3383 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3392 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
3394 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
3396 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
3397 "system-wide collection from all CPUs"),
3398 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
3403 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3406 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3407 "synthesize non-sample events at the end of output"),
3409 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
3410 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3415 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3418 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3422 NULL, "enables call-graph recording" ,
3424 OPT_CALLBACK(0, "call-graph", &record.opts,
3433 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3435 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3437 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3439 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3440 OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3447 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
3449 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3452 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3459 "ms to wait before starting measurement after program start (-1: start with events disabled), "
3460 "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3466 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3470 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3477 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3478 "use per-thread mmaps"),
3479 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3481 " use '-I?' to list register names", parse_intr_regs),
3482 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3484 " use '--user-regs=?' to list register names", parse_user_regs),
3485 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3492 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3494 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3498 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3500 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3503 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3506 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3509 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3511 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3515 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3516 "Record build-id of all DSOs regardless of hits"),
3517 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3518 "Record build-id in map events"),
3519 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3521 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3523 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
3527 OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3531 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3533 OPT_BOOLEAN(0, "dry-run", &dry_run,
3540 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3541 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
3544 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3545 …"Compress records using specified level (default: 1 - fastest compression, 22 - greatest compressi…
3548 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3550 OPT_UINTEGER(0, "num-thread-synthesize",
3554 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3558 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
3559 …"Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable…
3561 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3562 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
3565 "Fine-tune event synthesis: default=all", parse_record_synth_option),
3573 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
3574 OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
/* in record__mmap_cpu_mask_init(): */
	struct perf_cpu cpu;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
		/* Return -ENODEV if the input cpu is greater than the max cpu */
		if ((unsigned long)cpu.cpu > mask->nbits)
			return -ENODEV;
		__set_bit(cpu.cpu, mask->bits);

/* in record__mmap_cpu_mask_init_spec(): */
		return -ENOMEM;

	bitmap_zero(mask->bits, mask->nbits);

		return -ENODEV;
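/*
 * Minimal standalone sketch of the mask bookkeeping above, assuming at most
 * 64 CPUs (hypothetical toy_* names; perf's real code uses a variable-size
 * bitmap with bitmap_zero()/__set_bit()):
 */
#if 0
#include <stdint.h>

static int toy_mask_init(uint64_t *mask, const int *cpus, int nr_cpus, int max_cpu)
{
	*mask = 0;				/* plays the role of bitmap_zero() */
	for (int i = 0; i < nr_cpus; i++) {
		if (cpus[i] > max_cpu)
			return -1;		/* the -ENODEV path above */
		*mask |= 1ULL << cpus[i];	/* plays the role of __set_bit() */
	}
	return 0;
}
#endif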
/* in record__free_thread_masks(): */
	if (rec->thread_masks)
			record__thread_mask_free(&rec->thread_masks[t]);

	zfree(&rec->thread_masks);

/* in record__alloc_thread_masks(): */
	rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
	if (!rec->thread_masks) {
		return -ENOMEM;

		ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
/* in record__init_thread_cpu_masks(): */
	ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);

	rec->nr_threads = nr_cpus;
	pr_debug("nr_threads: %d\n", rec->nr_threads);

	for (t = 0; t < rec->nr_threads; t++) {
		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);

			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");

			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
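/*
 * Layout produced above for per-CPU threading on a hypothetical 4-CPU
 * system: one reader thread per mapped CPU, with identical maps and
 * affinity masks:
 *
 *   thread 0: maps {0}, affinity {0}
 *   thread 1: maps {1}, affinity {1}
 *   thread 2: maps {2}, affinity {2}
 *   thread 3: maps {3}, affinity {3}
 */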
/* in record__init_thread_masks_spec(): */
	ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);

		pr_err("Failed to init cpu mask\n");

	ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);

	ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);

			ret = -EINVAL;
			ret = -EINVAL;
			ret = -EINVAL;
			ret = -EINVAL;

		thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
			ret = -ENOMEM;
		rec->thread_masks = thread_masks;
		rec->thread_masks[t] = thread_mask;

			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");

		ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);

	rec->nr_threads = t;
	pr_debug("nr_threads: %d\n", rec->nr_threads);
	if (!rec->nr_threads)
		ret = -EINVAL;
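/*
 * Sketch of the growth pattern used above: the thread_masks array is
 * extended one entry at a time with realloc(), and the new pointer is
 * published only after the call succeeds (hypothetical toy types, not
 * perf's own):
 */
#if 0
#include <errno.h>
#include <stdlib.h>

struct toy_mask { unsigned long maps, affinity; };

static int toy_append_mask(struct toy_mask **arr, int *nr, struct toy_mask m)
{
	struct toy_mask *tmp = realloc(*arr, (*nr + 1) * sizeof(*tmp));

	if (!tmp)
		return -ENOMEM;	/* old array stays valid and owned by caller */
	tmp[*nr] = m;
	*arr = tmp;
	(*nr)++;
	return 0;
}
#endif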
3799 pr_err("Failed to allocate CPU topology\n"); in record__init_thread_core_masks()
3800 return -ENOMEM; in record__init_thread_core_masks()
3803 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list, in record__init_thread_core_masks()
3804 topo->core_cpus_list, topo->core_cpus_lists); in record__init_thread_core_masks()
3817 pr_err("Failed to allocate CPU topology\n"); in record__init_thread_package_masks()
3818 return -ENOMEM; in record__init_thread_package_masks()
3821 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list, in record__init_thread_package_masks()
3822 topo->package_cpus_list, topo->package_cpus_lists); in record__init_thread_package_masks()
/* in record__init_thread_numa_masks(): */
		return -ENOMEM;

	spec = zalloc(topo->nr * sizeof(char *));
		ret = -ENOMEM;

	for (s = 0; s < topo->nr; s++)
		spec[s] = topo->nodes[s].cpus;

	ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
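/*
 * Example (hypothetical two-node machine): numa_topology__new() yields
 * nodes[0].cpus = "0-15" and nodes[1].cpus = "16-31", so the spec passed to
 * record__init_thread_masks_spec() is { "0-15", "16-31" } and one reader
 * thread is created per NUMA node.
 */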
/* in record__init_thread_user_masks(): */
	for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
			ret = -ENOMEM;
			ret = -ENOMEM;
			ret = -EINVAL;
			ret = -ENOMEM;
			ret = -ENOMEM;
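/*
 * The user spec consumed above follows the documented --threads syntax of
 * <maps mask>/<affinity mask> pairs separated by ':' (the exact string
 * below is an assumed example):
 *
 *   perf record --threads=0-3/0-3:4-7/4-7 ...
 *
 * which would request two reader threads, one draining the buffers of CPUs
 * 0-3 and one draining those of CPUs 4-7, each affinitized accordingly.
 */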
/* in record__init_thread_default_masks(): */
	ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);

	if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
		return -ENODEV;

	rec->nr_threads = 1;
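/*
 * Default case (no threading spec given): a single reader thread whose maps
 * mask covers every CPU in the evlist, matching the historical
 * single-threaded perf record behaviour.
 */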
/* in record__init_thread_masks(): */
	struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;

	if (evlist__per_thread(rec->evlist)) {
		pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
		return -EINVAL;
	}

	switch (rec->opts.threads_spec) {
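	/*
	 * Dispatch sketch for the switch above (assumed from the spec names
	 * used by this file): cpu, core, package and numa select the
	 * corresponding topology-based initializer; any other non-default
	 * spec is treated as a user-provided mask list.
	 *
	 *   cpu     -> record__init_thread_cpu_masks()
	 *   core    -> record__init_thread_core_masks()
	 *   package -> record__init_thread_package_masks()
	 *   numa    -> record__init_thread_numa_masks()
	 *   user    -> record__init_thread_user_masks()
	 */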
3992 set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true); in cmd_record()
3998 rec->opts.affinity = PERF_AFFINITY_SYS; in cmd_record()
4000 rec->evlist = evlist__new(); in cmd_record()
4001 if (rec->evlist == NULL) in cmd_record()
4002 return -ENOMEM; in cmd_record()
4019 /* Make system wide (-a) the default target. */ in cmd_record()
4020 if (!argc && target__none(&rec->opts.target)) in cmd_record()
4021 rec->opts.target.system_wide = true; in cmd_record()
4023 if (nr_cgroups && !rec->opts.target.system_wide) { in cmd_record()
4025 "cgroup monitoring only available in system-wide mode"); in cmd_record()
4032 * can't work for system-wide mode, but exact semantics in cmd_record()
4038 pr_err("Failed: latency profiling is not supported with system-wide collection.\n"); in cmd_record()
4039 err = -EINVAL; in cmd_record()
4045 if (rec->buildid_mmap) { in cmd_record()
4048 err = -EINVAL; in cmd_record()
4055 rec->opts.build_id = true; in cmd_record()
4057 rec->no_buildid = true; in cmd_record()
4060 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) { in cmd_record()
4062 err = -EINVAL; in cmd_record()
4066 if (rec->opts.kcore) in cmd_record()
4067 rec->opts.text_poke = true; in cmd_record()
4069 if (rec->opts.kcore || record__threads_enabled(rec)) in cmd_record()
4070 rec->data.is_dir = true; in cmd_record()
4073 if (rec->opts.affinity != PERF_AFFINITY_SYS) { in cmd_record()
4074 pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n"); in cmd_record()
4078 … pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n"); in cmd_record()
4083 if (rec->opts.comp_level != 0) { in cmd_record()
4085 rec->no_buildid = true; in cmd_record()
4088 if (rec->opts.record_switch_events && in cmd_record()
4091 parse_options_usage(record_usage, record_options, "switch-events", 0); in cmd_record()
4092 err = -EINVAL; in cmd_record()
4097 parse_options_usage(record_usage, record_options, "switch-output", 0); in cmd_record()
4098 err = -EINVAL; in cmd_record()
4102 if (rec->switch_output.time) { in cmd_record()
4104 alarm(rec->switch_output.time); in cmd_record()
4107 if (rec->switch_output.num_files) { in cmd_record()
4108 rec->switch_output.filenames = calloc(rec->switch_output.num_files, in cmd_record()
4110 if (!rec->switch_output.filenames) { in cmd_record()
4111 err = -EINVAL; in cmd_record()
4116 if (rec->timestamp_filename && record__threads_enabled(rec)) { in cmd_record()
4117 rec->timestamp_filename = false; in cmd_record()
4118 pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n"); in cmd_record()
4121 if (rec->filter_action) { in cmd_record()
4122 if (!strcmp(rec->filter_action, "pin")) in cmd_record()
4124 else if (!strcmp(rec->filter_action, "unpin")) in cmd_record()
4127 pr_warning("Unknown BPF filter action: %s\n", rec->filter_action); in cmd_record()
4128 err = -EINVAL; in cmd_record()
4148 err = -ENOMEM; in cmd_record()
4150 if (rec->no_buildid_cache || rec->no_buildid) { in cmd_record()
4152 } else if (rec->switch_output.enabled) { in cmd_record()
4154 * In 'perf record --switch-output', disable buildid in cmd_record()
4159 * perf record --switch-output --no-no-buildid \ in cmd_record()
4160 * --no-no-buildid-cache in cmd_record()
4164 * if ((rec->no_buildid || !rec->no_buildid_set) && in cmd_record()
4165 * (rec->no_buildid_cache || !rec->no_buildid_cache_set)) in cmd_record()
4170 if (rec->no_buildid_set && !rec->no_buildid) in cmd_record()
4172 if (rec->no_buildid_cache_set && !rec->no_buildid_cache) in cmd_record()
4175 rec->no_buildid = true; in cmd_record()
4176 rec->no_buildid_cache = true; in cmd_record()
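	/*
	 * Net effect of the build-id decisions above (sketch):
	 *
	 *   --switch-output alone            -> no_buildid = true,
	 *                                       no_buildid_cache = true
	 *   --switch-output --no-no-buildid
	 *     [--no-no-buildid-cache]        -> build-ids kept, because the
	 *                                       user explicitly overrode the
	 *                                       speed-oriented default
	 */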
	if (rec->evlist->core.nr_entries == 0) {
		err = parse_event(rec->evlist, "cycles:P");

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);

	err = target__parse_uid(&rec->opts.target);
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		err = -saved_errno;

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);

		arch__add_leaf_frame_record_opts(&rec->opts);

	err = -ENOMEM;
	if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
		if (rec->opts.target.pid != NULL) {
			pr_err("Couldn't create thread/CPU maps: %s\n",

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);

	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (rec->opts.text_poke) {
		err = record__config_text_poke(rec->evlist);

	if (rec->off_cpu) {

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);
	record__free_thread_masks(rec, rec->nr_threads);
	rec->nr_threads = 0;

	auxtrace_record__free(rec->itr);

	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
	evlist__delete(rec->evlist);