// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <signal.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "util.h"
#include "arch/common.h"
#include "units.h"
#include "annotate.h"
#include "perf.h"
#include <internal/lib.h>

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       const struct perf_tool *tool,
				       u64 file_offset,
				       const char *file_path);

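/*
 * Read and validate the perf.data header: the sample_type, sample_id_all and
 * read_format of all events must agree, except in pipe mode and for files
 * recorded by 'perf stat', where these checks do not apply.
 */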
static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
		/* Auxiliary events may reference exited threads, hold onto dead ones. */
		symbol_conf.keep_exited_threads = true;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset,
					   event->file_path);
}

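/*
 * Allocate and initialize a session. When @data is readable, the file header
 * is parsed and the evlist is populated; in write mode (or with no data at
 * all) the kernel maps are created up front instead.
 *
 * A minimal sketch of a caller (error handling only, assuming @data and
 * @tool were set up elsewhere):
 *
 *	struct perf_session *s = __perf_session__new(&data, &tool, false);
 *
 *	if (IS_ERR(s))
 *		return PTR_ERR(s);
 *	...
 *	perf_session__delete(s);
 */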
struct perf_session *__perf_session__new(struct perf_data *data,
					 struct perf_tool *tool,
					 bool trace_event_repipe)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->trace_event_repipe = trace_event_repipe;
	session->tool = tool;
	session->decomp_data.zstd_decomp = &session->zstd_data;
	session->active_decomp = &session->decomp_data;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

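/* Unmap the chain of decompressed-event buffers hanging off a session. */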
static void perf_decomp__release_events(struct decomp *next)
{
	struct decomp *decomp;
	size_t mmap_len;

	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	debuginfo_cache__delete();
	perf_session__destroy_kernel_maps(session);
	perf_decomp__release_events(session->decomp_data.decomp);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data) {
		if (perf_data__is_read(session->data))
			evlist__delete(session->evlist);
		perf_data__close(session->data);
	}
#ifdef HAVE_LIBTRACEEVENT
	trace_event__cleanup(&session->tevent);
#endif
	free(session);
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
		event->mmap2.maj = bswap_32(event->mmap2.maj);
		event->mmap2.min = bswap_32(event->mmap2.min);
		event->mmap2.ino = bswap_64(event->mmap2.ino);
		event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
	}

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
	event->text_poke.addr = bswap_64(event->text_poke.addr);
	event->text_poke.old_len = bswap_16(event->text_poke.old_len);
	event->text_poke.new_len = bswap_16(event->text_poke.new_len);

	if (sample_id_all) {
		size_t len = sizeof(event->text_poke.old_len) +
			     sizeof(event->text_poke.new_len) +
			     event->text_poke.old_len +
			     event->text_poke.new_len;
		void *data = &event->text_poke.old_len;

		data += PERF_ALIGN(len, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
	event->cgroup.id = bswap_64(event->cgroup.id);

	if (sample_id_all) {
		void *data = &event->cgroup.path;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

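/*
 * Reverse the bit order within a single byte, e.g. revbyte(0x01) == 0x80
 * and revbyte(0xc0) == 0x03.
 */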
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. The 'Internet' also says this might be
 * implementation specific, so the proper fix is probably to carry the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)					\
	(attr->size > (offsetof(struct perf_event_attr, f) +	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)			\
do {						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

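	/*
	 * Only fields that fit within the attr size recorded in the file are
	 * swapped; older perf binaries wrote a shorter perf_event_attr.
	 */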
	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * The fields after read_format are bitfields. Check read_format
	 * because we cannot use offsetof on a bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= perf_record_header_attr_id(event) - (void *)event;
	mem_bswap_64(perf_record_header_attr_id(event), size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
	if (event->auxtrace_error.fmt >= 2) {
		event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
		event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
	}
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;

	data->type = bswap_16(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		data->cpus_data.nr = bswap_16(data->cpus_data.nr);

		for (unsigned i = 0; i < data->cpus_data.nr; i++)
			data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);

		switch (data->mask32_data.long_size) {
		case 4:
			data->mask32_data.nr = bswap_16(data->mask32_data.nr);
			for (unsigned i = 0; i < data->mask32_data.nr; i++)
				data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
			break;
		case 8:
			data->mask64_data.nr = bswap_16(data->mask64_data.nr);
			for (unsigned i = 0; i < data->mask64_data.nr; i++)
				data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
			break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	case PERF_CPU_MAP__RANGE_CPUS:
		data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
		data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
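	/*
	 * size is in bytes; assuming mem_bswap_64() rounds the byte count up
	 * to whole u64 words, the single extra byte above is enough to make
	 * it swap the leading nr field as well.
	 */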
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
				       bool sample_id_all __maybe_unused)
{
	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
	event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
	event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);

	if (event_contains(event->time_conv, time_cycles)) {
		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
	}
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

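/* Entries left NULL here are simply not byte-swapped by event_swap(). */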
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
	[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
	[PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 * ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 * ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 * ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
int perf_event__process_finished_round(const struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset, const char *file_path)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the kernel call chain length,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR entries are register pairs. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will be recorded like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample,
				 struct evsel *evsel)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	bool callstack = evsel__has_branch_callstack(evsel);
	u64 *branch_stack_cntr = sample->branch_stack_cntr;
	uint64_t i;

	if (!callstack) {
		printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
	} else {
		/*
		 * nr + 1 is printed because expanding the branch stack
		 * generates nr + 1 call-stack records, e.g.,
		 *	B()->C()
		 *	A()->B()
		 * expands to the final call stack:
		 *	C()
		 *	B()
		 *	A()
		 */
		printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
	}

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved,
			       get_branch_type(e),
			       e->flags.spec ? branch_spec_desc(e->flags.spec) : "");
		} else {
			if (i == 0) {
				printf("..... %2"PRIu64": %016" PRIx64 "\n"
				       "..... %2"PRIu64": %016" PRIx64 "\n",
				       i, e->to, i+1, e->from);
			} else {
				printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
			}
		}
	}

	if (branch_stack_cntr) {
		unsigned int br_cntr_width, br_cntr_nr;

		perf_env__find_br_cntr_info(evsel__env(evsel), &br_cntr_nr, &br_cntr_width);
		printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
		       sample->branch_stack->nr, br_cntr_width, br_cntr_nr);
		for (i = 0; i < sample->branch_stack->nr; i++)
			printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
	}
}

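/*
 * Print one line per set bit in @mask; regs[] holds the register values in
 * ascending bit order of the mask.
 */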
static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%016" PRIx64 "\n",
		       perf_reg_name(rid, arch), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs, arch);
}

static void regs_user__printf(struct perf_sample *sample, const char *arch)
{
	struct regs_dump *user_regs;

	if (!sample->user_regs)
		return;

	user_regs = perf_sample__user_regs(sample);

	if (user_regs->regs)
		regs__printf("user", user_regs, arch);
}

static void regs_intr__printf(struct perf_sample *sample, const char *arch)
{
	struct regs_dump *intr_regs;

	if (!sample->intr_regs)
		return;

	intr_regs = perf_sample__intr_regs(sample);

	if (intr_regs->regs)
		regs__printf("intr", intr_regs, arch);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	u64 sample_type = __evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		struct sample_read_value *value = sample->read.group.values;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		sample_read_group__for_each(value, sample->read.group.nr, read_format) {
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64,
			       value->id, value->value);
			if (read_format & PERF_FORMAT_LOST)
				printf(", lost %" PRIu64, value->lost);
			printf("\n");
		}
	} else {
		printf("..... id %016" PRIx64 ", value %016" PRIx64,
		       sample->read.one.id, sample->read.one.value);
		if (read_format & PERF_FORMAT_LOST)
			printf(", lost %" PRIu64, sample->read.one.lost);
		printf("\n");
	}
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample,
		       const char *file_path)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
	       file_offset, file_path, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
	if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
		snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

	return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample, const char *arch)
{
	u64 sample_type;
	char str[PAGE_SIZE_NAME_LEN];

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, arch);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample, arch);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		printf("... weight: %" PRIu64 "", sample->weight);
		if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
			printf(",0x%"PRIx16"", sample->ins_lat);
			printf(",0x%"PRIx16"", sample->p_stage_cyc);
		}
		printf("\n");
	}

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
		printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
		printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);

	if (read_format & PERF_FORMAT_LOST)
		printf("... lost : %" PRI_lu64 "\n", read_event->lost);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (sample->machine_pid)
			pid = sample->machine_pid;
		else if (event->header.type == PERF_RECORD_MMAP
			 || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		/*
		 * Guest code machine is created as needed and does not use
		 * DEFAULT_GUEST_KERNEL_ID.
		 */
		if (symbol_conf.guest_code)
			return machines__findnew(machines, pid);

		return machines__find_guest(machines, pid);
	}

	return &machines->host;
}

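/*
 * PERF_SAMPLE_READ counters report running totals; turn a sample's value
 * into a per-sample period by diffing it against the last value seen for
 * the same id (tracked per thread when the attr asks for per-thread sample
 * periods).
 */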
static int deliver_sample_value(struct evlist *evlist,
				const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine,
				bool per_thread)
{
	struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
	struct evsel *evsel;
	u64 *storage = NULL;

	if (sid) {
		storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread);
	}

	if (storage) {
		sample->id = v->id;
		sample->period = v->value - *storage;
		*storage = v->value;
	}

	if (!storage || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver a sample
	 * for a zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine,
				u64 read_format,
				bool per_thread)
{
	int ret = -EINVAL;
	struct sample_read_value *v = sample->read.group.values;

	if (tool->dont_split_sample_group)
		return deliver_sample_value(evlist, tool, event, sample, v, machine,
					    per_thread);

	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
		ret = deliver_sample_value(evlist, tool, event, sample, v,
					   machine, per_thread);
		if (ret)
			break;
	}

	return ret;
}

static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool *tool,
				  union perf_event *event, struct perf_sample *sample,
				  struct evsel *evsel, struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;
	bool per_thread = perf_evsel__attr_has_per_thread_sample_period(&evsel->core);

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine, read_format, per_thread);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine,
					    per_thread);
}

static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   const struct perf_tool *tool, u64 file_offset,
				   const char *file_path)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample, file_path);

	evsel = evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			dump_sample(evsel, event, sample, perf_env__arch(NULL));
			return 0;
		}
		dump_sample(evsel, event, sample, perf_env__arch(machine->env));
		return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
			evlist->stats.total_dropped_samples += event->lost_samples.lost;
		else if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
			if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
				evlist->stats.total_aux_collision += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	case PERF_RECORD_TEXT_POKE:
		return tool->text_poke(tool, event, sample, machine);
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		return tool->aux_output_hw_id(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       const struct perf_tool *tool,
				       u64 file_offset,
				       const char *file_path)
{
	struct perf_sample sample;
	int ret;

	perf_sample__init(&sample, /*all=*/false);
	ret = evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		goto out;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset, file_path);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);
out:
	perf_sample__exit(&sample);
	return ret;
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset,
					    const char *file_path)
{
	struct ordered_events *oe = &session->ordered_events;
	const struct perf_tool *tool = session->tool;
	struct perf_sample sample;
	int fd = perf_data__fd(session->data);
	int err;

	perf_sample__init(&sample, /*all=*/true);
	if (event->header.type != PERF_RECORD_COMPRESSED || perf_tool__compressed_is_stub(tool))
		dump_event(session->evlist, event, file_offset, &sample, file_path);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		break;
	case PERF_RECORD_EVENT_UPDATE:
		err = tool->event_update(tool, event, &session->evlist);
		break;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		err = 0;
		break;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in the proper
		 * place already.
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		err = tool->tracing_data(session, event);
		break;
	case PERF_RECORD_HEADER_BUILD_ID:
		err = tool->build_id(session, event);
		break;
	case PERF_RECORD_FINISHED_ROUND:
		err = tool->finished_round(tool, event, oe);
		break;
	case PERF_RECORD_ID_INDEX:
		err = tool->id_index(session, event);
		break;
	case PERF_RECORD_AUXTRACE_INFO:
		err = tool->auxtrace_info(session, event);
		break;
	case PERF_RECORD_AUXTRACE:
		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in the proper
		 * place already.
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset + event->header.size, SEEK_SET);
		err = tool->auxtrace(session, event);
		break;
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		err = tool->auxtrace_error(session, event);
		break;
	case PERF_RECORD_THREAD_MAP:
		err = tool->thread_map(session, event);
		break;
	case PERF_RECORD_CPU_MAP:
		err = tool->cpu_map(session, event);
		break;
	case PERF_RECORD_STAT_CONFIG:
		err = tool->stat_config(session, event);
		break;
	case PERF_RECORD_STAT:
		err = tool->stat(session, event);
		break;
	case PERF_RECORD_STAT_ROUND:
		err = tool->stat_round(session, event);
		break;
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		err = tool->time_conv(session, event);
		break;
	case PERF_RECORD_HEADER_FEATURE:
		err = tool->feature(session, event);
		break;
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset, file_path);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample, file_path);
		break;
	case PERF_RECORD_FINISHED_INIT:
		err = tool->finished_init(session, event);
		break;
	default:
		err = -EINVAL;
		break;
	}
	perf_sample__exit(&sample);
	return err;
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	const struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0, NULL);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
}

int perf_session__deliver_synth_attr_event(struct perf_session *session,
					   const struct perf_event_attr *attr,
					   u64 id)
{
	union {
		struct {
			struct perf_record_header_attr attr;
			u64 ids[1];
		} attr_id;
		union perf_event ev;
	} ev = {
		.attr_id.attr.header.type = PERF_RECORD_HEADER_ATTR,
		.attr_id.attr.header.size = sizeof(ev.attr_id),
		.attr_id.ids[0] = id,
	};

	if (attr->size != sizeof(ev.attr_id.attr.attr)) {
		pr_debug("Unexpected perf_event_attr size\n");
		return -EINVAL;
	}
	ev.attr_id.attr.attr = *attr;
	return perf_session__deliver_synth_event(session, &ev.ev, NULL);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

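/*
 * Read one event at an arbitrary file offset without disturbing the normal
 * processing position: either point straight into the single mmap of the
 * file, or lseek()+read() into the caller's buffer, byte-swapping and
 * (optionally) parsing the sample on the way out.
 */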
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

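/*
 * Walk all events in [offset, offset + size), calling @cb for each one. A
 * minimal sketch of a caller, assuming a callback matching peek_events_cb_t:
 *
 *	static int cb(struct perf_session *s, union perf_event *ev,
 *		      u64 off, void *data)
 *	{
 *		... inspect ev here ...
 *		return 0;
 *	}
 *
 *	err = perf_session__peek_events(session, offset, size, cb, NULL);
 */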
perf_session__peek_events(struct perf_session * session,u64 offset,u64 size,peek_events_cb_t cb,void * data)1603 int perf_session__peek_events(struct perf_session *session, u64 offset,
1604 u64 size, peek_events_cb_t cb, void *data)
1605 {
1606 u64 max_offset = offset + size;
1607 char buf[PERF_SAMPLE_MAX_SIZE];
1608 union perf_event *event;
1609 int err;
1610
1611 do {
1612 err = perf_session__peek_event(session, offset, buf,
1613 PERF_SAMPLE_MAX_SIZE, &event,
1614 NULL);
1615 if (err)
1616 return err;
1617
1618 err = cb(session, event, offset, data);
1619 if (err)
1620 return err;
1621
1622 offset += event->header.size;
1623 if (event->header.type == PERF_RECORD_AUXTRACE)
1624 offset += event->auxtrace.size;
1625
1626 } while (offset < max_offset);
1627
1628 return err;
1629 }

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset,
				       const char *file_path)
{
	struct evlist *evlist = session->evlist;
	const struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset, file_path);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset, file_path);
}
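/*
 * An event that carries no usable timestamp cannot be sorted: the queue
 * rejects it with -ETIME, in which case it falls through to immediate
 * delivery above instead of being buffered.
 */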

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread = machine__idle_thread(&session->machines.host);

	/* machine__idle_thread() got the thread, so put it */
	thread__put(thread);
	return thread ? 0 : -1;
}

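/*
 * A ring buffer recorded with write_backward is overwritten from the
 * tail, so out of order events are expected there and the warning below
 * is suppressed.
 */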
static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_collision != 0) {
		ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_collision,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed; samples for those addresses will not be\n"
			    "resolved. You can find out which threads these are by\n"
			    "running with -v and redirecting the output to a file.\n"
			    "Is the time limit for processing proc maps too short?\n"
			    "Increase it with --proc-map-timeout.\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile sig_atomic_t session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	const struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;
	bool update_prog = false;

	/*
	 * If the input is a file that saved pipe data (by redirection), it
	 * has a file name other than "-", so we can get the total size and
	 * show the progress.
	 */
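	/*
	 * e.g. (illustrative):
	 *	perf record -o - ... > pipe.data	 sized, shows progress
	 *	perf record -o - ... | perf report -i -	 true pipe, no size
	 */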
	if (strcmp(session->data->path, "-") && session->data->file.size) {
		ui_progress__init_size(&prog, session->data->file.size,
				       "Processing events...");
		update_prog = true;
	}

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = perf_data__read(session->data, event,
			      sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = perf_data__read(session->data, p,
				      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (update_prog)
		ui_progress__update(&prog, size);

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (update_prog)
		ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;
	u16 event_size;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	event_size = event->header.size;
	if (head + event_size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	/* Check if the event fits into the next mmapped buf. */
	if (event_size <= mmap_size - head % page_size) {
		/* Remap buf and fetch again. */
		return NULL;
	}

	/* Invalid input. Event size should never exceed mmap_size. */
	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);

	return error;
}
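/*
 * prefetch_event() has three outcomes: a valid event pointer, NULL when
 * the caller should remap (or, for decompressed data, stop), and the
 * caller-supplied error sentinel for truly malformed input. The two
 * wrappers below differ only in that sentinel.
 */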

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size;
	struct decomp *decomp = session->active_decomp->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, decomp->file_pos,
							decomp->file_path)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}
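/*
 * The decomp buffers walked above are filled when compressed records
 * (PERF_RECORD_COMPRESSED) are decompressed, so this is called after
 * each regular event to drain whatever that event may have produced.
 */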

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
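/*
 * NUM_MMAPS must stay a power of two: reader__mmap() below advances
 * mmap_idx with a mask, (mmap_idx + 1) & (ARRAY_SIZE(mmaps) - 1).
 */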

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset,
			   const char *file_path);

struct reader {
	int fd;
	const char *path;
	u64 data_size;
	u64 data_offset;
	reader_cb_t process;
	bool in_place_update;
	char *mmaps[NUM_MMAPS];
	size_t mmap_size;
	int mmap_idx;
	char *mmap_cur;
	u64 file_pos;
	u64 file_offset;
	u64 head;
	u64 size;
	bool done;
	struct zstd_data zstd_data;
	struct decomp_data decomp_data;
};

static int
reader__init(struct reader *rd, bool *one_mmap)
{
	u64 data_size = rd->data_size;
	char **mmaps = rd->mmaps;

	rd->head = rd->data_offset;
	data_size += rd->data_offset;

	rd->mmap_size = MMAP_SIZE;
	if (rd->mmap_size > data_size) {
		rd->mmap_size = data_size;
		if (one_mmap)
			*one_mmap = true;
	}

	memset(mmaps, 0, sizeof(rd->mmaps));

	if (zstd_init(&rd->zstd_data, 0))
		return -1;
	rd->decomp_data.zstd_decomp = &rd->zstd_data;

	return 0;
}

static void
reader__release_decomp(struct reader *rd)
{
	perf_decomp__release_events(rd->decomp_data.decomp);
	zstd_fini(&rd->zstd_data);
}

static int
reader__mmap(struct reader *rd, struct perf_session *session)
{
	int mmap_prot, mmap_flags;
	char *buf, **mmaps = rd->mmaps;
	u64 page_offset;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (rd->in_place_update) {
		mmap_prot |= PROT_WRITE;
	} else if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}

	if (mmaps[rd->mmap_idx]) {
		munmap(mmaps[rd->mmap_idx], rd->mmap_size);
		mmaps[rd->mmap_idx] = NULL;
	}

	page_offset = page_size * (rd->head / page_size);
	rd->file_offset += page_offset;
	rd->head -= page_offset;

	buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
		   rd->file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		return -errno;
	}
	mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
	rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
	rd->file_pos = rd->file_offset + rd->head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = rd->file_offset;
	}

	return 0;
}
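/*
 * mmap() file offsets must be page aligned, hence rd->head is folded
 * back modulo page_size above and the remainder kept as an offset into
 * the new mapping. The mapping is made writable either for in-place
 * update or so a cross-endian file can be byte-swapped privately.
 */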

enum {
	READER_OK,
	READER_NODATA,
};

static int
reader__read_event(struct reader *rd, struct perf_session *session,
		   struct ui_progress *prog)
{
	u64 size;
	int err = READER_OK;
	union perf_event *event;
	s64 skip;

	event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
				   session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event)
		return READER_NODATA;

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       rd->file_offset + rd->head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	rd->size += size;
	rd->head += size;
	rd->file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

out:
	return err;
}

static inline bool
reader__eof(struct reader *rd)
{
	return (rd->file_pos >= rd->data_size + rd->data_offset);
}

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	int err;

	err = reader__init(rd, &session->one_mmap);
	if (err)
		goto out;

	session->active_decomp = &rd->decomp_data;

remap:
	err = reader__mmap(rd, session);
	if (err)
		goto out;

more:
	err = reader__read_event(rd, session, prog);
	if (err < 0)
		goto out;
	else if (err == READER_NODATA)
		goto remap;

	if (session_done())
		goto out;

	if (!reader__eof(rd))
		goto more;

out:
	session->active_decomp = &session->decomp_data;
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset,
			  const char *file_path)
{
	return perf_session__process_event(session, event, file_offset, file_path);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd = perf_data__fd(session->data),
		.path = session->data->file.path,
		.data_size = session->header.data_size,
		.data_offset = session->header.data_offset,
		.process = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	struct ordered_events *oe = &session->ordered_events;
	const struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	reader__release_decomp(&rd);
	session->one_mmap = false;
	return err;
}

/*
 * Process 2 MB of data from each reader in sequence, because that's
 * the way the ordered events sorting works most efficiently.
 */
#define READER_MAX_SIZE (2 * 1024 * 1024)

/*
 * This function reads, merges and processes directory data.
 * It assumes version 1 of the directory data, where each data
 * file holds per-cpu data, already sorted by the kernel.
 */
static int __perf_session__process_dir_events(struct perf_session *session)
{
	struct perf_data *data = session->data;
	const struct perf_tool *tool = session->tool;
	int i, ret, readers, nr_readers;
	struct ui_progress prog;
	u64 total_size = perf_data__size(session->data);
	struct reader *rd;

	ui_progress__init_size(&prog, total_size, "Processing events...");

	nr_readers = 1;
	for (i = 0; i < data->dir.nr; i++) {
		if (data->dir.files[i].size)
			nr_readers++;
	}

	rd = zalloc(nr_readers * sizeof(struct reader));
	if (!rd)
		return -ENOMEM;

	rd[0] = (struct reader) {
		.fd = perf_data__fd(session->data),
		.path = session->data->file.path,
		.data_size = session->header.data_size,
		.data_offset = session->header.data_offset,
		.process = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	ret = reader__init(&rd[0], NULL);
	if (ret)
		goto out_err;
	ret = reader__mmap(&rd[0], session);
	if (ret)
		goto out_err;
	readers = 1;

	for (i = 0; i < data->dir.nr; i++) {
		if (!data->dir.files[i].size)
			continue;
		rd[readers] = (struct reader) {
			.fd = data->dir.files[i].fd,
			.path = data->dir.files[i].path,
			.data_size = data->dir.files[i].size,
			.data_offset = 0,
			.process = process_simple,
			.in_place_update = session->data->in_place_update,
		};
		ret = reader__init(&rd[readers], NULL);
		if (ret)
			goto out_err;
		ret = reader__mmap(&rd[readers], session);
		if (ret)
			goto out_err;
		readers++;
	}

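	/*
	 * Round-robin over the readers, draining up to READER_MAX_SIZE
	 * bytes from each before moving on, so the ordered-events queue
	 * sees roughly time-adjacent data from all per-cpu files.
	 */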
	i = 0;
	while (readers) {
		if (session_done())
			break;

		if (rd[i].done) {
			i = (i + 1) % nr_readers;
			continue;
		}
		if (reader__eof(&rd[i])) {
			rd[i].done = true;
			readers--;
			continue;
		}

		session->active_decomp = &rd[i].decomp_data;
		ret = reader__read_event(&rd[i], session, &prog);
		if (ret < 0) {
			goto out_err;
		} else if (ret == READER_NODATA) {
			ret = reader__mmap(&rd[i], session);
			if (ret)
				goto out_err;
		}

		if (rd[i].size >= READER_MAX_SIZE) {
			rd[i].size = 0;
			i = (i + 1) % nr_readers;
		}
	}

	ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
	if (ret)
		goto out_err;

	ret = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();

	if (!tool->no_warn)
		perf_session__warn_about_errors(session);

	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);

	session->one_mmap = false;

	session->active_decomp = &session->decomp_data;
	for (i = 0; i < nr_readers; i++)
		reader__release_decomp(&rd[i]);
	zfree(&rd);

	return ret;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	if (perf_data__is_dir(session->data) && session->data->dir.nr)
		return __perf_session__process_dir_events(session);

	return __perf_session__process_events(session);
}
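/*
 * A minimal driver loop (illustrative sketch only, not lifted from a
 * builtin; "tool" and "err" are assumed to be set up by the caller):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, &tool);
 *
 *	if (!IS_ERR(session)) {
 *		err = perf_session__process_events(session);
 *		perf_session__delete(session);
 *	}
 */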

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

bool perf_session__has_switch_events(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.context_switch)
			return true;
	}

	return false;
}

int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

void perf_session__dump_kmaps(struct perf_session *session)
{
	int save_verbose = verbose;

	fflush(stdout);
	fprintf(stderr, "Kernel and module maps:\n");
	verbose = 0; /* Suppress verbose to print a summary only */
	maps__fprintf(machine__kernel_maps(&session->machines.host), stderr);
	verbose = save_verbose;
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
	struct perf_cpu cpu;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	perf_cpu_map__for_each_cpu(cpu, i, map) {
		if (cpu.cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu.cpu);
			goto out_delete_map;
		}

		__set_bit(cpu.cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}
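/*
 * Illustrative caller (cf. the -C option handling in the report/script
 * builtins; names here are assumed for the sketch):
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (cpu_list && perf_session__cpu_bitmap(session, cpu_list,
 *						 cpu_bitmap) < 0)
 *		return -1;
 */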

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
{
	struct machine *machine = machines__findnew(&session->machines, machine_pid);
	struct thread *thread;

	if (!machine)
		return -ENOMEM;

	machine->single_address_space = session->machines.host.single_address_space;

	thread = machine__idle_thread(machine);
	if (!thread)
		return -ENOMEM;
	thread__put(thread);

	machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);

	return 0;
}

static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
				       pid_t tid, int guest_cpu)
{
	struct machine *machine = &session->machines.host;
	struct thread *thread = machine__findnew_thread(machine, pid, tid);

	if (!thread)
		return -ENOMEM;
	thread__set_guest_cpu(thread, guest_cpu);
	thread__put(thread);

	return 0;
}

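/*
 * The ID index comes in two layouts: the original one, an array of
 * struct id_index_entry only, and an extended one where an array of
 * struct id_index_entry_2 (machine_pid/vcpu, used for guest machines)
 * is appended. Which layout is present is detected purely from the
 * record size, see the sz >= nr * etot_sz check below.
 */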
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t sz = ie->header.size - sizeof(*ie);
	size_t i, nr, max_nr;
	size_t e1_sz = sizeof(struct id_index_entry);
	size_t e2_sz = sizeof(struct id_index_entry_2);
	size_t etot_sz = e1_sz + e2_sz;
	struct id_index_entry_2 *e2;
	pid_t last_pid = 0;

	max_nr = sz / e1_sz;
	nr = ie->nr;
	if (nr > max_nr) {
		printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
		return -EINVAL;
	}

	if (sz >= nr * etot_sz) {
		max_nr = sz / etot_sz;
		if (nr > max_nr) {
			printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
			return -EINVAL;
		}
		e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
	} else {
		e2 = NULL;
	}

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;
		int ret;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, " idx: %"PRI_lu64, e->idx);
			fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, " tid: %"PRI_ld64, e->tid);
			if (e2) {
				fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid);
				fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu);
			} else {
				fprintf(stdout, "\n");
			}
		}

		sid = evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;

		sid->idx = e->idx;
		sid->cpu.cpu = e->cpu;
		sid->tid = e->tid;

		if (!e2)
			continue;

		sid->machine_pid = e2->machine_pid;
		sid->vcpu.cpu = e2->vcpu;

		if (!sid->machine_pid)
			continue;

		if (sid->machine_pid != last_pid) {
			ret = perf_session__register_guest(session, sid->machine_pid);
			if (ret)
				return ret;
			last_pid = sid->machine_pid;
			perf_guest = true;
		}

		ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
		if (ret)
			return ret;
	}
	return 0;
}

int perf_session__dsos_hit_all(struct perf_session *session)
{
	struct rb_node *nd;
	int err;

	err = machine__hit_all_dsos(&session->machines.host);
	if (err)
		return err;

	for (nd = rb_first_cached(&session->machines.guests); nd;
	     nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		err = machine__hit_all_dsos(pos);
		if (err)
			return err;
	}

	return 0;
}
