xref: /linux/kernel/trace/trace_output.c (revision c1fe867b5bf9c57ab7856486d342720e2b205eed) !
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_output.c
4  *
5  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6  *
7  */
8 #include "trace.h"
9 #include <linux/module.h>
10 #include <linux/mutex.h>
11 #include <linux/ftrace.h>
12 #include <linux/kprobes.h>
13 #include <linux/sched/clock.h>
14 #include <linux/sched/mm.h>
15 #include <linux/idr.h>
16 #include <linux/btf.h>
17 #include <linux/bpf.h>
18 #include <linux/hashtable.h>
19 
20 #include "trace_output.h"
21 #include "trace_btf.h"
22 
23 /* 2^7 = 128 */
24 #define EVENT_HASH_BITS 7
25 
26 DECLARE_RWSEM(trace_event_sem);
27 
28 static DEFINE_HASHTABLE(event_hash, EVENT_HASH_BITS);
29 
30 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
31 {
32 	struct trace_seq *s = &iter->seq;
33 	struct trace_entry *entry = iter->ent;
34 	struct bputs_entry *field;
35 
36 	trace_assign_type(field, entry);
37 
38 	trace_seq_puts(s, field->str);
39 
40 	return trace_handle_return(s);
41 }
42 
43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
44 {
45 	struct trace_seq *s = &iter->seq;
46 	struct trace_entry *entry = iter->ent;
47 	struct bprint_entry *field;
48 
49 	trace_assign_type(field, entry);
50 
51 	trace_seq_bprintf(s, field->fmt, field->buf);
52 
53 	return trace_handle_return(s);
54 }
55 
56 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
57 {
58 	struct trace_seq *s = &iter->seq;
59 	struct trace_entry *entry = iter->ent;
60 	struct print_entry *field;
61 
62 	trace_assign_type(field, entry);
63 
64 	trace_seq_puts(s, field->buf);
65 
66 	return trace_handle_return(s);
67 }
68 
69 const char *
70 trace_print_flags_seq(struct trace_seq *p, const char *delim,
71 		      unsigned long flags,
72 		      const struct trace_print_flags *flag_array,
73 		      size_t flag_array_size)
74 {
75 	unsigned long mask;
76 	const char *str;
77 	const char *ret = trace_seq_buffer_ptr(p);
78 	int i, first = 1;
79 
80 	for (i = 0; i < flag_array_size && flags; i++) {
81 
82 		mask = flag_array[i].mask;
83 		if ((flags & mask) != mask)
84 			continue;
85 
86 		str = flag_array[i].name;
87 		flags &= ~mask;
88 		if (!first && delim)
89 			trace_seq_puts(p, delim);
90 		else
91 			first = 0;
92 		trace_seq_puts(p, str);
93 	}
94 
95 	/* check for left over flags */
96 	if (flags) {
97 		if (!first && delim)
98 			trace_seq_puts(p, delim);
99 		trace_seq_printf(p, "0x%lx", flags);
100 	}
101 
102 	trace_seq_putc(p, 0);
103 
104 	return ret;
105 }
106 EXPORT_SYMBOL(trace_print_flags_seq);
107 
108 const char *
109 trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
110 			const struct trace_print_flags *symbol_array,
111 			size_t symbol_array_size)
112 {
113 	int i;
114 	const char *ret = trace_seq_buffer_ptr(p);
115 
116 	for (i = 0; i < symbol_array_size; i++) {
117 
118 		if (val != symbol_array[i].mask)
119 			continue;
120 
121 		trace_seq_puts(p, symbol_array[i].name);
122 		break;
123 	}
124 
125 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
126 		trace_seq_printf(p, "0x%lx", val);
127 
128 	trace_seq_putc(p, 0);
129 
130 	return ret;
131 }
132 EXPORT_SYMBOL(trace_print_symbols_seq);
133 
134 #if BITS_PER_LONG == 32
135 const char *
136 trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
137 		      unsigned long long flags,
138 		      const struct trace_print_flags_u64 *flag_array,
139 		      size_t flag_array_size)
140 {
141 	unsigned long long mask;
142 	const char *str;
143 	const char *ret = trace_seq_buffer_ptr(p);
144 	int i, first = 1;
145 
146 	for (i = 0; i < flag_array_size && flags; i++) {
147 
148 		mask = flag_array[i].mask;
149 		if ((flags & mask) != mask)
150 			continue;
151 
152 		str = flag_array[i].name;
153 		flags &= ~mask;
154 		if (!first && delim)
155 			trace_seq_puts(p, delim);
156 		else
157 			first = 0;
158 		trace_seq_puts(p, str);
159 	}
160 
161 	/* check for left over flags */
162 	if (flags) {
163 		if (!first && delim)
164 			trace_seq_puts(p, delim);
165 		trace_seq_printf(p, "0x%llx", flags);
166 	}
167 
168 	trace_seq_putc(p, 0);
169 
170 	return ret;
171 }
172 EXPORT_SYMBOL(trace_print_flags_seq_u64);
173 
174 const char *
175 trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
176 			    const struct trace_print_flags_u64 *symbol_array,
177 			    size_t symbol_array_size)
178 {
179 	int i;
180 	const char *ret = trace_seq_buffer_ptr(p);
181 
182 	for (i = 0; i < symbol_array_size; i++) {
183 
184 		if (val != symbol_array[i].mask)
185 			continue;
186 
187 		trace_seq_puts(p, symbol_array[i].name);
188 		break;
189 	}
190 
191 	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
192 		trace_seq_printf(p, "0x%llx", val);
193 
194 	trace_seq_putc(p, 0);
195 
196 	return ret;
197 }
198 EXPORT_SYMBOL(trace_print_symbols_seq_u64);
199 #endif
200 
201 /**
202  * trace_print_bitmask_seq - print a bitmask to a sequence buffer
203  * @iter: The trace iterator for the current event instance
204  * @bitmask_ptr: The pointer to the bitmask data
205  * @bitmask_size: The size of the bitmask in bytes
206  *
207  * Prints a bitmask into a sequence buffer as either a hex string or a
208  * human-readable range list, depending on the instance's "bitmask-list"
209  * trace option. The bitmask is formatted into the iterator's temporary
210  * scratchpad rather than the primary sequence buffer. This avoids
211  * duplication and pointer-collision issues when the returned string is
212  * processed by a "%s" specifier in a TP_printk() macro.
213  *
214  * Returns a pointer to the formatted string within the temporary buffer.
215  */
216 const char *
217 trace_print_bitmask_seq(struct trace_iterator *iter, void *bitmask_ptr,
218 			unsigned int bitmask_size)
219 {
220 	struct trace_seq *p = &iter->tmp_seq;
221 	const struct trace_array *tr = iter->tr;
222 	const char *ret;
223 
224 	trace_seq_init(p);
225 	ret = trace_seq_buffer_ptr(p);
226 
227 	if (tr->trace_flags & TRACE_ITER(BITMASK_LIST))
228 		trace_seq_bitmask_list(p, bitmask_ptr, bitmask_size * 8);
229 	else
230 		trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
231 
232 	trace_seq_putc(p, 0);
233 
234 	return ret;
235 }
236 EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
237 
238 /**
239  * trace_print_hex_seq - print buffer as hex sequence
240  * @p: trace seq struct to write to
241  * @buf: The buffer to print
242  * @buf_len: Length of @buf in bytes
243  * @concatenate: Print @buf as single hex string or with spacing
244  *
245  * Prints the passed buffer as a hex sequence either as a whole,
246  * single hex string if @concatenate is true or with spacing after
247  * each byte in case @concatenate is false.
248  */
249 const char *
250 trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
251 		    bool concatenate)
252 {
253 	int i;
254 	const char *ret = trace_seq_buffer_ptr(p);
255 	const char *fmt = concatenate ? "%*phN" : "%*ph";
256 
257 	for (i = 0; i < buf_len; i += 16) {
258 		if (!concatenate && i != 0)
259 			trace_seq_putc(p, ' ');
260 		trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
261 	}
262 	trace_seq_putc(p, 0);
263 
264 	return ret;
265 }
266 EXPORT_SYMBOL(trace_print_hex_seq);
267 
/*
 * Print @count elements of @el_size bytes each from @buf as a
 * comma-separated, brace-enclosed list of hex values, e.g. "{0x1,0x2}".
 * Returns the start of the rendered (NUL-terminated) string in @p.
 */
const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			/*
			 * Unsupported element size: report it, then clamp to
			 * one byte so the walk below still makes progress.
			 */
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			el_size = 1;
		}
		/* every element after the first gets a ',' separator */
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);
312 
313 const char *
314 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
315 			 int prefix_type, int rowsize, int groupsize,
316 			 const void *buf, size_t len, bool ascii)
317 {
318 	const char *ret = trace_seq_buffer_ptr(p);
319 
320 	trace_seq_putc(p, '\n');
321 	trace_seq_hex_dump(p, prefix_str, prefix_type,
322 			   rowsize, groupsize, buf, len, ascii);
323 	trace_seq_putc(p, 0);
324 	return ret;
325 }
326 EXPORT_SYMBOL(trace_print_hex_dump_seq);
327 
328 int trace_raw_output_prep(struct trace_iterator *iter,
329 			  struct trace_event *trace_event)
330 {
331 	struct trace_event_call *event;
332 	struct trace_seq *s = &iter->seq;
333 	struct trace_seq *p = &iter->tmp_seq;
334 	struct trace_entry *entry;
335 
336 	event = container_of(trace_event, struct trace_event_call, event);
337 	entry = iter->ent;
338 
339 	if (entry->type != event->event.type) {
340 		WARN_ON_ONCE(1);
341 		return TRACE_TYPE_UNHANDLED;
342 	}
343 
344 	trace_seq_init(p);
345 	trace_seq_printf(s, "%s: ", trace_event_name(event));
346 
347 	return trace_handle_return(s);
348 }
349 EXPORT_SYMBOL(trace_raw_output_prep);
350 
/*
 * printf()-style output for an event, run through trace_event_format()
 * so the format string can be adjusted for this iterator. Does nothing
 * when ignore_event() says this event should be skipped.
 */
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
{
	struct trace_seq *s = &iter->seq;
	va_list ap;

	if (ignore_event(iter))
		return;

	va_start(ap, fmt);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
	va_end(ap);
}
EXPORT_SYMBOL(trace_event_printf);
364 
/*
 * Emit "<name>: " followed by the vprintf()-formatted event payload,
 * with the format run through trace_event_format() first.
 */
static __printf(3, 0)
int trace_output_raw(struct trace_iterator *iter, char *name,
		     char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);

	return trace_handle_return(s);
}
376 
/*
 * Variadic wrapper around trace_output_raw(); see that function for
 * the output format.
 */
int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);
389 
/*
 * Addresses inside a kretprobe trampoline do not resolve to a useful
 * symbol; substitute a fixed marker string in that case.
 */
static inline const char *kretprobed(const char *name, unsigned long addr)
{
	return is_kretprobe_trampoline(addr) ? "[unknown/kretprobe'd]" : name;
}
396 
/*
 * Print @address as a symbol name (with "+offset/size" appended when
 * @offset is true). Falls back to printing the raw address when
 * kallsyms is disabled, lookup fails, or the address sits in a
 * kretprobe trampoline.
 */
void
trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	if (offset)
		sprint_symbol(str, address);
	else
		kallsyms_lookup(address, NULL, NULL, NULL, str);
	/* kretprobe trampolines get a fixed marker instead of a symbol */
	name = kretprobed(str, address);

	if (name && strlen(name)) {
		trace_seq_puts(s, name);
		return;
	}
#endif
	trace_seq_printf(s, "0x%08lx", address);
}
417 
418 #ifndef CONFIG_64BIT
419 # define IP_FMT "%08lx"
420 #else
421 # define IP_FMT "%016lx"
422 #endif
423 
/*
 * Print a userspace address as "<file path>[+offset]" by looking up the
 * VMA containing @ip in @mm. When the lookup fails (or SYM_ADDR is
 * set) the raw address is appended as "<address>".
 * Returns 0 if the seq buffer already overflowed, else non-zero on
 * success.
 */
static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		/* VMA tree walk and vm_file access require mmap_lock */
		mmap_read_lock(mm);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, file_user_path(file));
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		mmap_read_unlock(mm);
	}
	/* Raw address shown when requested, or when no backing file found */
	if (ret && ((sym_flags & TRACE_ITER(SYM_ADDR)) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}
455 
456 int
457 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
458 {
459 	if (!ip) {
460 		trace_seq_putc(s, '0');
461 		goto out;
462 	}
463 
464 	trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER(SYM_OFFSET));
465 
466 	if (sym_flags & TRACE_ITER(SYM_ADDR))
467 		trace_seq_printf(s, " <" IP_FMT ">", ip);
468 
469  out:
470 	return !trace_seq_has_overflowed(s);
471 }
472 
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 *
 * Returns 0 if the seq buffer overflowed while writing, else non-zero.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int bh_off;
	int nmi;

	nmi = entry->flags & TRACE_FLAG_NMI;
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	bh_off = entry->flags & TRACE_FLAG_BH_OFF;

	/*
	 * 'D': irqs and BHs both disabled, 'd': irqs disabled,
	 * 'b': only BHs disabled, '.': neither.
	 */
	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		bh_off ? 'b' :
		'.';

	/* One character per combination of the three resched flags */
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'B';
		break;
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'L';
		break;
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
		need_resched = 'b';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	case TRACE_FLAG_NEED_RESCHED_LAZY:
		need_resched = 'l';
		break;
	default:
		need_resched = '.';
		break;
	}

	/* NMI context dominates; 'H'/'h'/'s' for hard/soft irq context */
	hardsoft_irq =
		(nmi && hardirq)     ? 'Z' :
		nmi                  ? 'z' :
		(hardirq && softirq) ? 'H' :
		hardirq              ? 'h' :
		softirq              ? 's' :
		                       '.' ;

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	/* Low nibble of preempt_count as a hex digit, '.' when zero */
	if (entry->preempt_count & 0xf)
		trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
	else
		trace_seq_putc(s, '.');

	/* High nibble printed as its own column, '.' when zero */
	if (entry->preempt_count & 0xf0)
		trace_seq_printf(s, "%x", entry->preempt_count >> 4);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}
553 
/*
 * Print the "comm-pid cpu" prefix followed by the latency-format flag
 * characters (see trace_print_lat_fmt()).
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-7d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}
566 
#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/*
 * trace overhead mark: thresholds must stay sorted from largest to
 * smallest, as trace_find_mark() returns the first entry exceeded.
 */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK
582 
583 char trace_find_mark(unsigned long long d)
584 {
585 	int i;
586 	int size = ARRAY_SIZE(mark);
587 
588 	for (i = 0; i < size; i++) {
589 		if (d > mark[i].val)
590 			break;
591 	}
592 
593 	return (i == size) ? ' ' : mark[i].sym;
594 }
595 
/*
 * Print the latency-format timestamp columns: the absolute time since
 * the buffer's time_start and the delta to the next event, in one of
 * four layouts selected by the "verbose" option and whether this
 * buffer's clock counts in nanoseconds.
 */
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	struct trace_array *tr = iter->tr;
	unsigned long verbose = tr->trace_flags & TRACE_ITER(VERBOSE);
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		/* do_div() leaves the quotient in place, returns remainder */
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		/* compact layout: delta shown as an overhead mark character */
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}
640 
/*
 * Print a timestamp as "seconds.microseconds" when the buffer clock
 * counts in nanoseconds, otherwise as the raw counter value.
 */
static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
			     unsigned long long ts)
{
	unsigned long secs, usec_rem;
	unsigned long long t;

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(ts);
		/* split microseconds into whole seconds + remainder */
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu", ts);
}
655 
/*
 * Print the standard per-event context prefix:
 *   "comm-pid [(tgid)] [cpu] [lat-flags] timestamp: "
 * The tgid column appears only with the "record-tgid" option and the
 * flag characters only with the "irq-info" option.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);

	if (tr->trace_flags & TRACE_ITER(RECORD_TGID)) {
		unsigned int tgid = trace_find_tgid(entry->pid);

		/* tgid of 0 means no recorded mapping for this pid */
		if (!tgid)
			trace_seq_printf(s, "(-------) ");
		else
			trace_seq_printf(s, "(%7d) ", tgid);
	}

	trace_seq_printf(s, "[%03d] ", iter->cpu);

	if (tr->trace_flags & TRACE_ITER(IRQ_INFO))
		trace_print_lat_fmt(s, entry);

	trace_print_time(s, iter, iter->ts);
	trace_seq_puts(s, ": ");

	return !trace_seq_has_overflowed(s);
}
686 
/*
 * Print the latency-tracer context prefix, including the relative
 * timestamp to the next event (so the reader can see gaps). Verbose
 * mode dumps the raw entry fields instead of the compact flag columns.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	struct trace_entry *entry, *next_entry;
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long verbose = (tr->trace_flags & TRACE_ITER(VERBOSE));
	u64 next_ts;

	next_entry = trace_find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;

	/* trace_find_next_entry() may change iter->ent */
	entry = iter->ent;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %7d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count & 0xf, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}
719 
720 #ifdef CONFIG_FUNCTION_TRACE_ARGS
/*
 * Print the saved argument registers of a traced function as a
 * "(name=value, ...)" list, using BTF to recover the parameter names
 * and types of @func. Falls back to a bare "()" when no args were
 * recorded, the symbol cannot be resolved, or no BTF prototype exists.
 * @args holds at most FTRACE_REGS_MAX_ARGS register-sized values.
 */
void print_function_args(struct trace_seq *s, unsigned long *args,
			 unsigned long func)
{
	const struct btf_param *param;
	const struct btf_type *t;
	const char *param_name;
	char name[KSYM_NAME_LEN];
	unsigned long arg;
	struct btf *btf;
	s32 tid, nr = 0;
	int a, p, x;
	u16 encode;

	trace_seq_printf(s, "(");

	if (!args)
		goto out;
	if (lookup_symbol_name(func, name))
		goto out;

	/* TODO: Pass module name here too */
	t = btf_find_func_proto(name, &btf);
	if (IS_ERR_OR_NULL(t))
		goto out;

	param = btf_get_func_param(t, &nr);
	if (!param)
		goto out_put;

	/*
	 * 'a' indexes the saved registers, 'p' the BTF parameters; they
	 * diverge when a parameter spans multiple registers (see the
	 * default case below).
	 */
	for (a = 0, p = 0; p < nr; a++, p++) {
		if (p)
			trace_seq_puts(s, ", ");

		/* This only prints what the arch allows (6 args by default) */
		if (a == FTRACE_REGS_MAX_ARGS) {
			trace_seq_puts(s, "...");
			break;
		}

		arg = args[a];

		param_name = btf_name_by_offset(btf, param[p].name_off);
		if (param_name)
			trace_seq_printf(s, "%s=", param_name);
		t = btf_type_skip_modifiers(btf, param[p].type, &tid);

		switch (t ? BTF_INFO_KIND(t->info) : BTF_KIND_UNKN) {
		case BTF_KIND_UNKN:
			trace_seq_putc(s, '?');
			/* Still print unknown type values */
			fallthrough;
		case BTF_KIND_PTR:
			trace_seq_printf(s, "0x%lx", arg);
			break;
		case BTF_KIND_INT:
			encode = btf_int_encoding(t);
			/* Print unsigned ints as hex */
			if (encode & BTF_INT_SIGNED)
				trace_seq_printf(s, "%ld", arg);
			else
				trace_seq_printf(s, "0x%lx", arg);
			break;
		case BTF_KIND_ENUM:
			trace_seq_printf(s, "%ld", arg);
			break;
		default:
			/* This does not handle complex arguments */
			trace_seq_printf(s, "(%s)[0x%lx", btf_type_str(t), arg);
			/* consume additional registers for multi-word values */
			for (x = sizeof(long); x < t->size; x += sizeof(long)) {
				trace_seq_putc(s, ':');
				if (++a == FTRACE_REGS_MAX_ARGS) {
					trace_seq_puts(s, "...]");
					goto out_put;
				}
				trace_seq_printf(s, "0x%lx", args[a]);
			}
			trace_seq_putc(s, ']');
			break;
		}
	}
out_put:
	btf_put(btf);
out:
	trace_seq_printf(s, ")");
}
806 #endif
807 
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;

	/* events are hashed by type; scan the one matching bucket */
	hash_for_each_possible(event_hash, event, node, type) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
826 
/* IDA handing out dynamic event type numbers */
static DEFINE_IDA(trace_event_ida);

/* Only dynamically allocated types are returned; static ones are kept */
static void free_trace_event_type(int type)
{
	if (type >= __TRACE_LAST_TYPE)
		ida_free(&trace_event_ida, type);
}
834 
835 static int alloc_trace_event_type(void)
836 {
837 	int next;
838 
839 	/* Skip static defined type numbers */
840 	next = ida_alloc_range(&trace_event_ida, __TRACE_LAST_TYPE,
841 			       TRACE_EVENT_TYPE_MAX, GFP_KERNEL);
842 	if (next < 0)
843 		return 0;
844 	return next;
845 }
846 
/* Take the event hash read-side lock (see ftrace_find_event()) */
void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

/* Release the lock taken by trace_event_read_lock() */
void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}
856 
/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	if (!event->type) {
		/* No type requested: hand out a dynamic one */
		event->type = alloc_trace_event_type();
		if (!event->type)
			goto out;
	} else if (WARN(event->type > __TRACE_LAST_TYPE,
			"Need to add type to trace.h")) {
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* Fill any missing output callbacks with a no-op printer */
	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	hash_add(event_hash, &event->node, event->type);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);
915 
/*
 * Used by module code with the trace_event_sem held for write.
 * Removes @event from the hash and releases its dynamic type number.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hash_del(&event->node);
	free_trace_event_type(event->type);
	return 0;
}
925 
926 /**
927  * unregister_trace_event - remove a no longer used event
928  * @event: the event to remove
929  */
930 int unregister_trace_event(struct trace_event *event)
931 {
932 	down_write(&trace_event_sem);
933 	__unregister_trace_event(event);
934 	up_write(&trace_event_sem);
935 
936 	return 0;
937 }
938 EXPORT_SYMBOL_GPL(unregister_trace_event);
939 
940 /*
941  * Standard events
942  */
943 
944 static void print_array(struct trace_iterator *iter, void *pos,
945 			struct ftrace_event_field *field)
946 {
947 	int offset;
948 	int len;
949 	int i;
950 
951 	offset = *(int *)pos & 0xffff;
952 	len = *(int *)pos >> 16;
953 
954 	if (field)
955 		offset += field->offset + sizeof(int);
956 
957 	if (offset + len > iter->ent_size) {
958 		trace_seq_puts(&iter->seq, "<OVERFLOW>");
959 		return;
960 	}
961 
962 	pos = (void *)iter->ent + offset;
963 
964 	for (i = 0; i < len; i++, pos++) {
965 		if (i)
966 			trace_seq_putc(&iter->seq, ',');
967 		trace_seq_printf(&iter->seq, "%02x", *(unsigned char *)pos);
968 	}
969 }
970 
/*
 * Print every field of an event as " name=value", decoding each value
 * according to its filter_type and size. Used by the "fields" output
 * mode (see print_event_fields()). Values that cannot be read within
 * the recorded entry are reported as "<OVERFLOW>".
 */
static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,
			 struct list_head *head)
{
	struct ftrace_event_field *field;
	struct trace_array *tr = iter->tr;
	unsigned long long laddr;
	unsigned long addr;
	int offset;
	int len;
	int ret;
	int i;
	void *pos;
	char *str;

	/* fields are listed newest-first; reverse to get record order */
	list_for_each_entry_reverse(field, head, link) {
		trace_seq_printf(&iter->seq, " %s=", field->name);
		if (field->offset + field->size > iter->ent_size) {
			trace_seq_puts(&iter->seq, "<OVERFLOW>");
			continue;
		}
		pos = (void *)iter->ent + field->offset;

		switch (field->filter_type) {
		case FILTER_COMM:
		case FILTER_STATIC_STRING:
			trace_seq_printf(&iter->seq, "%.*s", field->size, (char *)pos);
			break;
		case FILTER_RDYN_STRING:
		case FILTER_DYN_STRING:
			/* descriptor word: low 16 bits offset, high 16 length */
			offset = *(int *)pos & 0xffff;
			len = *(int *)pos >> 16;

			/* __rel_loc offsets are relative to the descriptor */
			if (field->filter_type == FILTER_RDYN_STRING)
				offset += field->offset + sizeof(int);

			if (offset + len > iter->ent_size) {
				trace_seq_puts(&iter->seq, "<OVERFLOW>");
				break;
			}
			str = (char *)iter->ent + offset;
			/* Check if there's any non printable strings */
			for (i = 0; i < len; i++) {
				if (str[i] && !(isascii(str[i]) && isprint(str[i])))
					break;
			}
			if (i < len) {
				/* binary data: print sanitized text plus hex */
				for (i = 0; i < len; i++) {
					if (isascii(str[i]) && isprint(str[i]))
						trace_seq_putc(&iter->seq, str[i]);
					else
						trace_seq_putc(&iter->seq, '.');
				}
				trace_seq_puts(&iter->seq, " (");
				for (i = 0; i < len; i++) {
					if (i)
						trace_seq_putc(&iter->seq, ':');
					trace_seq_printf(&iter->seq, "%02x", str[i]);
				}
				trace_seq_putc(&iter->seq, ')');
			} else {
				trace_seq_printf(&iter->seq, "%.*s", len, str);
			}
			break;
		case FILTER_PTR_STRING:
			if (!iter->fmt_size)
				trace_iter_expand_format(iter);
			addr = trace_adjust_address(tr, *(unsigned long *)pos);
			/* the pointed-to string may be gone; read safely */
			ret = strncpy_from_kernel_nofault(iter->fmt, (void *)addr,
							  iter->fmt_size);
			if (ret < 0)
				trace_seq_printf(&iter->seq, "(0x%px)", pos);
			else
				trace_seq_printf(&iter->seq, "(0x%px:%s)",
						 pos, iter->fmt);
			break;
		case FILTER_TRACE_FN:
			addr = trace_adjust_address(tr, *(unsigned long *)pos);
			trace_seq_printf(&iter->seq, "%pS", (void *)addr);
			break;
		case FILTER_CPU:
		case FILTER_OTHER:
			switch (field->size) {
			case 1:
				if (isprint(*(char *)pos)) {
					trace_seq_printf(&iter->seq, "'%c'",
						 *(unsigned char *)pos);
				}
				trace_seq_printf(&iter->seq, "(%d)",
						 *(unsigned char *)pos);
				break;
			case 2:
				trace_seq_printf(&iter->seq, "0x%x (%d)",
						 *(unsigned short *)pos,
						 *(unsigned short *)pos);
				break;
			case 4:
				/* dynamic array info is 4 bytes */
				if (strstr(field->type, "__data_loc")) {
					print_array(iter, pos, NULL);
					break;
				}

				if (strstr(field->type, "__rel_loc")) {
					print_array(iter, pos, field);
					break;
				}

				addr = *(unsigned int *)pos;

				/* Some fields reference offset from _stext. */
				if (!strcmp(field->name, "caller_offs") ||
				    !strcmp(field->name, "parent_offs")) {
					unsigned long ip;

					ip = addr + (unsigned long)_stext;
					ip = trace_adjust_address(tr, ip);
					trace_seq_printf(&iter->seq, "%pS ", (void *)ip);
				}

				/* on 32-bit, a 4-byte value may be a pointer */
				if (sizeof(long) == 4) {
					addr = trace_adjust_address(tr, addr);
					trace_seq_printf(&iter->seq, "%pS (%d)",
							 (void *)addr, (int)addr);
				} else {
					trace_seq_printf(&iter->seq, "0x%x (%d)",
							 (unsigned int)addr, (int)addr);
				}
				break;
			case 8:
				laddr = *(unsigned long long *)pos;
				/* on 64-bit, an 8-byte value may be a pointer */
				if (sizeof(long) == 8) {
					laddr = trace_adjust_address(tr, (unsigned long)laddr);
					trace_seq_printf(&iter->seq, "%pS (%lld)",
							 (void *)(long)laddr, laddr);
				} else {
					trace_seq_printf(&iter->seq, "0x%llx (%lld)", laddr, laddr);
				}
				break;
			default:
				trace_seq_puts(&iter->seq, "<INVALID-SIZE>");
				break;
			}
			break;
		default:
			trace_seq_puts(&iter->seq, "<INVALID-TYPE>");
		}
	}
	trace_seq_putc(&iter->seq, '\n');
}
1120 
/*
 * Print an event as "name: field=value ..." using its registered field
 * list instead of its TP_printk() format. Ftrace-internal events (type
 * at or below __TRACE_LAST_TYPE) are looked up in ftrace_events since
 * their trace_event is not embedded in a trace_event_call.
 */
enum print_line_t print_event_fields(struct trace_iterator *iter,
				     struct trace_event *event)
{
	struct trace_event_call *call;
	struct list_head *head;

	lockdep_assert_held_read(&trace_event_sem);

	/* ftrace defined events have separate call structures */
	if (event->type <= __TRACE_LAST_TYPE) {
		bool found = false;

		list_for_each_entry(call, &ftrace_events, list) {
			if (call->event.type == event->type) {
				found = true;
				break;
			}
			/* No need to search all events */
			if (call->event.type > __TRACE_LAST_TYPE)
				break;
		}
		if (!found) {
			trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
			goto out;
		}
	} else {
		call = container_of(event, struct trace_event_call, event);
	}
	head = trace_get_fields(call);

	trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));

	if (head && !list_empty(head))
		print_fields(iter, call, head);
	else
		trace_seq_puts(&iter->seq, "No fields found\n");

 out:
	return trace_handle_return(&iter->seq);
}
1161 
1162 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
1163 				  struct trace_event *event)
1164 {
1165 	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
1166 
1167 	return trace_handle_return(&iter->seq);
1168 }
1169 
/*
 * Emit "ip [args]" and, when the PRINT_PARENT option is set and a
 * non-zero parent exists, append " <-parent_ip".  Both addresses are
 * passed through trace_adjust_address() before symbol printing.
 * @args may be NULL when the entry carries no argument snapshot.
 */
static void print_fn_trace(struct trace_seq *s, unsigned long ip,
			   unsigned long parent_ip, unsigned long *args,
			   struct trace_array *tr, int flags)
{
	ip = trace_adjust_address(tr, ip);
	parent_ip = trace_adjust_address(tr, parent_ip);

	seq_print_ip_sym(s, ip, flags);
	if (args)
		print_function_args(s, args, ip);

	if ((flags & TRACE_ITER(PRINT_PARENT)) && parent_ip) {
		trace_seq_puts(s, " <-");
		seq_print_ip_sym(s, parent_ip, flags);
	}
}
1186 
1187 /* TRACE_FN */
1188 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
1189 					struct trace_event *event)
1190 {
1191 	struct ftrace_entry *field;
1192 	struct trace_seq *s = &iter->seq;
1193 	unsigned long *args;
1194 	int args_size;
1195 
1196 	trace_assign_type(field, iter->ent);
1197 
1198 	args_size = iter->ent_size - offsetof(struct ftrace_entry, args);
1199 	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
1200 		args = field->args;
1201 	else
1202 		args = NULL;
1203 
1204 	print_fn_trace(s, field->ip, field->parent_ip, args, iter->tr, flags);
1205 	trace_seq_putc(s, '\n');
1206 
1207 	return trace_handle_return(s);
1208 }
1209 
1210 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
1211 				      struct trace_event *event)
1212 {
1213 	struct ftrace_entry *field;
1214 
1215 	trace_assign_type(field, iter->ent);
1216 
1217 	trace_seq_printf(&iter->seq, "%lx %lx\n",
1218 			 field->ip,
1219 			 field->parent_ip);
1220 
1221 	return trace_handle_return(&iter->seq);
1222 }
1223 
/* Hex mode: dump ip and parent_ip as raw hex field bytes. */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}
1237 
/* Binary mode: copy ip and parent_ip verbatim into the output. */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->ip);
	SEQ_PUT_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}
1251 
/* Output callbacks for TRACE_FN (function tracer) entries. */
static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

/* Registered by init_events() to bind TRACE_FN to the callbacks above. */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
1263 
/* TRACE_CTX and TRACE_WAKE */
1265 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
1266 					     char *delim)
1267 {
1268 	struct ctx_switch_entry *field;
1269 	char comm[TASK_COMM_LEN];
1270 	int S, T;
1271 
1272 
1273 	trace_assign_type(field, iter->ent);
1274 
1275 	T = task_index_to_char(field->next_state);
1276 	S = task_index_to_char(field->prev_state);
1277 	trace_find_cmdline(field->next_pid, comm);
1278 	trace_seq_printf(&iter->seq,
1279 			 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
1280 			 field->prev_pid,
1281 			 field->prev_prio,
1282 			 S, delim,
1283 			 field->next_cpu,
1284 			 field->next_pid,
1285 			 field->next_prio,
1286 			 T, comm);
1287 
1288 	return trace_handle_return(&iter->seq);
1289 }
1290 
/* Context switch: "==>" separates previous and next task. */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}
1296 
/* Wakeup: "  +" separates waker and wakee. */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}
1302 
/*
 * Shared raw-mode output for ctx-switch/wakeup entries.  @S overrides
 * the previous-task state character; pass 0 to derive it from the
 * recorded state index.
 */
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}
1324 
/* Raw ctx-switch: previous state comes from the entry itself. */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}
1330 
/* Raw wakeup: previous state forced to '+'. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}
1336 
1337 
/*
 * Shared hex-mode output for ctx-switch/wakeup entries.  As in
 * trace_ctxwake_raw(), @S == 0 means "use the recorded prev state".
 */
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}
1360 
/* Hex ctx-switch: previous state comes from the entry itself. */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}
1366 
/* Hex wakeup: previous state forced to '+'. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}
1372 
/*
 * Binary output for both ctx-switch and wakeup entries: fields are
 * copied verbatim, with state indices left unconverted.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}
1391 
/* Output callbacks for TRACE_CTX (context switch) entries. */
static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

/* Registered by init_events() to bind TRACE_CTX to the callbacks above. */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};
1403 
/* Output callbacks for TRACE_WAKE entries (binary shared with TRACE_CTX). */
static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

/* Registered by init_events() to bind TRACE_WAKE to the callbacks above. */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};
1415 
1416 /* TRACE_STACK */
1417 
/*
 * Print a kernel stack trace entry, one " => symbol" line per caller.
 * The caller array may be shorter than ent_size suggests: a ULONG_MAX
 * entry terminates the list early.
 */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	/* One past the last caller slot stored in this entry. */
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		/* Marker value: render a placeholder instead of a symbol. */
		if ((*p) == FTRACE_TRAMPOLINE_MARKER) {
			trace_seq_puts(s, "[FTRACE TRAMPOLINE]\n");
			continue;
		}
		seq_print_ip_sym(s, trace_adjust_address(iter->tr, *p), flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}
1447 
/* Output callbacks for TRACE_STACK entries (pretty-print only). */
static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

/* Registered by init_events() to bind TRACE_STACK to the callback above. */
static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};
1456 
1457 /* TRACE_USER_STACK */
/*
 * Print a user-space stack trace entry.  When the SYM_USEROBJ option
 * is set, look up the recorded tgid's mm so seq_print_user_ip() can
 * resolve addresses against the task's mappings; the mm reference is
 * dropped at the end.  The caller array stops at the first zero entry.
 */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;
	struct mm_struct *mm = NULL;
	unsigned int i;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");

	if (tr->trace_flags & TRACE_ITER(SYM_USEROBJ)) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = field->caller[i];

		if (!ip || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_user_ip(s, mm, ip, flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return trace_handle_return(s);
}
1500 
/* Output callbacks for TRACE_USER_STACK entries (pretty-print only). */
static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

/* Registered by init_events() to bind TRACE_USER_STACK to the callback above. */
static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};
1509 
1510 /* TRACE_HWLAT */
/* Pretty-print a hardware-latency sample (seq, inner/outer us, ts, count). */
static enum print_line_t
trace_hwlat_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct hwlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d",
			 field->seqnum,
			 field->duration,
			 field->outer_duration,
			 &field->timestamp,
			 field->count);

	/* NMI details are appended only if any NMIs hit the sample window. */
	if (field->nmi_count) {
		/*
		 * The generic sched_clock() is not NMI safe, thus
		 * we only record the count and not the time.
		 */
		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
			trace_seq_printf(s, " nmi-total:%llu",
					 field->nmi_total_ts);
		trace_seq_printf(s, " nmi-count:%u",
				 field->nmi_count);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
1544 
/* Raw hwlat output: duration, outer duration, timestamp (s, ns), seqnum. */
static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct hwlat_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
			 field->duration,
			 field->outer_duration,
			 (long long)field->timestamp.tv_sec,
			 field->timestamp.tv_nsec,
			 field->seqnum);

	return trace_handle_return(s);
}
1563 
/* Output callbacks for TRACE_HWLAT entries. */
static struct trace_event_functions trace_hwlat_funcs = {
	.trace		= trace_hwlat_print,
	.raw		= trace_hwlat_raw,
};

/* Registered by init_events() to bind TRACE_HWLAT to the callbacks above. */
static struct trace_event trace_hwlat_event = {
	.type		= TRACE_HWLAT,
	.funcs		= &trace_hwlat_funcs,
};
1573 
1574 /* TRACE_OSNOISE */
/*
 * Pretty-print an osnoise sample: runtime, noise, available CPU %
 * (with 5 decimal places), max single sample, then the per-source
 * interference counters.
 */
static enum print_line_t
trace_osnoise_print(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct osnoise_entry *field;
	u64 ratio, ratio_dec;
	u64 net_runtime;

	trace_assign_type(field, entry);

	/*
	 * compute the available % of cpu time.
	 */
	net_runtime = field->runtime - field->noise;
	/*
	 * NOTE(review): do_div() with field->runtime == 0 would divide
	 * by zero; presumably the tracer never records a zero runtime —
	 * confirm against the osnoise recorder.
	 */
	ratio = net_runtime * 10000000;
	do_div(ratio, field->runtime);
	ratio_dec = do_div(ratio, 100000);

	trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
			 field->runtime,
			 field->noise,
			 ratio, ratio_dec,
			 field->max_sample);

	trace_seq_printf(s, " %6u", field->hw_count);
	trace_seq_printf(s, " %6u", field->nmi_count);
	trace_seq_printf(s, " %6u", field->irq_count);
	trace_seq_printf(s, " %6u", field->softirq_count);
	trace_seq_printf(s, " %6u", field->thread_count);

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
1611 
1612 static enum print_line_t
1613 trace_osnoise_raw(struct trace_iterator *iter, int flags,
1614 		  struct trace_event *event)
1615 {
1616 	struct osnoise_entry *field;
1617 	struct trace_seq *s = &iter->seq;
1618 
1619 	trace_assign_type(field, iter->ent);
1620 
1621 	trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
1622 			 field->runtime,
1623 			 field->noise,
1624 			 field->max_sample,
1625 			 field->hw_count,
1626 			 field->nmi_count,
1627 			 field->irq_count,
1628 			 field->softirq_count,
1629 			 field->thread_count);
1630 
1631 	return trace_handle_return(s);
1632 }
1633 
/* Output callbacks for TRACE_OSNOISE entries. */
static struct trace_event_functions trace_osnoise_funcs = {
	.trace		= trace_osnoise_print,
	.raw		= trace_osnoise_raw,
};

/* Registered by init_events() to bind TRACE_OSNOISE to the callbacks above. */
static struct trace_event trace_osnoise_event = {
	.type		= TRACE_OSNOISE,
	.funcs		= &trace_osnoise_funcs,
};
1643 
1644 /* TRACE_TIMERLAT */
1645 
/* Human-readable names indexed by a timerlat entry's context value. */
static const char * const timerlat_lat_context[] = {"irq", "thread", "user-ret"};
/*
 * Pretty-print a timerlat sample: sequence number, context name and
 * timer latency in ns.
 * NOTE(review): field->context indexes the 3-entry table above with no
 * range check — assumed always valid as written by the tracer; confirm.
 */
static enum print_line_t
trace_timerlat_print(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct timerlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
			 field->seqnum,
			 timerlat_lat_context[field->context],
			 field->timer_latency);

	return trace_handle_return(s);
}
1664 
/* Raw timerlat output: seqnum, numeric context, latency in ns. */
static enum print_line_t
trace_timerlat_raw(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct timerlat_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%u %d %llu\n",
			 field->seqnum,
			 field->context,
			 field->timer_latency);

	return trace_handle_return(s);
}
1681 
/* Output callbacks for TRACE_TIMERLAT entries. */
static struct trace_event_functions trace_timerlat_funcs = {
	.trace		= trace_timerlat_print,
	.raw		= trace_timerlat_raw,
};

/* Registered by init_events() to bind TRACE_TIMERLAT to the callbacks above. */
static struct trace_event trace_timerlat_event = {
	.type		= TRACE_TIMERLAT,
	.funcs		= &trace_timerlat_funcs,
};
1691 
1692 /* TRACE_BPUTS */
/* Pretty-print a trace_bputs() record: "symbol: string". */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}
1709 
1710 
/* Raw bputs output: hex ip followed by the literal string. */
static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}
1725 
/* Output callbacks for TRACE_BPUTS entries. */
static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

/* Registered by init_events() to bind TRACE_BPUTS to the callbacks above. */
static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};
1735 
1736 /* TRACE_BPRINT */
/* Pretty-print a trace_bprintk() record: "symbol: formatted message". */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	/* Expand the binary-encoded args against the saved format string. */
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}
1753 
1754 
/* Raw bprint output: hex ip followed by the expanded message. */
static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}
1769 
/* Output callbacks for TRACE_BPRINT entries. */
static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

/* Registered by init_events() to bind TRACE_BPRINT to the callbacks above. */
static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};
1779 
1780 /* TRACE_PRINT */
/* Pretty-print a trace_printk() record: "symbol: message". */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long ip;

	trace_assign_type(field, iter->ent);

	/* Adjust the recorded address before symbol lookup. */
	ip = trace_adjust_address(iter->tr, field->ip);

	seq_print_ip_sym(s, ip, flags);
	trace_seq_printf(s, ": %s", field->buf);

	return trace_handle_return(s);
}
1797 
1798 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1799 					 struct trace_event *event)
1800 {
1801 	struct print_entry *field;
1802 
1803 	trace_assign_type(field, iter->ent);
1804 
1805 	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
1806 
1807 	return trace_handle_return(&iter->seq);
1808 }
1809 
/* Output callbacks for TRACE_PRINT entries. */
static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

/* Registered by init_events() to bind TRACE_PRINT to the callbacks above. */
static struct trace_event trace_print_event = {
	.type	 	= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};
1819 
/*
 * Print a raw-data entry as "# <id> buf:" followed by the payload as
 * hex bytes.  The payload length is whatever of ent_size remains after
 * the raw_data_entry header.
 */
static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct raw_data_entry *field;
	int i;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %x buf:", field->id);

	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
		trace_seq_printf(&iter->seq, " %02x",
				 (unsigned char)field->buf[i]);

	trace_seq_putc(&iter->seq, '\n');

	return trace_handle_return(&iter->seq);
}
1838 
/* Output callbacks for TRACE_RAW_DATA; raw and trace modes are identical. */
static struct trace_event_functions trace_raw_data_funcs = {
	.trace		= trace_raw_data,
	.raw		= trace_raw_data,
};

/* Registered by init_events() to bind TRACE_RAW_DATA to the callback above. */
static struct trace_event trace_raw_data_event = {
	.type	 	= TRACE_RAW_DATA,
	.funcs		= &trace_raw_data_funcs,
};
1848 
/* Raw func-repeats output: ip, parent ip, repeat count, delta timestamp. */
static enum print_line_t
trace_func_repeats_raw(struct trace_iterator *iter, int flags,
			 struct trace_event *event)
{
	struct func_repeats_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%lu %lu %u %llu\n",
			 field->ip,
			 field->parent_ip,
			 field->count,
			 FUNC_REPEATS_GET_DELTA_TS(field));

	return trace_handle_return(s);
}
1866 
/*
 * Pretty-print a func-repeats entry: the function trace line (no args)
 * plus the repeat count and the timestamp of the last repetition,
 * reconstructed as iter->ts minus the stored delta.
 */
static enum print_line_t
trace_func_repeats_print(struct trace_iterator *iter, int flags,
			 struct trace_event *event)
{
	struct func_repeats_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	print_fn_trace(s, field->ip, field->parent_ip, NULL, iter->tr, flags);
	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
	trace_print_time(s, iter,
			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
	trace_seq_puts(s, ")\n");

	return trace_handle_return(s);
}
1884 
/* Output callbacks for TRACE_FUNC_REPEATS entries. */
static struct trace_event_functions trace_func_repeats_funcs = {
	.trace		= trace_func_repeats_print,
	.raw		= trace_func_repeats_raw,
};

/* Registered by init_events() to bind TRACE_FUNC_REPEATS to the callbacks above. */
static struct trace_event trace_func_repeats_event = {
	.type	 	= TRACE_FUNC_REPEATS,
	.funcs		= &trace_func_repeats_funcs,
};
1894 
/* NULL-terminated list of built-in events, registered by init_events(). */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	&trace_hwlat_event,
	&trace_osnoise_event,
	&trace_timerlat_event,
	&trace_raw_data_event,
	&trace_func_repeats_event,
	NULL
};
1911 
1912 __init int init_events(void)
1913 {
1914 	struct trace_event *event;
1915 	int i, ret;
1916 
1917 	for (i = 0; events[i]; i++) {
1918 		event = events[i];
1919 		ret = register_trace_event(event);
1920 		WARN_ONCE(!ret, "event %d failed to register", event->type);
1921 	}
1922 
1923 	return 0;
1924 }
1925