// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	union {
		struct ftrace_graph_ent_entry	ent;
		struct fgraph_retaddr_ent_entry	rent;
	} ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display function return value? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display function return value in hexadecimal format? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	/* Display function return address? */
	{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
#endif
#ifdef CONFIG_FUNCTION_TRACE_ARGS
	/* Display function arguments? */
	{ TRACER_OPT(funcgraph-args, TRACE_GRAPH_ARGS) },
#endif
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};
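
/*
 * Example (illustrative, not from this file): these options are toggled
 * at runtime through the tracefs trace_options interface:
 *
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 *   # echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *   # echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * The tracefs mount point may differ (/sys/kernel/debug/tracing on
 * older systems).
 */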

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static bool tracer_flags_is_set(u32 flags)
{
	return (tracer_flags.val & flags) == flags;
}

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

static int __graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace,
			 unsigned int trace_ctx, struct ftrace_regs *fregs)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;
	int size;

	/* If fregs is defined, reserve space for FTRACE_REGS_MAX_ARGS long-sized words */
	size = sizeof(*entry) + (FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long));

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, size, trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;

#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	if (fregs) {
		for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
			entry->args[i] = ftrace_regs_get_argument(fregs, i);
	}
#endif

	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	return __graph_entry(tr, trace, trace_ctx, NULL);
}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct fgraph_retaddr_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RETADDR_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent.func = trace->func;
	entry->graph_ent.depth = trace->depth;
	entry->graph_ent.retaddr = retaddr;
	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}
#else
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr)
{
	return 1;
}
#endif

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

struct fgraph_times {
	unsigned long long		calltime;
	unsigned long long		sleeptime; /* may be optional! */
};

static int graph_entry(struct ftrace_graph_ent *trace,
		       struct fgraph_ops *gops,
		       struct ftrace_regs *fregs)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct fgraph_times *ftimes;
	unsigned int trace_ctx;
	int ret = 0;

	if (*task_var & TRACE_GRAPH_NOTRACE)
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE;
		/*
		 * Need to return 1 so that the return handler is called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	if (fgraph_sleep_time) {
		/* Only need to record the calltime */
		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
	} else {
		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
		if (ftimes)
			ftimes->sleeptime = current->ftrace_sleeptime;
	}
	if (!ftimes)
		return 0;

	ftimes->calltime = trace_clock_local();

	/*
	 * Stop here if tracing_threshold is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	trace_ctx = tracing_gen_ctx();
	if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
	    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
		unsigned long retaddr = ftrace_graph_top_ret_addr(current);
		ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
	} else {
		ret = __graph_entry(tr, trace, trace_ctx, fregs);
	}

	return ret;
}

int trace_graph_entry(struct ftrace_graph_ent *trace,
		      struct fgraph_ops *gops,
		      struct ftrace_regs *fregs)
{
	return graph_entry(trace, gops, NULL);
}

static int trace_graph_entry_args(struct ftrace_graph_ent *trace,
				  struct fgraph_ops *gops,
				  struct ftrace_regs *fregs)
{
	return graph_entry(trace, gops, fregs);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx, time, time);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned int trace_ctx,
			  u64 calltime, u64 rettime)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	entry->calltime				= calltime;
	entry->rettime				= rettime;
	trace_buffer_unlock_commit_nostack(buffer, event);
}

static void handle_nosleeptime(struct ftrace_graph_ret *trace,
			       struct fgraph_times *ftimes,
			       int size)
{
	if (fgraph_sleep_time || size < sizeof(*ftimes))
		return;

	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
}
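
/*
 * Worked example (illustrative): a function is entered while
 * current->ftrace_sleeptime == 50us, so sleeptime = 50us is saved at
 * entry. The task then sleeps for 300us inside the function, making
 * current->ftrace_sleeptime == 350us by return time. calltime is pushed
 * forward by 350 - 50 = 300us, so the reported duration
 * (rettime - calltime) excludes the time spent sleeping.
 */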

void trace_graph_return(struct ftrace_graph_ret *trace,
			struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct fgraph_times *ftimes;
	unsigned int trace_ctx;
	u64 calltime, rettime;
	int size;

	rettime = trace_clock_local();

	ftrace_graph_addr_finish(gops, trace);

	if (*task_var & TRACE_GRAPH_NOTRACE) {
		*task_var &= ~TRACE_GRAPH_NOTRACE;
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	calltime = ftimes->calltime;

	trace_ctx = tracing_gen_ctx();
	__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
				      struct fgraph_ops *gops,
				      struct ftrace_regs *fregs)
{
	struct fgraph_times *ftimes;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	if (tracing_thresh &&
	    (trace_clock_local() - ftimes->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace, gops, fregs);
}

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	struct fgraph_ops *gops;

	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
	if (!gops)
		return -ENOMEM;

	gops->entryfunc = &trace_graph_entry;
	gops->retfunc = &trace_graph_return;

	tr->gops = gops;
	gops->private = tr;

	fgraph_init_ops(&gops->ops, ops);

	return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
	kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	tr->gops = &funcgraph_ops;
	funcgraph_ops.private = tr;
	fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	if (tracer_flags_is_set(TRACE_GRAPH_ARGS))
		tr->gops->entryfunc = trace_graph_entry_args;
	else
		tr->gops->entryfunc = trace_graph_entry;

	if (tracing_thresh)
		tr->gops->retfunc = trace_graph_thresh_return;
	else
		tr->gops->retfunc = trace_graph_return;

	/* Make gops functions visible before we start tracing */
	smp_mb();

	ret = register_ftrace_graph(tr->gops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static int ftrace_graph_trace_args(struct trace_array *tr, int set)
{
	trace_func_graph_ent_t entry;

	if (set)
		entry = trace_graph_entry_args;
	else
		entry = trace_graph_entry;

	/* See if there are any changes */
	if (tr->gops->entryfunc == entry)
		return 0;

	unregister_ftrace_graph(tr->gops);

	tr->gops->entryfunc = entry;

	/* Make gops functions visible before we start tracing */
	smp_mb();
	return register_ftrace_graph(tr->gops);
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* Leading spaces to center the text */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Trailing spaces to center the text */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
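
/*
 * Example (illustrative): pid 1755 running "sshd" gives len = 9, so
 * spaces = 5 and the 14-character field reads "  sshd-1755   "
 * (note that comm is truncated to 7 characters above).
 */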

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent.ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))
				data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;
			else
				data->ent.ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}
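
/*
 * A "leaf" is an entry record immediately followed by its own return
 * record with nothing nested in between. The two records are then
 * folded into a single output line (illustrative):
 *
 *  1)   0.337 us    |    kfree();
 *
 * instead of a separate "kfree() {" entry line and a closing "}" line.
 */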

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	addr += iter->tr->text_delta;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
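
/*
 * Entering and leaving interrupt context is marked with arrows in the
 * DURATION column (illustrative):
 *
 *  1)   ==========> |
 *  1)               |    smp_apic_timer_interrupt() {
 *  ...
 *  1)   <========== |
 */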

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
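
/*
 * Worked example (illustrative): duration = 1234567 ns. do_div() above
 * leaves duration = 1234 (us) and nsecs_rem = 567, so "1234.567 us " is
 * emitted; shorter values are padded to the fixed 8-character column.
 */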

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of the execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
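
/*
 * The overhead mark comes from trace_find_mark() (defined in
 * trace_output.c). Per the ftrace documentation: '+' means the function
 * exceeded 10 us, '!' 100 us, '#' 1000 us, '*' 10 ms, '@' 100 ms and
 * '$' 1 sec.
 */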

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
#else
#define __TRACE_GRAPH_PRINT_RETVAL 0
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
#define __TRACE_GRAPH_PRINT_RETADDR TRACE_GRAPH_PRINT_RETADDR
static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_entry *entry,
				u32 trace_flags, bool comment)
{
	if (comment)
		trace_seq_puts(s, " /*");

	trace_seq_puts(s, " <-");
	seq_print_ip_sym(s, entry->graph_ent.retaddr, trace_flags | TRACE_ITER_SYM_OFFSET);

	if (comment)
		trace_seq_puts(s, " */");
}
#else
#define __TRACE_GRAPH_PRINT_RETADDR 0
#define print_graph_retaddr(_seq, _entry, _tflags, _comment)		do { } while (0)
#endif

#if defined(CONFIG_FUNCTION_GRAPH_RETVAL) || defined(CONFIG_FUNCTION_GRAPH_RETADDR)

static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
				struct ftrace_graph_ret *graph_ret, void *func,
				u32 opt_flags, u32 trace_flags, int args_size)
{
	unsigned long err_code = 0;
	unsigned long retval = 0;
	bool print_retaddr = false;
	bool print_retval = false;
	bool hex_format = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL_HEX);

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	retval = graph_ret->retval;
	print_retval = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL);
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	print_retaddr = !!(opt_flags & TRACE_GRAPH_PRINT_RETADDR);
#endif

	if (print_retval && retval && !hex_format) {
		/* Check if the return value matches the negative format */
		if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
			(((u64)retval) >> 32) == 0) {
			err_code = sign_extend64(retval, 31);
		} else {
			err_code = retval;
		}

		if (!IS_ERR_VALUE(err_code))
			err_code = 0;
	}

	if (entry) {
		if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
			print_retaddr = false;

		trace_seq_printf(s, "%ps", func);

		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
			print_function_args(s, entry->args, (unsigned long)func);
			trace_seq_putc(s, ';');
		} else
			trace_seq_puts(s, "();");

		if (print_retval || print_retaddr)
			trace_seq_puts(s, " /*");
	} else {
		print_retaddr = false;
		trace_seq_printf(s, "} /* %ps", func);
	}

	if (print_retaddr)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
				    trace_flags, false);

	if (print_retval) {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, " ret=0x%lx", retval);
		else
			trace_seq_printf(s, " ret=%ld", err_code);
	}

	if (!entry || print_retval || print_retaddr)
		trace_seq_puts(s, " */");
}

#else

#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags, args_size) \
	do {} while (0)

#endif

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	unsigned long ret_func;
	int args_size;
	int cpu = iter->cpu;
	int i;

	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = ret_entry->rettime - ret_entry->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	ret_func = graph_ret->func + iter->tr->text_delta;

	/*
	 * Write out the function return value or return address
	 */
	if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
		print_graph_retval(s, entry, graph_ret,
				   (void *)graph_ret->func + iter->tr->text_delta,
				   flags, tr->trace_flags, args_size);
	} else {
		trace_seq_printf(s, "%ps", (void *)ret_func);

		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
			print_function_args(s, entry->args, ret_func);
			trace_seq_putc(s, ';');
		} else
			trace_seq_puts(s, "();");
	}
	trace_seq_putc(s, '\n');

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	int args_size;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	func = call->func + iter->tr->text_delta;

	trace_seq_printf(s, "%ps", (void *)func);

	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);

	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
		print_function_args(s, entry->args, func);
	else
		trace_seq_puts(s, "()");

	trace_seq_puts(s, " {");

	if ((flags & __TRACE_GRAPH_PRINT_RETADDR) &&
		entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
			tr->trace_flags, true);
	trace_seq_putc(s, '\n');

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
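
/*
 * Nesting is rendered with TRACE_GRAPH_INDENT (2) spaces per depth
 * level (illustrative):
 *
 *  1)               |  do_sys_open() {
 *  1)               |    getname() {
 *  1)   6.447 us    |      kmem_cache_alloc();
 */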

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth, thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;
	/*
	 * print_graph_entry() may consume the current event,
	 * thus @field may become invalid, so we need to save it.
	 * sizeof(struct ftrace_graph_ent_entry) is very small,
	 * so it can be safely saved on the stack.
	 */
	struct ftrace_graph_ent_entry *entry;
	u8 save_buf[sizeof(*entry) + FTRACE_REGS_MAX_ARGS * sizeof(long)];

	/* The ent_size is expected to be as big as the entry */
	if (iter->ent_size > sizeof(save_buf))
		iter->ent_size = sizeof(save_buf);

	entry = (void *)save_buf;
	memcpy(entry, field, iter->ent_size);

	call = &entry->graph_ent;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, entry);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, entry, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	struct ftrace_graph_ret *trace = &retentry->ret;
	u64 calltime = retentry->calltime;
	u64 rettime = retentry->rettime;
	unsigned long long duration = rettime - calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	func = trace->func + iter->tr->text_delta;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
		print_graph_retval(s, NULL, trace, (void *)func, flags,
				   tr->trace_flags, 0);
	} else {
		/*
		 * If the return function does not have a matching entry,
		 * then the entry was lost. Instead of just printing
		 * the '}' and letting the user guess what function this
		 * belongs to, write out the function name. Always do
		 * that if the funcgraph-tail option is enabled.
		 */
		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
			trace_seq_puts(s, "}");
		else
			trace_seq_printf(s, "} /* %ps */", (void *)func);
	}
	trace_seq_putc(s, '\n');

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent.ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter, flags);
	}
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	case TRACE_GRAPH_RETADDR_ENT: {
		struct fgraph_retaddr_ent_entry saved;
		struct fgraph_retaddr_ent_entry *rfield;

		trace_assign_type(rfield, entry);
		saved = *rfield;
		return print_graph_entry((struct ftrace_graph_ent_entry *)&saved, s, iter, flags);
	}
#endif
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(field, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
		iter->private = NULL;
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	if (bit == TRACE_GRAPH_ARGS)
		return ftrace_graph_trace_args(tr, set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
static struct trace_event graph_trace_retaddr_entry_event = {
	.type		= TRACE_GRAPH_RETADDR_ENT,
	.funcs		= &graph_functions,
};
#endif

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}
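
/*
 * Example usage of the tracefs file created below (illustrative);
 * 0 means no depth limit:
 *
 *   # echo 2 > /sys/kernel/tracing/max_graph_depth
 *   # cat /sys/kernel/tracing/max_graph_depth
 *   2
 */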

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	if (!register_trace_event(&graph_trace_retaddr_entry_event)) {
		pr_warn("Warning: could not register graph trace retaddr events\n");
		return 1;
	}
#endif

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);