// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

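/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo irqsoff > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/tracing_on
 *   ... run workload ...
 *   cat /sys/kernel/tracing/tracing_max_latency
 */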
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

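/* Set non-zero on a CPU while a critical section is being timed there */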
static DEFINE_PER_CPU(int, tracing_cpu);

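/*
 * Serializes max-latency updates: protects max_sequence and the snapshot
 * taken via update_max_tr_single() in check_critical_timing().
 */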
static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

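/* trace_flags as they were at init time, restored by __irqsoff_tracer_reset() */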
static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = local_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	local_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);

	trace_function(tr, ip, parent_ip, trace_ctx, fregs);

	local_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

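	/* Mode is changing: stop in the old mode, reset state, restart in the new one */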
	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
			       struct fgraph_ops *gops,
			       struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	u64 *calltime;
	int ret;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
	if (!calltime)
		return 0;

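	/* Stash the entry timestamp; irqsoff_graph_return() retrieves it */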
	*calltime = trace_clock_local();

	trace_ctx = tracing_gen_ctx_flags(flags);
	ret = __trace_graph_entry(tr, trace, trace_ctx);
	local_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
				 struct fgraph_ops *gops,
				 struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	u64 *calltime;
	u64 rettime;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	rettime = trace_clock_local();
	calltime = fgraph_retrieve_data(gops->idx, &size);
	if (!calltime)
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);
	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
	local_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
}

#else
static inline void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	return trace_function(tr, ip, parent_ip, trace_ctx, NULL);
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

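/*
 * Called when a critical section ends: compute its length and, if it
 * qualifies per report_latency(), record it as the new max latency.
 */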
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	unsigned int trace_ctx;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	trace_ctx = tracing_gen_ctx();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, trace_ctx, 5);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}

static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	long disabled;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || local_read(&data->disabled))
		return;

	disabled = local_inc_return(&data->disabled);

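	/* Only the outermost (non-nested) caller records the section start */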
	if (disabled == 1) {
		data->critical_sequence = max_sequence;
		data->preempt_timestamp = ftrace_now(cpu);
		data->critical_start = parent_ip ? : ip;

		__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

		per_cpu(tracing_cpu, cpu) = 1;
	}

	local_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	long disabled;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || local_read(&data->disabled))
		return;

	disabled = local_inc_return(&data->disabled);

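	/* Only the outermost caller closes the section and checks its length */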
	if (disabled == 1) {
		trace_ctx = tracing_gen_ctx();
		__trace_function(tr, ip, parent_ip, trace_ctx);
		check_critical_timing(tr, data, parent_ip ? : ip, cpu);
		data->critical_start = 0;
	}

	local_dec(&data->disabled);
}

/*
 * start_critical_timings()/stop_critical_timings() are used to exclude
 * known stoppages (e.g. the idle loop) from the measurement:
 */
void start_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

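/* Only one instance of the irqsoff-family tracers can be used at a time */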
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
	/* without pause, we will produce garbage if another latency occurs */
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
	.open           = irqsoff_trace_open,
	.close          = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */