// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_args_trace_call(unsigned long ip, unsigned long parent_ip,
                         struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *op,
                                     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {

        TRACE_FUNC_NO_OPTS = 0x0, /* No flags set. */
        TRACE_FUNC_OPT_STACK = 0x1,
        TRACE_FUNC_OPT_NO_REPEATS = 0x2,
        TRACE_FUNC_OPT_ARGS = 0x4,

        /* Update this to next highest bit. */
        TRACE_FUNC_OPT_HIGHEST_BIT = 0x8
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        /* The top level array uses the "global_ops" */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_PID;

        tr->ops = ops;
        ops->private = tr;

        return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
        kfree(tr->ops);
        tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;
        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        if (!tr->ops)
                return -EINVAL;

        ret = allocate_fgraph_ops(tr, tr->ops);
        if (ret) {
                kfree(tr->ops);
                return ret;
        }

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        ftrace_free_ftrace_ops(tr);
        free_fgraph_ops(tr);
}

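/*
 * Pick the tracing callback that matches the current option flags.
 * Returns NULL if the requested combination of options is not supported.
 */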
static ftrace_func_t select_trace_function(u32 flags_val)
{
        switch (flags_val & TRACE_FUNC_OPT_MASK) {
        case TRACE_FUNC_NO_OPTS:
                return function_trace_call;
        case TRACE_FUNC_OPT_ARGS:
                return function_args_trace_call;
        case TRACE_FUNC_OPT_STACK:
                return function_stack_trace_call;
        case TRACE_FUNC_OPT_NO_REPEATS:
                return function_no_repeats_trace_call;
        case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
                return function_stack_no_repeats_trace_call;
        default:
                return NULL;
        }
}

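/*
 * Make sure the per-CPU buffer used to detect repeated calls is
 * allocated when the "func-no-repeats" option is requested.
 * Returns false if the allocation fails.
 */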
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
        if (!tr->last_func_repeats &&
            (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
                tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
                if (!tr->last_func_repeats)
                        return false;
        }

        return true;
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;
        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        func = select_trace_function(func_flags.val);
        if (!func)
                return -EINVAL;

        if (!handle_func_repeats(tr, func_flags.val))
                return -ENOMEM;

        ftrace_init_array_ops(tr, func);

        tr->array_buffer.cpu = raw_smp_processor_id();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->array_buffer);
}

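/*
 * When the function graph tracer is active, the parent ip seen here may
 * be its return_to_handler trampoline rather than the real caller.
 * In that case look up the original return address from the shadow stack.
 */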
/* fregs are guaranteed not to be NULL if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
        unsigned long true_parent_ip;
        int idx = 0;

        true_parent_ip = parent_ip;
        if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
                true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
                                (unsigned long *)ftrace_regs_get_stack_pointer(fregs));
        return true_parent_ip;
}
#else
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
        return parent_ip;
}
#endif

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;

        if (unlikely(!tr->function_enabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        parent_ip = function_get_true_parent_ip(parent_ip, fregs);

        trace_ctx = tracing_gen_ctx_dec();

        data = this_cpu_ptr(tr->array_buffer.data);
        if (!atomic_read(&data->disabled))
                trace_function(tr, ip, parent_ip, trace_ctx, NULL);

        ftrace_test_recursion_unlock(bit);
}

static void
function_args_trace_call(unsigned long ip, unsigned long parent_ip,
                         struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;
        int cpu;

        if (unlikely(!tr->function_enabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        trace_ctx = tracing_gen_ctx();

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        if (!atomic_read(&data->disabled))
                trace_function(tr, ip, parent_ip, trace_ctx, fregs);

        ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        unsigned int trace_ctx;
        int skip = STACK_SKIP;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                trace_ctx = tracing_gen_ctx_flags(flags);
                trace_function(tr, ip, parent_ip, trace_ctx, NULL);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
                if (ftrace_pids_enabled(op))
                        skip++;
#endif
                __trace_stack(tr, trace_ctx, skip);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

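/*
 * Check whether the current call repeats the previously traced one.
 * If so, only refresh the timestamp and bump the repeat counter; the
 * caller then skips writing a new function entry.
 */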
static inline bool is_repeat_check(struct trace_array *tr,
                                   struct trace_func_repeats *last_info,
                                   unsigned long ip, unsigned long parent_ip)
{
        if (last_info->ip == ip &&
            last_info->parent_ip == parent_ip &&
            last_info->count < U16_MAX) {
                last_info->ts_last_call =
                        ring_buffer_time_stamp(tr->array_buffer.buffer);
                last_info->count++;
                return true;
        }

        return false;
}

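/*
 * Flush any pending repeat count as a "function repeats" event and
 * start tracking the new ip/parent_ip pair.
 */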
static inline void process_repeats(struct trace_array *tr,
                                   unsigned long ip, unsigned long parent_ip,
                                   struct trace_func_repeats *last_info,
                                   unsigned int trace_ctx)
{
        if (last_info->count) {
                trace_last_func_repeats(tr, last_info, trace_ctx);
                last_info->count = 0;
        }

        last_info->ip = ip;
        last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op,
                               struct ftrace_regs *fregs)
{
        struct trace_func_repeats *last_info;
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;

        if (unlikely(!tr->function_enabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
        data = this_cpu_ptr(tr->array_buffer.data);
        if (atomic_read(&data->disabled))
                goto out;

        /*
         * An interrupt may happen at any place here. But as far as I can see,
         * the only damage that this can cause is to mess up the repetition
         * counter without valuable data being lost.
         * TODO: think about a solution that is better than just hoping to be
         * lucky.
         */
        last_info = this_cpu_ptr(tr->last_func_repeats);
        if (is_repeat_check(tr, last_info, ip, parent_ip))
                goto out;

        trace_ctx = tracing_gen_ctx_dec();
        process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

        trace_function(tr, ip, parent_ip, trace_ctx, NULL);

out:
        ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *op,
                                     struct ftrace_regs *fregs)
{
        struct trace_func_repeats *last_info;
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        unsigned int trace_ctx;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
                if (is_repeat_check(tr, last_info, ip, parent_ip))
                        goto out;

                trace_ctx = tracing_gen_ctx_flags(flags);
                process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

                trace_function(tr, ip, parent_ip, trace_ctx, NULL);
                __trace_stack(tr, trace_ctx, STACK_SKIP);
        }

out:
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
#ifdef CONFIG_FUNCTION_TRACE_ARGS
        { TRACER_OPT(func-args, TRACE_FUNC_OPT_ARGS) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
        .opts = func_opts
};

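/*
 * Keep function_enabled clear while the ops is being registered so the
 * callbacks do not trace until registration has fully completed.
 */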
static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

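/*
 * Called when one of the func_opts options is toggled at run time.
 * Switch tr->ops->func to the callback matching the new flag set; the
 * ftrace_ops is unregistered and re-registered around the change.
 */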
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        ftrace_func_t func;
        u32 new_flags;

        /* Do nothing if already set. */
        if (!!set == !!(func_flags.val & bit))
                return 0;

        /* We can change this flag only when not running. */
        if (tr->current_trace != &function_trace)
                return 0;

        new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
        func = select_trace_function(new_flags);
        if (!func)
                return -EINVAL;

        /* Check if there's anything to change. */
        if (tr->ops->func == func)
                return 0;

        if (!handle_func_repeats(tr, new_flags))
                return -ENOMEM;

        unregister_ftrace_function(tr->ops);
        tr->ops->func = func;
        register_ftrace_function(tr->ops);

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .flags = &func_flags,
        .set_flag = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
                                 unsigned long ip,
                                 struct trace_array *tr, bool on,
                                 void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before changing the counter by
         * one minus the old counter. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and would not do anything if the new
         * counter is seen.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
        old_count = *count;

        if (old_count <= 0)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracer_tracing_is_on(tr))
                return;

        if (on)
                tracer_tracing_on(tr);
        else
                tracer_tracing_off(tr);

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
                      struct trace_array *tr, struct ftrace_probe_ops *ops,
                      void *data)
{
        update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
               struct trace_array *tr, struct ftrace_probe_ops *ops,
               void *data)
{
        if (tracer_tracing_is_on(tr))
                return;

        tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
                struct trace_array *tr, struct ftrace_probe_ops *ops,
                void *data)
{
        if (!tracer_tracing_is_on(tr))
                return;

        tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
        __trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
                        struct trace_array *tr, struct ftrace_probe_ops *ops,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;
        long new_count;

        if (!tracing_is_on())
                return;

        /* unlimited? */
        if (!mapper) {
                trace_stack(tr);
                return;
        }

        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {
                old_count = *count;

                if (!old_count)
                        return;

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_stack(tr);

                if (!tracing_is_on())
                        return;

        } while (new_count != old_count);
}

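/*
 * Decrement the per-ip counter attached to a counted probe, if any.
 * Returns 1 if the probe should still fire, 0 once the count is used up.
 */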
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count) {
                if (*count <= 0)
                        return 0;
                (*count)--;
        }

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, struct ftrace_probe_ops *ops,
                   void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count)
                seq_printf(m, ":count=%ld\n", *count);
        else
                seq_puts(m, ":unlimited\n");

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops,
                     void *data)
{
        return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, ops, data);
}

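/*
 * init/free callbacks for the counted probes: lazily allocate the
 * ip -> count mapper, and free either a single entry or the whole
 * mapper (when ip is zero) as the probe goes away.
 */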
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *init_data, void **data)
{
        struct ftrace_func_mapper *mapper = *data;

        if (!mapper) {
                mapper = allocate_ftrace_func_mapper();
                if (!mapper)
                        return -ENOMEM;
                *data = mapper;
        }

        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *data)
{
        struct ftrace_func_mapper *mapper = data;

        if (!ip) {
                free_ftrace_func_mapper(mapper, NULL);
                return;
        }

        ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func = ftrace_traceon_count,
        .print = ftrace_traceon_print,
        .init = ftrace_count_init,
        .free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func = ftrace_traceoff_count,
        .print = ftrace_traceoff_print,
        .init = ftrace_count_init,
        .free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func = ftrace_stacktrace_count,
        .print = ftrace_stacktrace_print,
        .init = ftrace_count_init,
        .free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func = ftrace_dump_probe,
        .print = ftrace_dump_print,
        .init = ftrace_count_init,
        .free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func = ftrace_cpudump_probe,
        .print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func = ftrace_traceon,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func = ftrace_stacktrace,
        .print = ftrace_stacktrace_print,
};

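/*
 * Common back end for the probe commands: parse the optional ":count"
 * parameter and register the probe on the functions matched by @glob,
 * or unregister it when the command starts with '!'.
 */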
static int
ftrace_trace_probe_callback(struct trace_array *tr,
                            struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return unregister_ftrace_function_probe_func(glob+1, tr, ops);

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, tr, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name = "stacktrace",
        .func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name = "dump",
        .func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name = "cpudump",
        .func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}