// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

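/* Simple accessors for the event subsystem reference count. */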
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, const char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

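/*
 * Allocate an ftrace_event_field from field_cachep, fill it in from
 * the arguments, and link it onto the given field list.
 */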
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len,
				int need_test)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->needs_test = need_test;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
				  const char *name, int offset, int size, int is_signed,
				  int filter_type, int len, int need_test)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len, need_test);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER,	\
				   0, 0);				\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}


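/*
 * Resolve a "REC->name" reference from an event's print fmt to the
 * matching entry in the event class's fields_array.
 */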
static struct trace_event_fields *find_event_field(const char *fmt,
						   struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return NULL;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) || field->name[len])
			continue;

		return field;
	}
	return NULL;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field;

	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* This is an array and is OK to dereference. */
	return strchr(field->type, '[') != NULL;
}

/* Look for a string within an argument */
static bool find_print_string(const char *arg, const char *str, const char *end)
{
	const char *r;

	r = strstr(arg, str);
	return r && r < end;
}

/* Return true if the argument pointer is safe */
static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
{
	const char *r, *e, *a;

	e = fmt + len;

	/* Find the REC-> in the argument */
	r = strstr(fmt, "REC->");
	if (r && r < e) {
		/*
		 * Addresses of events on the buffer, or an array on the buffer is
		 * OK to dereference. There are ways to fool this, but
		 * this is to catch common mistakes, not malicious code.
		 */
		a = strchr(fmt, '&');
		if ((a && (a < r)) || test_field(r, call))
			return true;
	} else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_sockaddr(", e)) {
		return true;
	} else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
		return true;
	}
	return false;
}

/* Return true if the string is safe */
static bool process_string(const char *fmt, int len, struct trace_event_call *call)
{
	struct trace_event_fields *field;
	const char *r, *e, *s;

	e = fmt + len;

	/*
	 * There are several helper functions that return strings.
	 * If the argument contains a function, then assume its field is valid.
	 * It is considered that the argument has a function if it has:
	 *   alphanumeric or '_' before a parenthesis.
	 */
	s = fmt;
	do {
		r = strstr(s, "(");
		if (!r || r >= e)
			break;
		for (int i = 1; r - i >= s; i++) {
			char ch = *(r - i);
			if (isspace(ch))
				continue;
			if (isalnum(ch) || ch == '_')
				return true;
			/* Anything else, this isn't a function */
			break;
		}
		/* A function could be wrapped in parenthesis, try the next one */
		s = r + 1;
	} while (s < e);

	/*
	 * Check for arrays. If the argument has: foo[REC->val]
	 * then it is very likely that foo is an array of strings
	 * that are safe to use.
	 */
	r = strstr(s, "[");
	if (r && r < e) {
		r = strstr(r, "REC->");
		if (r && r < e)
			return true;
	}

	/*
	 * If there's any strings in the argument consider this arg OK as it
	 * could be: REC->field ? "foo" : "bar" and we don't want to get into
	 * verifying that logic here.
	 */
	if (find_print_string(fmt, "\"", e))
		return true;

	/* Dereferenced strings are also valid like any other pointer */
	if (process_pointer(fmt, len, call))
		return true;

	/* Make sure the field is found */
	field = find_event_field(fmt, call);
	if (!field)
		return false;

	/* Test this field's string before printing the event */
	call->flags |= TRACE_EVENT_FL_TEST_STR;
	field->needs_test = 1;

	return true;
}

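/*
 * Validate one dereferenced argument of the print fmt: clear the
 * argument's bit in @dereference_flags when it can be proven safe,
 * otherwise warn about the bad dereference.
 */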
static void handle_dereference_arg(const char *arg_str, u64 string_flags, int len,
				   u64 *dereference_flags, int arg,
				   struct trace_event_call *call)
{
	if (string_flags & (1ULL << arg)) {
		if (process_string(arg_str, len, call))
			*dereference_flags &= ~(1ULL << arg);
	} else if (process_pointer(arg_str, len, call))
		*dereference_flags &= ~(1ULL << arg);
	else
		pr_warn("TRACE EVENT ERROR: Bad dereference argument: '%.*s'\n",
			len, arg_str);
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * pointers are OK, if it is dereferenced into the event itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	u64 string_flags = 0;
	bool first = true;
	const char *fmt;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i, e;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there was no %p* uses
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
 do_pointer:
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						/* Handle %*pbl case */
						if (!j && fmt[i + 1] == 'p') {
							arg++;
							i++;
							goto do_pointer;
						}
						continue;
					}
					if (fmt[i + j] == 's') {
						if (star)
							arg++;
						if (WARN_ONCE(arg == 63,
							      "Too many args for event: %s",
							      trace_event_name(call)))
							return;
						dereference_flags |= 1ULL << arg;
						string_flags |= 1ULL << arg;
					}
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			e = i;
			i++;
			while (isspace(fmt[i]))
				i++;

			/*
			 * If start_arg is zero, then this is the start of the
			 * first argument. The processing of the argument happens
			 * when the end of the argument is found, as it needs to
			 * handle parenthesis and such.
			 */
			if (!start_arg) {
				start_arg = i;
				/* Balance out the i++ in the for loop */
				i--;
				continue;
			}

			if (dereference_flags & (1ULL << arg)) {
				handle_dereference_arg(fmt + start_arg, string_flags,
						       e - start_arg,
						       &dereference_flags, arg, call);
			}

			start_arg = i;
			arg++;
			/* Balance out the i++ in the for loop */
			i--;
		}
	}

	if (dereference_flags & (1ULL << arg)) {
		handle_dereference_arg(fmt + start_arg, string_flags,
				       i - start_arg,
				       &dereference_flags, arg, call);
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}

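/*
 * Assign the event its type id via register_trace_event() and verify
 * that the event's print fmt does not dereference unsafe pointers.
 */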
int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	/*
	 * This is recorded at every sched_switch for this task.
	 * Thus, even if the task migrates the ignore value will be the same.
	 */
	return this_cpu_read(tr->array_buffer.data->ignore_pid) != 0;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

/**
 * trace_event_buffer_reserve - reserve space on the ring buffer for an event
 * @fbuffer: information about how to save the event
 * @trace_file: the instance file descriptor for the event
 * @len: The length of the event
 *
 * The @fbuffer has information about the ring buffer and data will
 * be added to it to be used by the call to trace_event_buffer_commit().
 * The @trace_file is the descriptor with information about the status
 * of the given event for a specific trace_array instance.
 * The @len is the length of data to save for the event.
 *
 * Returns a pointer to the data on the ring buffer or NULL if the
 * event was not reserved (event was filtered, too big, or the buffer
 * simply was disabled for write).
 */
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

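/*
 * Registration callback for tracepoint-based events: attach or detach
 * the ftrace or perf probe depending on @type.
 */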
int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		if (!call->class->perf_probe)
			return -ENODEV;
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

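/*
 * Enable or disable an event file. With @soft_disable set, the event
 * is switched in or out of "soft mode": the tracepoint stays
 * registered, but the SOFT_DISABLED flag keeps the event from being
 * recorded until it is fully enabled.
 */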
static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	bool soft_mode = atomic_read(&file->sm_ref) != 0;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the soft_mode is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			soft_mode = false;
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !soft_mode;

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_UNREGISTER, file);

			WARN_ON_ONCE(ret);
		}
		/* If in soft mode, just set the SOFT_DISABLE_BIT, else clear it */
		if (soft_mode)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing. If the event is disabled, we set SOFT_DISABLED
		 * before enabling the event tracepoint, so it still seems
		 * to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to soft mode. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER(RECORD_CMD)) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER(RECORD_TGID)) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event "
					"%s\n", trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

#ifdef CONFIG_MODULES
struct event_mod_load {
	struct list_head	list;
	char			*module;
	char			*match;
	char			*system;
	char			*event;
};

static void free_event_mod(struct event_mod_load *event_mod)
{
	list_del(&event_mod->list);
	kfree(event_mod->module);
	kfree(event_mod->match);
	kfree(event_mod->system);
	kfree(event_mod->event);
	kfree(event_mod);
}

static void clear_mod_events(struct trace_array *tr)
{
	struct event_mod_load *event_mod, *n;

	list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
		free_event_mod(event_mod);
	}
}

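/*
 * Drop cached module events that match the given module name and,
 * when given, the match/system/event strings. Returns 0 if at least
 * one entry was removed, -EINVAL otherwise.
 */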
static int remove_cache_mod(struct trace_array *tr, const char *mod,
			    const char *match, const char *system, const char *event)
{
	struct event_mod_load *event_mod, *n;
	int ret = -EINVAL;

	list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
		if (strcmp(event_mod->module, mod) != 0)
			continue;

		if (match && strcmp(event_mod->match, match) != 0)
			continue;

		if (system &&
		    (!event_mod->system || strcmp(event_mod->system, system) != 0))
			continue;

		if (event &&
		    (!event_mod->event || strcmp(event_mod->event, event) != 0))
			continue;

		free_event_mod(event_mod);
		ret = 0;
	}

	return ret;
}

static int cache_mod(struct trace_array *tr, const char *mod, int set,
		     const char *match, const char *system, const char *event)
{
	struct event_mod_load *event_mod;

	/* If the module exists, then this just failed to find an event */
	if (module_exists(mod))
		return -EINVAL;

	/* See if this is to remove a cached filter */
	if (!set)
		return remove_cache_mod(tr, mod, match, system, event);

	event_mod = kzalloc_obj(*event_mod);
	if (!event_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&event_mod->list);
	event_mod->module = kstrdup(mod, GFP_KERNEL);
	if (!event_mod->module)
		goto out_free;

	if (match) {
		event_mod->match = kstrdup(match, GFP_KERNEL);
		if (!event_mod->match)
			goto out_free;
	}

	if (system) {
		event_mod->system = kstrdup(system, GFP_KERNEL);
		if (!event_mod->system)
			goto out_free;
	}

	if (event) {
		event_mod->event = kstrdup(event, GFP_KERNEL);
		if (!event_mod->event)
			goto out_free;
	}

	list_add(&event_mod->list, &tr->mod_events);

	return 0;

 out_free:
	free_event_mod(event_mod);

	return -ENOMEM;
}
#else /* CONFIG_MODULES */
static inline void clear_mod_events(struct trace_array *tr) { }
static int cache_mod(struct trace_array *tr, const char *mod, int set,
		     const char *match, const char *system, const char *event)
{
	return -EINVAL;
}
#endif

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	clear_mod_events(tr);
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	guard(preempt)();
	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	guard(preempt)();
	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

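/*
 * Register or unregister the fork and exit probes that keep the PID
 * filter lists in sync as filtered tasks fork and exit.
 */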
void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

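/*
 * Clear the requested type of PID filtering (TRACE_PIDS and/or
 * TRACE_NO_PIDS) for a trace array, unregistering the sched probes
 * once no PID filtering remains in use.
 */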
static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

void event_file_get(struct trace_event_file *file)
{
	refcount_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!refcount_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (refcount_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);

	/* Wake up hist poll waiters to notice the EVENT_FILE_FL_FREED flag. */
	hist_poll_wakeup();
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set,
			      const char *mod)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	char *module __free(kfree) = NULL;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	if (mod) {
		char *p;

		module = kstrdup(mod, GFP_KERNEL);
		if (!module)
			return -ENOMEM;

		/* Replace all '-' with '_' as that's what modules do */
		for (p = strchr(module, '-'); p; p = strchr(p + 1, '-'))
			*p = '_';
	}

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* If a module is specified, skip events that are not that module */
		if (module && (!call->module || strcmp(module_name(call->module), module)))
			continue;

		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	/*
	 * If this is a module setting and nothing was found,
	 * check if the module was loaded. If it wasn't cache it.
	 */
	if (module && ret == -EINVAL && !eret)
		ret = cache_mod(tr, module, set, match, sub, event);

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set,
				  const char *mod)
{
	int ret;

	if (trace_array_is_readonly(tr))
		return -EACCES;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set, mod);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match, *mod;
	int ret;

	if (!tr)
		return -ENOENT;

	/* Modules events can be appended with :mod:<module> */
	mod = strstr(buf, ":mod:");
	if (mod) {
		*mod = '\0';
		/* move to the module name */
		mod += 5;
	}

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	} else if (mod) {
		/* Allow wildcard for no length or star */
		if (!strlen(match) || strcmp(match, "*") == 0)
			match = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set, mod);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set, NULL);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers(tr);
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

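/*
 * Iterator for the set_event file: walks the enabled event files
 * first, then (with CONFIG_MODULES) the cached module events.
 */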
enum set_event_iter_type {
	SET_EVENT_FILE,
	SET_EVENT_MOD,
};

struct set_event_iter {
	enum set_event_iter_type	type;
	union {
		struct trace_event_file	*file;
		struct event_mod_load	*event_mod;
	};
};

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct set_event_iter *iter = v;
	struct trace_event_file *file;
	struct trace_array *tr = m->private;

	(*pos)++;

	if (iter->type == SET_EVENT_FILE) {
		file = iter->file;
		list_for_each_entry_continue(file, &tr->events, list) {
			if (file->flags & EVENT_FILE_FL_ENABLED) {
				iter->file = file;
				return iter;
			}
		}
#ifdef CONFIG_MODULES
		iter->type = SET_EVENT_MOD;
		iter->event_mod = list_entry(&tr->mod_events, struct event_mod_load, list);
#endif
	}

#ifdef CONFIG_MODULES
	list_for_each_entry_continue(iter->event_mod, &tr->mod_events, list)
		return iter;
#endif

	/*
	 * The iter is allocated in s_start() and passed via the 'v'
	 * parameter. To stop the iterator, NULL must be returned. But
	 * the return value is what the 'v' parameter in s_stop() receives
	 * and frees. Free iter here as it will no longer be used.
	 */
	kfree(iter);
	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct set_event_iter *iter;
	loff_t l;

	iter = kzalloc_obj(*iter);
	mutex_lock(&event_mutex);
	if (!iter)
		return NULL;

	iter->type = SET_EVENT_FILE;
	iter->file = list_entry(&tr->events, struct trace_event_file, list);

	for (l = 0; l <= *pos; ) {
		iter = s_next(m, iter, &l);
		if (!iter)
			break;
	}
	return iter;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int get_call_len(struct trace_event_call *call)
{
	int len;

	/* Get the length of "<system>:<event>" */
	len = strlen(call->class->system) + 1;
	len += strlen(trace_event_name(call));

	/* Set the index to 32 bytes to separate event from data */
	return len >= 32 ? 1 : 32 - len;
}

/**
 * t_show_filters - seq_file callback to display active event filters
 * @m: The seq_file interface for formatted output
 * @v: The current trace_event_file being iterated
 *
 * Checks whether a filter is applied to the current event file in the
 * iteration and, if so, prints the system name, event name, and the
 * filter string.
 */
static int t_show_filters(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;
	struct event_filter *filter;
	int len;

	guard(rcu)();
	filter = rcu_dereference(file->filter);
	if (!filter || !filter->filter_string)
		return 0;

	len = get_call_len(call);

	seq_printf(m, "%s:%s%*.s%s\n", call->class->system,
		   trace_event_name(call), len, "", filter->filter_string);

	return 0;
}

/**
 * t_show_triggers - seq_file callback to display active event triggers
 * @m: The seq_file interface for formatted output
 * @v: The current trace_event_file being iterated
 *
 * Iterates through the trigger list of the current event file and prints
 * each active trigger's configuration using its associated print
 * operation.
 */
static int t_show_triggers(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;
	struct event_trigger_data *data;
	int len;

	/*
	 * The event_mutex is held by t_start(), protecting the
	 * file->triggers list traversal.
	 */
	if (list_empty(&file->triggers))
		return 0;

	len = get_call_len(call);

	list_for_each_entry_rcu(data, &file->triggers, list) {
		seq_printf(m, "%s:%s%*.s", call->class->system,
			   trace_event_name(call), len, "");

		data->cmd_ops->print(m, data);
	}

	return 0;
}

#ifdef CONFIG_MODULES
static int s_show(struct seq_file *m, void *v)
{
	struct set_event_iter *iter = v;
	const char *system;
	const char *event;

	if (iter->type == SET_EVENT_FILE)
		return t_show(m, iter->file);

	/* When match is set, system and event are not */
	if (iter->event_mod->match) {
		seq_printf(m, "%s:mod:%s\n", iter->event_mod->match,
			   iter->event_mod->module);
		return 0;
	}

	system = iter->event_mod->system ? : "*";
	event = iter->event_mod->event ? : "*";

	seq_printf(m, "%s:%s:mod:%s\n", system, event, iter->event_mod->module);

	return 0;
}
#else /* CONFIG_MODULES */
static int s_show(struct seq_file *m, void *v)
{
	struct set_event_iter *iter = v;

	return t_show(m, iter->file);
}
#endif

static void s_stop(struct seq_file *m, void *v)
{
	kfree(v);
	t_stop(m, NULL);
}

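/*
 * seq_file iterators for the set_event_pid and set_event_notrace_pid
 * files, parameterized by TRACE_PIDS vs TRACE_NO_PIDS.
 */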
1804 static void *
__next(struct seq_file * m,void * v,loff_t * pos,int type)1805 __next(struct seq_file *m, void *v, loff_t *pos, int type)
1806 {
1807 struct trace_array *tr = m->private;
1808 struct trace_pid_list *pid_list;
1809
1810 if (type == TRACE_PIDS)
1811 pid_list = rcu_dereference_sched(tr->filtered_pids);
1812 else
1813 pid_list = rcu_dereference_sched(tr->filtered_no_pids);
1814
1815 return trace_pid_next(pid_list, v, pos);
1816 }
1817
1818 static void *
p_next(struct seq_file * m,void * v,loff_t * pos)1819 p_next(struct seq_file *m, void *v, loff_t *pos)
1820 {
1821 return __next(m, v, pos, TRACE_PIDS);
1822 }
1823
1824 static void *
np_next(struct seq_file * m,void * v,loff_t * pos)1825 np_next(struct seq_file *m, void *v, loff_t *pos)
1826 {
1827 return __next(m, v, pos, TRACE_NO_PIDS);
1828 }
1829
__start(struct seq_file * m,loff_t * pos,int type)1830 static void *__start(struct seq_file *m, loff_t *pos, int type)
1831 __acquires(RCU)
1832 {
1833 struct trace_pid_list *pid_list;
1834 struct trace_array *tr = m->private;
1835
1836 /*
1837 * Grab the mutex, to keep calls to p_next() having the same
1838 * tr->filtered_pids as p_start() has.
1839 * If we just passed the tr->filtered_pids around, then RCU would
1840 * have been enough, but doing that makes things more complex.
1841 */
1842 mutex_lock(&event_mutex);
1843 rcu_read_lock_sched();
1844
1845 if (type == TRACE_PIDS)
1846 pid_list = rcu_dereference_sched(tr->filtered_pids);
1847 else
1848 pid_list = rcu_dereference_sched(tr->filtered_no_pids);
1849
1850 if (!pid_list)
1851 return NULL;
1852
1853 return trace_pid_start(pid_list, pos);
1854 }
1855
p_start(struct seq_file * m,loff_t * pos)1856 static void *p_start(struct seq_file *m, loff_t *pos)
1857 __acquires(RCU)
1858 {
1859 return __start(m, pos, TRACE_PIDS);
1860 }
1861
np_start(struct seq_file * m,loff_t * pos)1862 static void *np_start(struct seq_file *m, loff_t *pos)
1863 __acquires(RCU)
1864 {
1865 return __start(m, pos, TRACE_NO_PIDS);
1866 }
1867
1868 static void p_stop(struct seq_file *m, void *p)
1869 __releases(RCU)
1870 {
1871 rcu_read_unlock_sched();
1872 mutex_unlock(&event_mutex);
1873 }
1874
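/*
 * Show the state of an event's "enable" file: "0" or "1", followed by
 * a "*" when the event has soft-mode references (sm_ref), for instance
 * from an "enable_event" trigger.
 */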
1875 static ssize_t
1876 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1877 loff_t *ppos)
1878 {
1879 struct trace_event_file *file;
1880 unsigned long flags;
1881 char buf[4] = "0";
1882
1883 mutex_lock(&event_mutex);
1884 file = event_file_file(filp);
1885 if (likely(file))
1886 flags = file->flags;
1887 mutex_unlock(&event_mutex);
1888
1889 if (!file)
1890 return -ENODEV;
1891
1892 if (flags & EVENT_FILE_FL_ENABLED &&
1893 !(flags & EVENT_FILE_FL_SOFT_DISABLED))
1894 strcpy(buf, "1");
1895
1896 if (atomic_read(&file->sm_ref) != 0)
1897 strcat(buf, "*");
1898
1899 strcat(buf, "\n");
1900
1901 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
1902 }
1903
1904 static ssize_t
1905 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1906 loff_t *ppos)
1907 {
1908 struct trace_event_file *file;
1909 unsigned long val;
1910 int ret;
1911
1912 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1913 if (ret)
1914 return ret;
1915
1916 guard(mutex)(&event_mutex);
1917
1918 switch (val) {
1919 case 0:
1920 case 1:
1921 file = event_file_file(filp);
1922 if (!file)
1923 return -ENODEV;
1924 ret = tracing_update_buffers(file->tr);
1925 if (ret < 0)
1926 return ret;
1927 ret = ftrace_event_enable_disable(file, val);
1928 if (ret < 0)
1929 return ret;
1930 break;
1931
1932 default:
1933 return -EINVAL;
1934 }
1935
1936 *ppos += cnt;
1937
1938 return cnt;
1939 }
1940
1941 /*
1942 * Returns:
1943 * 0 : no events exist
1944 * 1 : all events are disabled
1945 * 2 : all events are enabled
1946 * 3 : some events are enabled and some are disabled
1947 */
1948 int trace_events_enabled(struct trace_array *tr, const char *system)
1949 {
1950 struct trace_event_call *call;
1951 struct trace_event_file *file;
1952 int set = 0;
1953
1954 guard(mutex)(&event_mutex);
1955
1956 list_for_each_entry(file, &tr->events, list) {
1957 call = file->event_call;
1958 if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
1959 !trace_event_name(call) || !call->class || !call->class->reg)
1960 continue;
1961
1962 if (system && strcmp(call->class->system, system) != 0)
1963 continue;
1964
1965 /*
1966 * We need to find out if all the events are set
1967 * or if all events are cleared, or if we have
1968 * a mixture.
1969 */
1970 set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
1971
1972 /*
1973 * If we have a mixture, no need to look further.
1974 */
1975 if (set == 3)
1976 break;
1977 }
1978
1979 return set;
1980 }
1981
1982 static ssize_t
1983 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1984 loff_t *ppos)
1985 {
1986 const char set_to_char[4] = { '?', '0', '1', 'X' };
1987 struct trace_subsystem_dir *dir = filp->private_data;
1988 struct event_subsystem *system = dir->subsystem;
1989 struct trace_array *tr = dir->tr;
1990 char buf[2];
1991 int set;
1992 int ret;
1993
1994 set = trace_events_enabled(tr, system ? system->name : NULL);
1995
1996 buf[0] = set_to_char[set];
1997 buf[1] = '\n';
1998
1999 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
2000
2001 return ret;
2002 }
2003
2004 static ssize_t
2005 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
2006 loff_t *ppos)
2007 {
2008 struct trace_subsystem_dir *dir = filp->private_data;
2009 struct event_subsystem *system = dir->subsystem;
2010 const char *name = NULL;
2011 unsigned long val;
2012 ssize_t ret;
2013
2014 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2015 if (ret)
2016 return ret;
2017
2018 ret = tracing_update_buffers(dir->tr);
2019 if (ret < 0)
2020 return ret;
2021
2022 if (val != 0 && val != 1)
2023 return -EINVAL;
2024
2025 /*
2026 * Opening of "enable" adds a ref count to system,
2027 * so the name is safe to use.
2028 */
2029 if (system)
2030 name = system->name;
2031
2032 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val, NULL);
2033 if (ret)
2034 goto out;
2035
2036 ret = cnt;
2037
2038 out:
2039 *ppos += cnt;
2040
2041 return ret;
2042 }
2043
2044 enum {
2045 FORMAT_HEADER = 1,
2046 FORMAT_FIELD_SEPARATOR = 2,
2047 FORMAT_PRINTFMT = 3,
2048 };
2049
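/*
 * Iteration order for the "format" file: FORMAT_HEADER, then the
 * common fields, FORMAT_FIELD_SEPARATOR, the event's own fields, and
 * finally FORMAT_PRINTFMT. The field lists are walked via ->prev so
 * the fields print in the order they were defined.
 */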
2050 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
2051 {
2052 struct trace_event_file *file = event_file_data(m->private);
2053 struct trace_event_call *call = file->event_call;
2054 struct list_head *common_head = &ftrace_common_fields;
2055 struct list_head *head = trace_get_fields(call);
2056 struct list_head *node = v;
2057
2058 (*pos)++;
2059
2060 switch ((unsigned long)v) {
2061 case FORMAT_HEADER:
2062 node = common_head;
2063 break;
2064
2065 case FORMAT_FIELD_SEPARATOR:
2066 node = head;
2067 break;
2068
2069 case FORMAT_PRINTFMT:
2070 /* all done */
2071 return NULL;
2072 }
2073
2074 node = node->prev;
2075 if (node == common_head)
2076 return (void *)FORMAT_FIELD_SEPARATOR;
2077 else if (node == head)
2078 return (void *)FORMAT_PRINTFMT;
2079 else
2080 return node;
2081 }
2082
2083 static int f_show(struct seq_file *m, void *v)
2084 {
2085 struct trace_event_file *file = event_file_data(m->private);
2086 struct trace_event_call *call = file->event_call;
2087 struct ftrace_event_field *field;
2088 const char *array_descriptor;
2089
2090 switch ((unsigned long)v) {
2091 case FORMAT_HEADER:
2092 seq_printf(m, "name: %s\n", trace_event_name(call));
2093 seq_printf(m, "ID: %d\n", call->event.type);
2094 seq_puts(m, "format:\n");
2095 return 0;
2096
2097 case FORMAT_FIELD_SEPARATOR:
2098 seq_putc(m, '\n');
2099 return 0;
2100
2101 case FORMAT_PRINTFMT:
2102 seq_printf(m, "\nprint fmt: %s\n",
2103 call->print_fmt);
2104 return 0;
2105 }
2106
2107 field = list_entry(v, struct ftrace_event_field, link);
2108 /*
2109 * Smartly shows the array type (except dynamic arrays).
2110 * Normal:
2111 * field:TYPE VAR
2112 * If TYPE := TYPE[LEN], it is shown:
2113 * field:TYPE VAR[LEN]
2114 */
2115 array_descriptor = strchr(field->type, '[');
2116
2117 if (str_has_prefix(field->type, "__data_loc"))
2118 array_descriptor = NULL;
2119
2120 if (!array_descriptor)
2121 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
2122 field->type, field->name, field->offset,
2123 field->size, !!field->is_signed);
2124 else if (field->len)
2125 seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
2126 (int)(array_descriptor - field->type),
2127 field->type, field->name,
2128 field->len, field->offset,
2129 field->size, !!field->is_signed);
2130 else
2131 seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
2132 (int)(array_descriptor - field->type),
2133 field->type, field->name,
2134 field->offset, field->size, !!field->is_signed);
2135
2136 return 0;
2137 }
2138
2139 static void *f_start(struct seq_file *m, loff_t *pos)
2140 {
2141 struct trace_event_file *file;
2142 void *p = (void *)FORMAT_HEADER;
2143 loff_t l = 0;
2144
2145 /* ->stop() is called even if ->start() fails */
2146 mutex_lock(&event_mutex);
2147 file = event_file_file(m->private);
2148 if (!file)
2149 return ERR_PTR(-ENODEV);
2150
2151 while (l < *pos && p)
2152 p = f_next(m, p, &l);
2153
2154 return p;
2155 }
2156
2157 static void f_stop(struct seq_file *m, void *p)
2158 {
2159 mutex_unlock(&event_mutex);
2160 }
2161
2162 static const struct seq_operations trace_format_seq_ops = {
2163 .start = f_start,
2164 .next = f_next,
2165 .stop = f_stop,
2166 .show = f_show,
2167 };
2168
2169 static int trace_format_open(struct inode *inode, struct file *file)
2170 {
2171 struct seq_file *m;
2172 int ret;
2173
2174 /* Do we want to hide event format files on tracefs lockdown? */
2175
2176 ret = seq_open(file, &trace_format_seq_ops);
2177 if (ret < 0)
2178 return ret;
2179
2180 m = file->private_data;
2181 m->private = file;
2182
2183 return 0;
2184 }
2185
2186 #ifdef CONFIG_PERF_EVENTS
2187 static ssize_t
2188 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2189 {
2190 int id = (long)event_file_data(filp);
2191 char buf[32];
2192 int len;
2193
2194 if (unlikely(!id))
2195 return -ENODEV;
2196
2197 len = sprintf(buf, "%d\n", id);
2198
2199 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
2200 }
2201 #endif
2202
2203 static ssize_t
2204 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2205 loff_t *ppos)
2206 {
2207 struct trace_event_file *file;
2208 struct trace_seq *s;
2209 int r = -ENODEV;
2210
2211 if (*ppos)
2212 return 0;
2213
2214 s = kmalloc_obj(*s);
2215
2216 if (!s)
2217 return -ENOMEM;
2218
2219 trace_seq_init(s);
2220
2221 mutex_lock(&event_mutex);
2222 file = event_file_file(filp);
2223 if (file)
2224 print_event_filter(file, s);
2225 mutex_unlock(&event_mutex);
2226
2227 if (file)
2228 r = simple_read_from_buffer(ubuf, cnt, ppos,
2229 s->buffer, trace_seq_used(s));
2230
2231 kfree(s);
2232
2233 return r;
2234 }
2235
2236 static ssize_t
2237 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2238 loff_t *ppos)
2239 {
2240 struct trace_event_file *file;
2241 char *buf;
2242 int err = -ENODEV;
2243
2244 if (cnt >= PAGE_SIZE)
2245 return -EINVAL;
2246
2247 buf = memdup_user_nul(ubuf, cnt);
2248 if (IS_ERR(buf))
2249 return PTR_ERR(buf);
2250
2251 mutex_lock(&event_mutex);
2252 file = event_file_file(filp);
2253 if (file) {
2254 if (file->flags & EVENT_FILE_FL_FREED)
2255 err = -ENODEV;
2256 else
2257 err = apply_event_filter(file, buf);
2258 }
2259 mutex_unlock(&event_mutex);
2260
2261 kfree(buf);
2262 if (err < 0)
2263 return err;
2264
2265 *ppos += cnt;
2266
2267 return cnt;
2268 }
2269
2270 static LIST_HEAD(event_subsystems);
2271
2272 static int subsystem_open(struct inode *inode, struct file *filp)
2273 {
2274 struct trace_subsystem_dir *dir = NULL, *iter_dir;
2275 struct trace_array *tr = NULL, *iter_tr;
2276 struct event_subsystem *system = NULL;
2277 int ret;
2278
2279 if (unlikely(tracing_disabled))
2280 return -ENODEV;
2281
2282 /* Make sure the system still exists */
2283 mutex_lock(&event_mutex);
2284 mutex_lock(&trace_types_lock);
2285 list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
2286 list_for_each_entry(iter_dir, &iter_tr->systems, list) {
2287 if (iter_dir == inode->i_private) {
2288 /* Don't open systems with no events */
2289 tr = iter_tr;
2290 dir = iter_dir;
2291 if (dir->nr_events) {
2292 __get_system_dir(dir);
2293 system = dir->subsystem;
2294 }
2295 goto exit_loop;
2296 }
2297 }
2298 }
2299 exit_loop:
2300 mutex_unlock(&trace_types_lock);
2301 mutex_unlock(&event_mutex);
2302
2303 if (!system)
2304 return -ENODEV;
2305
2306 /* Still need to increment the ref count of the system */
2307 if (trace_array_get(tr) < 0) {
2308 put_system(dir);
2309 return -ENODEV;
2310 }
2311
2312 ret = tracing_open_generic(inode, filp);
2313 if (ret < 0) {
2314 trace_array_put(tr);
2315 put_system(dir);
2316 }
2317
2318 return ret;
2319 }
2320
2321 static int system_tr_open(struct inode *inode, struct file *filp)
2322 {
2323 struct trace_subsystem_dir *dir;
2324 struct trace_array *tr = inode->i_private;
2325 int ret;
2326
2327 /* Make a temporary dir that has no system but points to tr */
2328 dir = kzalloc_obj(*dir);
2329 if (!dir)
2330 return -ENOMEM;
2331
2332 ret = tracing_open_generic_tr(inode, filp);
2333 if (ret < 0) {
2334 kfree(dir);
2335 return ret;
2336 }
2337 dir->tr = tr;
2338 filp->private_data = dir;
2339
2340 return 0;
2341 }
2342
2343 static int subsystem_release(struct inode *inode, struct file *file)
2344 {
2345 struct trace_subsystem_dir *dir = file->private_data;
2346
2347 trace_array_put(dir->tr);
2348
2349 /*
2350 * If dir->subsystem is NULL, then this is a temporary
2351 * descriptor that was made for a trace_array to enable
2352 * all subsystems.
2353 */
2354 if (dir->subsystem)
2355 put_system(dir);
2356 else
2357 kfree(dir);
2358
2359 return 0;
2360 }
2361
2362 static ssize_t
2363 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2364 loff_t *ppos)
2365 {
2366 struct trace_subsystem_dir *dir = filp->private_data;
2367 struct event_subsystem *system = dir->subsystem;
2368 struct trace_seq *s;
2369 int r;
2370
2371 if (*ppos)
2372 return 0;
2373
2374 s = kmalloc_obj(*s);
2375 if (!s)
2376 return -ENOMEM;
2377
2378 trace_seq_init(s);
2379
2380 print_subsystem_event_filter(system, s);
2381 r = simple_read_from_buffer(ubuf, cnt, ppos,
2382 s->buffer, trace_seq_used(s));
2383
2384 kfree(s);
2385
2386 return r;
2387 }
2388
2389 static ssize_t
2390 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2391 loff_t *ppos)
2392 {
2393 struct trace_subsystem_dir *dir = filp->private_data;
2394 char *buf;
2395 int err;
2396
2397 if (cnt >= PAGE_SIZE)
2398 return -EINVAL;
2399
2400 buf = memdup_user_nul(ubuf, cnt);
2401 if (IS_ERR(buf))
2402 return PTR_ERR(buf);
2403
2404 err = apply_subsystem_event_filter(dir, buf);
2405 kfree(buf);
2406 if (err < 0)
2407 return err;
2408
2409 *ppos += cnt;
2410
2411 return cnt;
2412 }
2413
2414 static ssize_t
2415 show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2416 {
2417 struct trace_array *tr = filp->private_data;
2418 struct trace_seq *s;
2419 int r;
2420
2421 if (*ppos)
2422 return 0;
2423
2424 s = kmalloc_obj(*s);
2425 if (!s)
2426 return -ENOMEM;
2427
2428 trace_seq_init(s);
2429
2430 ring_buffer_print_page_header(tr->array_buffer.buffer, s);
2431 r = simple_read_from_buffer(ubuf, cnt, ppos,
2432 s->buffer, trace_seq_used(s));
2433
2434 kfree(s);
2435
2436 return r;
2437 }
2438
2439 static ssize_t
2440 show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2441 {
2442 struct trace_seq *s;
2443 int r;
2444
2445 if (*ppos)
2446 return 0;
2447
2448 s = kmalloc_obj(*s);
2449 if (!s)
2450 return -ENOMEM;
2451
2452 trace_seq_init(s);
2453
2454 ring_buffer_print_entry_header(s);
2455 r = simple_read_from_buffer(ubuf, cnt, ppos,
2456 s->buffer, trace_seq_used(s));
2457
2458 kfree(s);
2459
2460 return r;
2461 }
2462
2463 static void ignore_task_cpu(void *data)
2464 {
2465 struct trace_array *tr = data;
2466 struct trace_pid_list *pid_list;
2467 struct trace_pid_list *no_pid_list;
2468
2469 /*
2470 * This function is called by on_each_cpu() while the
2471 * event_mutex is held.
2472 */
2473 pid_list = rcu_dereference_protected(tr->filtered_pids,
2474 mutex_is_locked(&event_mutex));
2475 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2476 mutex_is_locked(&event_mutex));
2477
2478 this_cpu_write(tr->array_buffer.data->ignore_pid,
2479 trace_ignore_this_task(pid_list, no_pid_list, current));
2480 }
2481
2482 static void register_pid_events(struct trace_array *tr)
2483 {
2484 /*
2485 * Register a probe that is called before all other probes
2486 * to set ignore_pid if next or prev do not match.
2487 * Register a probe that is called after all other probes
2488 * to only keep ignore_pid set if next pid matches.
2489 */
2490 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
2491 tr, INT_MAX);
2492 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
2493 tr, 0);
2494
2495 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
2496 tr, INT_MAX);
2497 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
2498 tr, 0);
2499
2500 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
2501 tr, INT_MAX);
2502 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
2503 tr, 0);
2504
2505 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
2506 tr, INT_MAX);
2507 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
2508 tr, 0);
2509 }
2510
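/*
 * Update the PID filter list selected by @type from user-supplied
 * PIDs. The first list created (while the other list is empty)
 * registers the sched_switch/wakeup probes; replacing an existing
 * list frees the old one only after tracepoint_synchronize_unregister().
 */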
2511 static ssize_t
2512 event_pid_write(struct file *filp, const char __user *ubuf,
2513 size_t cnt, loff_t *ppos, int type)
2514 {
2515 struct seq_file *m = filp->private_data;
2516 struct trace_array *tr = m->private;
2517 struct trace_pid_list *filtered_pids = NULL;
2518 struct trace_pid_list *other_pids = NULL;
2519 struct trace_pid_list *pid_list;
2520 struct trace_event_file *file;
2521 ssize_t ret;
2522
2523 if (!cnt)
2524 return 0;
2525
2526 ret = tracing_update_buffers(tr);
2527 if (ret < 0)
2528 return ret;
2529
2530 guard(mutex)(&event_mutex);
2531
2532 if (type == TRACE_PIDS) {
2533 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
2534 lockdep_is_held(&event_mutex));
2535 other_pids = rcu_dereference_protected(tr->filtered_no_pids,
2536 lockdep_is_held(&event_mutex));
2537 } else {
2538 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
2539 lockdep_is_held(&event_mutex));
2540 other_pids = rcu_dereference_protected(tr->filtered_pids,
2541 lockdep_is_held(&event_mutex));
2542 }
2543
2544 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
2545 if (ret < 0)
2546 return ret;
2547
2548 if (type == TRACE_PIDS)
2549 rcu_assign_pointer(tr->filtered_pids, pid_list);
2550 else
2551 rcu_assign_pointer(tr->filtered_no_pids, pid_list);
2552
2553 list_for_each_entry(file, &tr->events, list) {
2554 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
2555 }
2556
2557 if (filtered_pids) {
2558 tracepoint_synchronize_unregister();
2559 trace_pid_list_free(filtered_pids);
2560 } else if (pid_list && !other_pids) {
2561 register_pid_events(tr);
2562 }
2563
2564 /*
2565 * Ignoring of pids is done at task switch. But we have to
2566 * check for those tasks that are currently running.
2567 * Always do this in case a pid was appended or removed.
2568 */
2569 on_each_cpu(ignore_task_cpu, tr, 1);
2570
2571 *ppos += ret;
2572
2573 return ret;
2574 }
2575
2576 static ssize_t
2577 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
2578 size_t cnt, loff_t *ppos)
2579 {
2580 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2581 }
2582
2583 static ssize_t
2584 ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
2585 size_t cnt, loff_t *ppos)
2586 {
2587 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2588 }
2589
2590 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
2591 static int ftrace_event_set_open(struct inode *inode, struct file *file);
2592 static int ftrace_event_show_filters_open(struct inode *inode, struct file *file);
2593 static int ftrace_event_show_triggers_open(struct inode *inode, struct file *file);
2594 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
2595 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
2596 static int ftrace_event_release(struct inode *inode, struct file *file);
2597
2598 static const struct seq_operations show_event_seq_ops = {
2599 .start = t_start,
2600 .next = t_next,
2601 .show = t_show,
2602 .stop = t_stop,
2603 };
2604
2605 static const struct seq_operations show_set_event_seq_ops = {
2606 .start = s_start,
2607 .next = s_next,
2608 .show = s_show,
2609 .stop = s_stop,
2610 };
2611
2612 static const struct seq_operations show_show_event_filters_seq_ops = {
2613 .start = t_start,
2614 .next = t_next,
2615 .show = t_show_filters,
2616 .stop = t_stop,
2617 };
2618
2619 static const struct seq_operations show_show_event_triggers_seq_ops = {
2620 .start = t_start,
2621 .next = t_next,
2622 .show = t_show_triggers,
2623 .stop = t_stop,
2624 };
2625
2626 static const struct seq_operations show_set_pid_seq_ops = {
2627 .start = p_start,
2628 .next = p_next,
2629 .show = trace_pid_show,
2630 .stop = p_stop,
2631 };
2632
2633 static const struct seq_operations show_set_no_pid_seq_ops = {
2634 .start = np_start,
2635 .next = np_next,
2636 .show = trace_pid_show,
2637 .stop = p_stop,
2638 };
2639
2640 static const struct file_operations ftrace_avail_fops = {
2641 .open = ftrace_event_avail_open,
2642 .read = seq_read,
2643 .llseek = seq_lseek,
2644 .release = seq_release,
2645 };
2646
2647 static const struct file_operations ftrace_set_event_fops = {
2648 .open = ftrace_event_set_open,
2649 .read = seq_read,
2650 .write = ftrace_event_write,
2651 .llseek = seq_lseek,
2652 .release = ftrace_event_release,
2653 };
2654
2655 static const struct file_operations ftrace_show_event_filters_fops = {
2656 .open = ftrace_event_show_filters_open,
2657 .read = seq_read,
2658 .llseek = seq_lseek,
2659 .release = seq_release,
2660 };
2661
2662 static const struct file_operations ftrace_show_event_triggers_fops = {
2663 .open = ftrace_event_show_triggers_open,
2664 .read = seq_read,
2665 .llseek = seq_lseek,
2666 .release = seq_release,
2667 };
2668
2669 static const struct file_operations ftrace_set_event_pid_fops = {
2670 .open = ftrace_event_set_pid_open,
2671 .read = seq_read,
2672 .write = ftrace_event_pid_write,
2673 .llseek = seq_lseek,
2674 .release = ftrace_event_release,
2675 };
2676
2677 static const struct file_operations ftrace_set_event_notrace_pid_fops = {
2678 .open = ftrace_event_set_npid_open,
2679 .read = seq_read,
2680 .write = ftrace_event_npid_write,
2681 .llseek = seq_lseek,
2682 .release = ftrace_event_release,
2683 };
2684
2685 static const struct file_operations ftrace_enable_fops = {
2686 .open = tracing_open_file_tr,
2687 .read = event_enable_read,
2688 .write = event_enable_write,
2689 .release = tracing_release_file_tr,
2690 .llseek = default_llseek,
2691 };
2692
2693 static const struct file_operations ftrace_event_format_fops = {
2694 .open = trace_format_open,
2695 .read = seq_read,
2696 .llseek = seq_lseek,
2697 .release = seq_release,
2698 };
2699
2700 #ifdef CONFIG_PERF_EVENTS
2701 static const struct file_operations ftrace_event_id_fops = {
2702 .read = event_id_read,
2703 .llseek = default_llseek,
2704 };
2705 #endif
2706
2707 static const struct file_operations ftrace_event_filter_fops = {
2708 .open = tracing_open_file_tr,
2709 .read = event_filter_read,
2710 .write = event_filter_write,
2711 .release = tracing_release_file_tr,
2712 .llseek = default_llseek,
2713 };
2714
2715 static const struct file_operations ftrace_subsystem_filter_fops = {
2716 .open = subsystem_open,
2717 .read = subsystem_filter_read,
2718 .write = subsystem_filter_write,
2719 .llseek = default_llseek,
2720 .release = subsystem_release,
2721 };
2722
2723 static const struct file_operations ftrace_system_enable_fops = {
2724 .open = subsystem_open,
2725 .read = system_enable_read,
2726 .write = system_enable_write,
2727 .llseek = default_llseek,
2728 .release = subsystem_release,
2729 };
2730
2731 static const struct file_operations ftrace_tr_enable_fops = {
2732 .open = system_tr_open,
2733 .read = system_enable_read,
2734 .write = system_enable_write,
2735 .llseek = default_llseek,
2736 .release = subsystem_release,
2737 };
2738
2739 static const struct file_operations ftrace_show_header_page_fops = {
2740 .open = tracing_open_generic_tr,
2741 .read = show_header_page_file,
2742 .llseek = default_llseek,
2743 .release = tracing_release_generic_tr,
2744 };
2745
2746 static const struct file_operations ftrace_show_header_event_fops = {
2747 .open = tracing_open_generic_tr,
2748 .read = show_header_event_file,
2749 .llseek = default_llseek,
2750 .release = tracing_release_generic_tr,
2751 };
2752
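/*
 * Common open routine for the seq_file based event interfaces. It
 * checks tracefs lockdown, then passes the inode's private data (the
 * trace_array) through to the seq_file for the iterators to use.
 */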
2753 static int
2754 ftrace_event_open(struct inode *inode, struct file *file,
2755 const struct seq_operations *seq_ops)
2756 {
2757 struct seq_file *m;
2758 int ret;
2759
2760 ret = security_locked_down(LOCKDOWN_TRACEFS);
2761 if (ret)
2762 return ret;
2763
2764 ret = seq_open(file, seq_ops);
2765 if (ret < 0)
2766 return ret;
2767 m = file->private_data;
2768 /* copy tr over to seq ops */
2769 m->private = inode->i_private;
2770
2771 return ret;
2772 }
2773
2774 static int ftrace_event_release(struct inode *inode, struct file *file)
2775 {
2776 struct trace_array *tr = inode->i_private;
2777
2778 trace_array_put(tr);
2779
2780 return seq_release(inode, file);
2781 }
2782
2783 static int
2784 ftrace_event_avail_open(struct inode *inode, struct file *file)
2785 {
2786 const struct seq_operations *seq_ops = &show_event_seq_ops;
2787
2788 /* Checks for tracefs lockdown */
2789 return ftrace_event_open(inode, file, seq_ops);
2790 }
2791
2792 static int
2793 ftrace_event_set_open(struct inode *inode, struct file *file)
2794 {
2795 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
2796 struct trace_array *tr = inode->i_private;
2797 int ret;
2798
2799 ret = tracing_check_open_get_tr(tr);
2800 if (ret)
2801 return ret;
2802
2803 if ((file->f_mode & FMODE_WRITE) &&
2804 (file->f_flags & O_TRUNC))
2805 ftrace_clear_events(tr);
2806
2807 ret = ftrace_event_open(inode, file, seq_ops);
2808 if (ret < 0)
2809 trace_array_put(tr);
2810 return ret;
2811 }
2812
2813 /**
2814 * ftrace_event_show_filters_open - open interface for set_event_filters
2815 * @inode: The inode of the file
2816 * @file: The file being opened
2817 *
2818 * Connects the set_event_filters file to the sequence operations
2819 * required to iterate over and display active event filters.
2820 */
2821 static int
2822 ftrace_event_show_filters_open(struct inode *inode, struct file *file)
2823 {
2824 return ftrace_event_open(inode, file, &show_show_event_filters_seq_ops);
2825 }
2826
2827 /**
2828 * ftrace_event_show_triggers_open - open interface for show_event_triggers
2829 * @inode: The inode of the file
2830 * @file: The file being opened
2831 *
2832 * Connects the show_event_triggers file to the sequence operations
2833 * required to iterate over and display active event triggers.
2834 */
2835 static int
2836 ftrace_event_show_triggers_open(struct inode *inode, struct file *file)
2837 {
2838 return ftrace_event_open(inode, file, &show_show_event_triggers_seq_ops);
2839 }
2840
2841 static int
2842 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
2843 {
2844 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
2845 struct trace_array *tr = inode->i_private;
2846 int ret;
2847
2848 ret = tracing_check_open_get_tr(tr);
2849 if (ret)
2850 return ret;
2851
2852 if ((file->f_mode & FMODE_WRITE) &&
2853 (file->f_flags & O_TRUNC))
2854 ftrace_clear_event_pids(tr, TRACE_PIDS);
2855
2856 ret = ftrace_event_open(inode, file, seq_ops);
2857 if (ret < 0)
2858 trace_array_put(tr);
2859 return ret;
2860 }
2861
2862 static int
2863 ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2864 {
2865 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2866 struct trace_array *tr = inode->i_private;
2867 int ret;
2868
2869 ret = tracing_check_open_get_tr(tr);
2870 if (ret)
2871 return ret;
2872
2873 if ((file->f_mode & FMODE_WRITE) &&
2874 (file->f_flags & O_TRUNC))
2875 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2876
2877 ret = ftrace_event_open(inode, file, seq_ops);
2878 if (ret < 0)
2879 trace_array_put(tr);
2880 return ret;
2881 }
2882
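/*
 * Allocate a new event_subsystem with a ref count of one and add it
 * to the global event_subsystems list. The name is duplicated with
 * kstrdup_const(), so only dynamically created names (kprobes and
 * modules) cost an allocation.
 */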
2883 static struct event_subsystem *
2884 create_new_subsystem(const char *name)
2885 {
2886 struct event_subsystem *system;
2887
2888 /* need to create new entry */
2889 system = kmalloc_obj(*system);
2890 if (!system)
2891 return NULL;
2892
2893 system->ref_count = 1;
2894
2895 /* Only allocate if dynamic (kprobes and modules) */
2896 system->name = kstrdup_const(name, GFP_KERNEL);
2897 if (!system->name)
2898 goto out_free;
2899
2900 system->filter = kzalloc_obj(struct event_filter);
2901 if (!system->filter)
2902 goto out_free;
2903
2904 list_add(&system->list, &event_subsystems);
2905
2906 return system;
2907
2908 out_free:
2909 kfree_const(system->name);
2910 kfree(system);
2911 return NULL;
2912 }
2913
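/*
 * eventfs callback supplying the mode and fops for the per-system
 * "filter" and "enable" files when eventfs instantiates them; returns
 * 1 when the name is handled, 0 otherwise.
 */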
2914 static int system_callback(const char *name, umode_t *mode, void **data,
2915 const struct file_operations **fops)
2916 {
2917 if (strcmp(name, "filter") == 0)
2918 *fops = &ftrace_subsystem_filter_fops;
2919
2920 else if (strcmp(name, "enable") == 0)
2921 *fops = &ftrace_system_enable_fops;
2922
2923 else
2924 return 0;
2925
2926 *mode = TRACE_MODE_WRITE;
2927 return 1;
2928 }
2929
2930 static struct eventfs_inode *
2931 event_subsystem_dir(struct trace_array *tr, const char *name,
2932 struct trace_event_file *file, struct eventfs_inode *parent)
2933 {
2934 struct event_subsystem *system, *iter;
2935 struct trace_subsystem_dir *dir;
2936 struct eventfs_inode *ei;
2937 int nr_entries;
2938 static struct eventfs_entry system_entries[] = {
2939 {
2940 .name = "filter",
2941 .callback = system_callback,
2942 },
2943 {
2944 .name = "enable",
2945 .callback = system_callback,
2946 }
2947 };
2948
2949 /* First see if we already created this dir */
2950 list_for_each_entry(dir, &tr->systems, list) {
2951 system = dir->subsystem;
2952 if (strcmp(system->name, name) == 0) {
2953 dir->nr_events++;
2954 file->system = dir;
2955 return dir->ei;
2956 }
2957 }
2958
2959 /* Now see if the system itself exists. */
2960 system = NULL;
2961 list_for_each_entry(iter, &event_subsystems, list) {
2962 if (strcmp(iter->name, name) == 0) {
2963 system = iter;
2964 break;
2965 }
2966 }
2967
2968 dir = kmalloc_obj(*dir);
2969 if (!dir)
2970 goto out_fail;
2971
2972 if (!system) {
2973 system = create_new_subsystem(name);
2974 if (!system)
2975 goto out_free;
2976 } else
2977 __get_system(system);
2978
2979 /* The ftrace system has only directories, no files; same for a read-only instance. */
2980 if (strcmp(name, "ftrace") == 0 || trace_array_is_readonly(tr))
2981 nr_entries = 0;
2982 else
2983 nr_entries = ARRAY_SIZE(system_entries);
2984
2985 ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
2986 if (IS_ERR(ei)) {
2987 pr_warn("Failed to create system directory %s\n", name);
2988 __put_system(system);
2989 goto out_free;
2990 }
2991
2992 dir->ei = ei;
2993 dir->tr = tr;
2994 dir->ref_count = 1;
2995 dir->nr_events = 1;
2996 dir->subsystem = system;
2997 file->system = dir;
2998
2999 list_add(&dir->list, &tr->systems);
3000
3001 return dir->ei;
3002
3003 out_free:
3004 kfree(dir);
3005 out_fail:
3006 /* Only print this message if the failure was a memory allocation */
3007 if (!dir || !system)
3008 pr_warn("No memory to create event subsystem %s\n", name);
3009 return NULL;
3010 }
3011
3012 static int
3013 event_define_fields(struct trace_event_call *call)
3014 {
3015 struct list_head *head;
3016 int ret = 0;
3017
3018 /*
3019 * Other events may have the same class. Only update
3020 * the fields if they are not already defined.
3021 */
3022 head = trace_get_fields(call);
3023 if (list_empty(head)) {
3024 struct trace_event_fields *field = call->class->fields_array;
3025 unsigned int offset = sizeof(struct trace_entry);
3026
3027 for (; field->type; field++) {
3028 if (field->type == TRACE_FUNCTION_TYPE) {
3029 field->define_fields(call);
3030 break;
3031 }
3032
3033 offset = ALIGN(offset, field->align);
3034 ret = trace_define_field_ext(call, field->type, field->name,
3035 offset, field->size,
3036 field->is_signed, field->filter_type,
3037 field->len, field->needs_test);
3038 if (WARN_ON_ONCE(ret)) {
3039 pr_err("error code is %d\n", ret);
3040 break;
3041 }
3042
3043 offset += field->size;
3044 }
3045 }
3046
3047 return ret;
3048 }
3049
3050 static int event_callback(const char *name, umode_t *mode, void **data,
3051 const struct file_operations **fops)
3052 {
3053 struct trace_event_file *file = *data;
3054 struct trace_event_call *call = file->event_call;
3055
3056 if (strcmp(name, "format") == 0) {
3057 *mode = TRACE_MODE_READ;
3058 *fops = &ftrace_event_format_fops;
3059 return 1;
3060 }
3061
3062 /*
3063 * Only event directories that can be enabled should have
3064 * triggers or filters, with the exception of the "print"
3065 * event that can have a "trigger" file.
3066 */
3067 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
3068 if (call->class->reg && strcmp(name, "enable") == 0) {
3069 *mode = TRACE_MODE_WRITE;
3070 *fops = &ftrace_enable_fops;
3071 return 1;
3072 }
3073
3074 if (strcmp(name, "filter") == 0) {
3075 *mode = TRACE_MODE_WRITE;
3076 *fops = &ftrace_event_filter_fops;
3077 return 1;
3078 }
3079 }
3080
3081 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
3082 strcmp(trace_event_name(call), "print") == 0) {
3083 if (strcmp(name, "trigger") == 0) {
3084 *mode = TRACE_MODE_WRITE;
3085 *fops = &event_trigger_fops;
3086 return 1;
3087 }
3088 }
3089
3090 #ifdef CONFIG_PERF_EVENTS
3091 if (call->event.type && call->class->reg &&
3092 strcmp(name, "id") == 0) {
3093 *mode = TRACE_MODE_READ;
3094 *data = (void *)(long)call->event.type;
3095 *fops = &ftrace_event_id_fops;
3096 return 1;
3097 }
3098 #endif
3099
3100 #ifdef CONFIG_HIST_TRIGGERS
3101 if (strcmp(name, "hist") == 0) {
3102 *mode = TRACE_MODE_READ;
3103 *fops = &event_hist_fops;
3104 return 1;
3105 }
3106 #endif
3107 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
3108 if (strcmp(name, "hist_debug") == 0) {
3109 *mode = TRACE_MODE_READ;
3110 *fops = &event_hist_debug_fops;
3111 return 1;
3112 }
3113 #endif
3114 #ifdef CONFIG_TRACE_EVENT_INJECT
3115 if (call->event.type && call->class->reg &&
3116 strcmp(name, "inject") == 0) {
3117 *mode = 0200;
3118 *fops = &event_inject_fops;
3119 return 1;
3120 }
3121 #endif
3122 return 0;
3123 }
3124
3125 /* The file's ref count is incremented on creation and decremented when the "enable" file is freed */
3126 static void event_release(const char *name, void *data)
3127 {
3128 struct trace_event_file *file = data;
3129
3130 event_file_put(file);
3131 }
3132
3133 static int
3134 event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
3135 {
3136 struct trace_event_call *call = file->event_call;
3137 struct trace_array *tr = file->tr;
3138 struct eventfs_inode *e_events;
3139 struct eventfs_inode *ei;
3140 const char *name;
3141 int nr_entries;
3142 int ret;
3143 static struct eventfs_entry event_entries[] = {
3144 {
3145 .name = "format",
3146 .callback = event_callback,
3147 },
3148 #ifdef CONFIG_PERF_EVENTS
3149 {
3150 .name = "id",
3151 .callback = event_callback,
3152 },
3153 #endif
3154 #define NR_RO_EVENT_ENTRIES (1 + IS_ENABLED(CONFIG_PERF_EVENTS))
3155 /* Readonly files must be above this line and counted by NR_RO_EVENT_ENTRIES. */
3156 {
3157 .name = "enable",
3158 .callback = event_callback,
3159 .release = event_release,
3160 },
3161 {
3162 .name = "filter",
3163 .callback = event_callback,
3164 },
3165 {
3166 .name = "trigger",
3167 .callback = event_callback,
3168 },
3169 #ifdef CONFIG_HIST_TRIGGERS
3170 {
3171 .name = "hist",
3172 .callback = event_callback,
3173 },
3174 #endif
3175 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
3176 {
3177 .name = "hist_debug",
3178 .callback = event_callback,
3179 },
3180 #endif
3181 #ifdef CONFIG_TRACE_EVENT_INJECT
3182 {
3183 .name = "inject",
3184 .callback = event_callback,
3185 },
3186 #endif
3187 };
3188
3189 /*
3190 * If the trace point header did not define TRACE_SYSTEM
3191 * then the system would be called "TRACE_SYSTEM". This should
3192 * never happen.
3193 */
3194 if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
3195 return -ENODEV;
3196
3197 e_events = event_subsystem_dir(tr, call->class->system, file, parent);
3198 if (!e_events)
3199 return -ENOMEM;
3200
3201 if (trace_array_is_readonly(tr))
3202 nr_entries = NR_RO_EVENT_ENTRIES;
3203 else
3204 nr_entries = ARRAY_SIZE(event_entries);
3205
3206 name = trace_event_name(call);
3207 ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
3208 if (IS_ERR(ei)) {
3209 pr_warn("Could not create tracefs '%s' directory\n", name);
3210 return -1;
3211 }
3212
3213 file->ei = ei;
3214
3215 ret = event_define_fields(call);
3216 if (ret < 0) {
3217 pr_warn("Could not initialize trace point events/%s\n", name);
3218 return ret;
3219 }
3220
3221 /* Gets decremented on freeing of the "enable" file */
3222 event_file_get(file);
3223
3224 return 0;
3225 }
3226
3227 static void remove_event_from_tracers(struct trace_event_call *call)
3228 {
3229 struct trace_event_file *file;
3230 struct trace_array *tr;
3231
3232 do_for_each_event_file_safe(tr, file) {
3233 if (file->event_call != call)
3234 continue;
3235
3236 remove_event_file_dir(file);
3237 /*
3238 * The do_for_each_event_file_safe() is
3239 * a double loop. After finding the call for this
3240 * trace_array, we use break to jump to the next
3241 * trace_array.
3242 */
3243 break;
3244 } while_for_each_event_file();
3245 }
3246
3247 static void event_remove(struct trace_event_call *call)
3248 {
3249 struct trace_array *tr;
3250 struct trace_event_file *file;
3251
3252 do_for_each_event_file(tr, file) {
3253 if (file->event_call != call)
3254 continue;
3255
3256 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3257 tr->clear_trace = true;
3258
3259 ftrace_event_enable_disable(file, 0);
3260 /*
3261 * The do_for_each_event_file() is
3262 * a double loop. After finding the call for this
3263 * trace_array, we use break to jump to the next
3264 * trace_array.
3265 */
3266 break;
3267 } while_for_each_event_file();
3268
3269 if (call->event.funcs)
3270 __unregister_trace_event(&call->event);
3271 remove_event_from_tracers(call);
3272 list_del(&call->list);
3273 }
3274
3275 static int event_init(struct trace_event_call *call)
3276 {
3277 int ret = 0;
3278 const char *name;
3279
3280 name = trace_event_name(call);
3281 if (WARN_ON(!name))
3282 return -EINVAL;
3283
3284 if (call->class->raw_init) {
3285 ret = call->class->raw_init(call);
3286 if (ret < 0 && ret != -ENOSYS)
3287 pr_warn("Could not initialize trace events/%s\n", name);
3288 }
3289
3290 return ret;
3291 }
3292
3293 static int
3294 __register_event(struct trace_event_call *call, struct module *mod)
3295 {
3296 int ret;
3297
3298 ret = event_init(call);
3299 if (ret < 0)
3300 return ret;
3301
3302 down_write(&trace_event_sem);
3303 list_add(&call->list, &ftrace_events);
3304 up_write(&trace_event_sem);
3305
3306 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
3307 atomic_set(&call->refcnt, 0);
3308 else
3309 call->module = mod;
3310
3311 return 0;
3312 }
3313
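/*
 * Replace the @len byte eval (enum/sizeof) string at @ptr with its
 * numeric value, in place, shifting the remainder of the string down.
 * Returns a pointer just past the written value, or NULL if the value
 * would not fit in the original string.
 */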
3314 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
3315 {
3316 int rlen;
3317 int elen;
3318
3319 /* Find the length of the eval value as a string */
3320 elen = snprintf(ptr, 0, "%ld", map->eval_value);
3321 /* Make sure there's enough room to replace the string with the value */
3322 if (len < elen)
3323 return NULL;
3324
3325 snprintf(ptr, elen + 1, "%ld", map->eval_value);
3326
3327 /* Get the rest of the string of ptr */
3328 rlen = strlen(ptr + len);
3329 memmove(ptr + elen, ptr + len, rlen);
3330 /* Make sure we end the new string */
3331 ptr[elen + rlen] = 0;
3332
3333 return ptr + elen;
3334 }
3335
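/*
 * Rewrite call->print_fmt, replacing each standalone occurrence of
 * the map's eval string with its numeric value while skipping quoted
 * strings, escape sequences, numbers and unrelated identifiers.
 */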
3336 static void update_event_printk(struct trace_event_call *call,
3337 struct trace_eval_map *map)
3338 {
3339 char *ptr;
3340 int quote = 0;
3341 int len = strlen(map->eval_string);
3342
3343 for (ptr = call->print_fmt; *ptr; ptr++) {
3344 if (*ptr == '\\') {
3345 ptr++;
3346 /* paranoid */
3347 if (!*ptr)
3348 break;
3349 continue;
3350 }
3351 if (*ptr == '"') {
3352 quote ^= 1;
3353 continue;
3354 }
3355 if (quote)
3356 continue;
3357 if (isdigit(*ptr)) {
3358 /* skip numbers */
3359 do {
3360 ptr++;
3361 /* Check for alpha chars like ULL */
3362 } while (isalnum(*ptr));
3363 if (!*ptr)
3364 break;
3365 /*
3366 * A number must have some kind of delimiter after
3367 * it, and we can ignore that too.
3368 */
3369 continue;
3370 }
3371 if (isalpha(*ptr) || *ptr == '_') {
3372 if (strncmp(map->eval_string, ptr, len) == 0 &&
3373 !isalnum(ptr[len]) && ptr[len] != '_') {
3374 ptr = eval_replace(ptr, map, len);
3375 /* enum/sizeof string smaller than value */
3376 if (WARN_ON_ONCE(!ptr))
3377 return;
3378 /*
3379 * No need to decrement here, as eval_replace()
3380 * returns the pointer to the character past
3381 * the eval, and two evals cannot be placed
3382 * back to back without something in between.
3383 * We can skip that something in between.
3384 */
3385 continue;
3386 }
3387 skip_more:
3388 do {
3389 ptr++;
3390 } while (isalnum(*ptr) || *ptr == '_');
3391 if (!*ptr)
3392 break;
3393 /*
3394 * If what comes after this variable is a '.' or
3395 * '->' then we can continue to ignore that string.
3396 */
3397 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
3398 ptr += *ptr == '.' ? 1 : 2;
3399 if (!*ptr)
3400 break;
3401 goto skip_more;
3402 }
3403 /*
3404 * Once again, we can skip the delimiter that came
3405 * after the string.
3406 */
3407 continue;
3408 }
3409 }
3410 }
3411
3412 static void add_str_to_module(struct module *module, char *str)
3413 {
3414 struct module_string *modstr;
3415
3416 modstr = kmalloc_obj(*modstr);
3417
3418 /*
3419 * If we failed to allocate memory here, then we'll just
3420 * let the str memory leak when the module is removed.
3421 * If this fails to allocate, there's worse problems than
3422 * a leaked string on module removal.
3423 */
3424 if (WARN_ON_ONCE(!modstr))
3425 return;
3426
3427 modstr->module = module;
3428 modstr->str = str;
3429
3430 list_add(&modstr->next, &module_strings);
3431 }
3432
3433 #define ATTRIBUTE_STR "__attribute__("
3434 #define ATTRIBUTE_STR_LEN (sizeof(ATTRIBUTE_STR) - 1)
3435
3436 /* Remove all __attribute__() from @type. Return allocated string or @type. */
3437 static char *sanitize_field_type(const char *type)
3438 {
3439 char *attr, *tmp, *next, *ret = (char *)type;
3440 int depth;
3441
3442 next = (char *)type;
3443 while ((attr = strstr(next, ATTRIBUTE_STR))) {
3444 /* Retry if "__attribute__(" is part of another word. */
3445 if (attr != next && !isspace(attr[-1])) {
3446 next = attr + ATTRIBUTE_STR_LEN;
3447 continue;
3448 }
3449
3450 if (ret == type) {
3451 ret = kstrdup(type, GFP_KERNEL);
3452 if (WARN_ON_ONCE(!ret))
3453 return NULL;
3454 attr = ret + (attr - type);
3455 }
3456
3457 /* the ATTRIBUTE_STR already has the first '(' */
3458 depth = 1;
3459 next = attr + ATTRIBUTE_STR_LEN;
3460 do {
3461 tmp = strpbrk(next, "()");
3462 /* There are unbalanced parentheses */
3463 if (WARN_ON_ONCE(!tmp)) {
3464 kfree(ret);
3465 return (char *)type;
3466 }
3467
3468 if (*tmp == '(')
3469 depth++;
3470 else
3471 depth--;
3472 next = tmp + 1;
3473 } while (depth > 0);
3474 next = skip_spaces(next);
3475 strcpy(attr, next);
3476 next = attr;
3477 }
3478 return ret;
3479 }
3480
3481 static char *find_replaceable_eval(const char *type, const char *eval_string,
3482 int len)
3483 {
3484 char *ptr;
3485
3486 if (!eval_string)
3487 return NULL;
3488
3489 ptr = strchr(type, '[');
3490 if (!ptr)
3491 return NULL;
3492 ptr++;
3493
3494 if (!isalpha(*ptr) && *ptr != '_')
3495 return NULL;
3496
3497 if (strncmp(eval_string, ptr, len) != 0)
3498 return NULL;
3499
3500 return ptr;
3501 }
3502
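/*
 * Sanitize each field's type string and, when @map is supplied,
 * replace an eval used as an array size (for example "u32 var[MAX]"
 * where MAX is an enum) with its numeric value. Strings allocated for
 * module events are remembered so they can be freed on module unload.
 */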
3503 static void update_event_fields(struct trace_event_call *call,
3504 struct trace_eval_map *map)
3505 {
3506 struct ftrace_event_field *field;
3507 const char *eval_string = NULL;
3508 struct list_head *head;
3509 int len = 0;
3510 char *ptr;
3511 char *str;
3512
3513 /* Dynamic events should never have field maps */
3514 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
3515 return;
3516
3517 if (map) {
3518 eval_string = map->eval_string;
3519 len = strlen(map->eval_string);
3520 }
3521
3522 head = trace_get_fields(call);
3523 list_for_each_entry(field, head, link) {
3524 str = sanitize_field_type(field->type);
3525 if (!str)
3526 return;
3527
3528 ptr = find_replaceable_eval(str, eval_string, len);
3529 if (ptr) {
3530 if (str == field->type) {
3531 str = kstrdup(field->type, GFP_KERNEL);
3532 if (WARN_ON_ONCE(!str))
3533 return;
3534 ptr = str + (ptr - field->type);
3535 }
3536
3537 ptr = eval_replace(ptr, map, len);
3538 /* enum/sizeof string smaller than value */
3539 if (WARN_ON_ONCE(!ptr)) {
3540 kfree(str);
3541 continue;
3542 }
3543 }
3544
3545 if (str == field->type)
3546 continue;
3547 /*
3548 * If the event is part of a module, then we need to free the string
3549 * when the module is removed. Otherwise, it will stay allocated
3550 * until a reboot.
3551 */
3552 if (call->module)
3553 add_str_to_module(call->module, str);
3554
3555 field->type = str;
3556 if (field->filter_type == FILTER_OTHER)
3557 field->filter_type = filter_assign_type(field->type);
3558 }
3559 }
3560
3561 /* Update all events for replacing eval and sanitizing */
3562 void trace_event_update_all(struct trace_eval_map **map, int len)
3563 {
3564 struct trace_event_call *call, *p;
3565 const char *last_system = NULL;
3566 bool first = false;
3567 bool updated;
3568 int last_i;
3569 int i;
3570
3571 down_write(&trace_event_sem);
3572 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3573 /* events are usually grouped together with systems */
3574 if (!last_system || call->class->system != last_system) {
3575 first = true;
3576 last_i = 0;
3577 last_system = call->class->system;
3578 }
3579
3580 updated = false;
3581 /*
3582 * Since calls are grouped by systems, the likelihood that the
3583 * next call in the iteration belongs to the same system as the
3584 * previous call is high. As an optimization, we skip searching
3585 * for a map[] that matches the call's system if the last call
3586 * was from the same system. That's what last_i is for. If the
3587 * call has the same system as the previous call, then last_i
3588 * will be the index of the first map[] that has a matching
3589 * system.
3590 */
3591 for (i = last_i; i < len; i++) {
3592 if (call->class->system == map[i]->system) {
3593 /* Save the first system if need be */
3594 if (first) {
3595 last_i = i;
3596 first = false;
3597 }
3598 update_event_printk(call, map[i]);
3599 update_event_fields(call, map[i]);
3600 updated = true;
3601 }
3602 }
3603 /* If not updated yet, update field for sanitizing. */
3604 if (!updated)
3605 update_event_fields(call, NULL);
3606 cond_resched();
3607 }
3608 up_write(&trace_event_sem);
3609 }
3610
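/*
 * Return true if the event's system is named in @systems, a comma or
 * whitespace separated list; a NULL @systems matches every event.
 */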
3611 static bool event_in_systems(struct trace_event_call *call,
3612 const char *systems)
3613 {
3614 const char *system;
3615 const char *p;
3616
3617 if (!systems)
3618 return true;
3619
3620 system = call->class->system;
3621 p = strstr(systems, system);
3622 if (!p)
3623 return false;
3624
3625 if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',')
3626 return false;
3627
3628 p += strlen(system);
3629 return !*p || isspace(*p) || *p == ',';
3630 }
3631
3632 #ifdef CONFIG_HIST_TRIGGERS
3633 /*
3634 * Wake up waiter on the hist_poll_wq from irq_work because the hist trigger
3635 * may happen in any context.
3636 */
3637 static void hist_poll_event_irq_work(struct irq_work *work)
3638 {
3639 wake_up_all(&hist_poll_wq);
3640 }
3641
3642 DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
3643 DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
3644 #endif
3645
3646 static struct trace_event_file *
3647 trace_create_new_event(struct trace_event_call *call,
3648 struct trace_array *tr)
3649 {
3650 struct trace_pid_list *no_pid_list;
3651 struct trace_pid_list *pid_list;
3652 struct trace_event_file *file;
3653 unsigned int first;
3654
3655 if (!event_in_systems(call, tr->system_names))
3656 return NULL;
3657
3658 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3659 if (!file)
3660 return ERR_PTR(-ENOMEM);
3661
3662 pid_list = rcu_dereference_protected(tr->filtered_pids,
3663 lockdep_is_held(&event_mutex));
3664 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
3665 lockdep_is_held(&event_mutex));
3666
3667 if (!trace_pid_list_first(pid_list, &first) ||
3668 !trace_pid_list_first(no_pid_list, &first))
3669 file->flags |= EVENT_FILE_FL_PID_FILTER;
3670
3671 file->event_call = call;
3672 file->tr = tr;
3673 atomic_set(&file->sm_ref, 0);
3674 atomic_set(&file->tm_ref, 0);
3675 INIT_LIST_HEAD(&file->triggers);
3676 list_add(&file->list, &tr->events);
3677 refcount_set(&file->ref, 1);
3678
3679 return file;
3680 }
3681
3682 #define MAX_BOOT_TRIGGERS 32
3683
3684 static struct boot_triggers {
3685 const char *event;
3686 char *trigger;
3687 } bootup_triggers[MAX_BOOT_TRIGGERS];
3688
3689 static char bootup_trigger_buf[COMMAND_LINE_SIZE];
3690 static int nr_boot_triggers;
3691
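/*
 * Parse the trace_trigger= boot parameter: up to MAX_BOOT_TRIGGERS
 * comma separated "<event>.<trigger>" pairs (for example,
 * trace_trigger=sched_switch.stacktrace) to be applied once the
 * events are registered during early init.
 */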
3692 static __init int setup_trace_triggers(char *str)
3693 {
3694 char *trigger;
3695 char *buf;
3696 int i;
3697
3698 strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
3699 trace_set_ring_buffer_expanded(NULL);
3700 disable_tracing_selftest("running event triggers");
3701
3702 buf = bootup_trigger_buf;
3703 for (i = 0; i < MAX_BOOT_TRIGGERS; i++) {
3704 trigger = strsep(&buf, ",");
3705 if (!trigger)
3706 break;
3707 bootup_triggers[i].event = strsep(&trigger, ".");
3708 bootup_triggers[i].trigger = trigger;
3709 if (!bootup_triggers[i].trigger)
3710 break;
3711 }
3712
3713 nr_boot_triggers = i;
3714 return 1;
3715 }
3716 __setup("trace_trigger=", setup_trace_triggers);
3717
3718 /* Add an event to a trace directory */
3719 static int
3720 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
3721 {
3722 struct trace_event_file *file;
3723
3724 file = trace_create_new_event(call, tr);
3725 /*
3726 * trace_create_new_event() returns ERR_PTR(-ENOMEM) if failed
3727 * allocation, or NULL if the event is not part of the tr->system_names.
3728 * When the event is not part of the tr->system_names, return zero, not
3729 * an error.
3730 */
3731 if (!file)
3732 return 0;
3733
3734 if (IS_ERR(file))
3735 return PTR_ERR(file);
3736
3737 if (eventdir_initialized)
3738 return event_create_dir(tr->event_dir, file);
3739 else
3740 return event_define_fields(call);
3741 }
3742
3743 static void trace_early_triggers(struct trace_event_file *file, const char *name)
3744 {
3745 int ret;
3746 int i;
3747
3748 for (i = 0; i < nr_boot_triggers; i++) {
3749 if (strcmp(name, bootup_triggers[i].event))
3750 continue;
3751 mutex_lock(&event_mutex);
3752 ret = trigger_process_regex(file, bootup_triggers[i].trigger);
3753 mutex_unlock(&event_mutex);
3754 if (ret)
3755 pr_err("Failed to register trigger '%s' on event %s\n",
3756 bootup_triggers[i].trigger,
3757 bootup_triggers[i].event);
3758 }
3759 }
3760
3761 /*
3762 * Just create a descriptor for early init. A descriptor is required
3763 * for enabling events at boot. We want to enable events before
3764 * the filesystem is initialized.
3765 */
3766 static int
3767 __trace_early_add_new_event(struct trace_event_call *call,
3768 struct trace_array *tr)
3769 {
3770 struct trace_event_file *file;
3771 int ret;
3772
3773 file = trace_create_new_event(call, tr);
3774 /*
3775 * trace_create_new_event() returns ERR_PTR(-ENOMEM) if failed
3776 * allocation, or NULL if the event is not part of the tr->system_names.
3777 * When the event is not part of the tr->system_names, return zero, not
3778 * an error.
3779 */
3780 if (!file)
3781 return 0;
3782
3783 if (IS_ERR(file))
3784 return PTR_ERR(file);
3785
3786 ret = event_define_fields(call);
3787 if (ret)
3788 return ret;
3789
3790 trace_early_triggers(file, trace_event_name(call));
3791
3792 return 0;
3793 }
3794
3795 struct ftrace_module_file_ops;
3796 static void __add_event_to_tracers(struct trace_event_call *call);
3797
3798 /* Add an additional event_call dynamically */
3799 int trace_add_event_call(struct trace_event_call *call)
3800 {
3801 int ret;
3802 lockdep_assert_held(&event_mutex);
3803
3804 guard(mutex)(&trace_types_lock);
3805
3806 ret = __register_event(call, NULL);
3807 if (ret < 0)
3808 return ret;
3809
3810 __add_event_to_tracers(call);
3811 return ret;
3812 }
3813 EXPORT_SYMBOL_GPL(trace_add_event_call);
3814
3815 /*
3816 * Must be called under locking of trace_types_lock, event_mutex and
3817 * trace_event_sem.
3818 */
3819 static void __trace_remove_event_call(struct trace_event_call *call)
3820 {
3821 event_remove(call);
3822 trace_destroy_fields(call);
3823 }
3824
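/*
 * Check that nothing is still using the event before tearing it down:
 * busy if perf holds a reference or if any trace_event_file still has
 * it enabled; returns -EBUSY in that case.
 */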
3825 static int probe_remove_event_call(struct trace_event_call *call)
3826 {
3827 struct trace_array *tr;
3828 struct trace_event_file *file;
3829
3830 #ifdef CONFIG_PERF_EVENTS
3831 if (call->perf_refcount)
3832 return -EBUSY;
3833 #endif
3834 do_for_each_event_file(tr, file) {
3835 if (file->event_call != call)
3836 continue;
3837 /*
3838 * We can't rely on the ftrace_event_enable_disable(enable => 0)
3839 * that we are about to do; soft mode can suppress
3840 * TRACE_REG_UNREGISTER.
3841 */
3842 if (file->flags & EVENT_FILE_FL_ENABLED)
3843 goto busy;
3844
3845 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3846 tr->clear_trace = true;
3847 /*
3848 * The do_for_each_event_file() is
3849 * a double loop. After finding the call for this
3850 * trace_array, we use break to jump to the next
3851 * trace_array.
3852 */
3853 break;
3854 } while_for_each_event_file();
3855
3856 __trace_remove_event_call(call);
3857
3858 return 0;
3859 busy:
3860 /* No need to clear the trace now */
3861 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
3862 tr->clear_trace = false;
3863 }
3864 return -EBUSY;
3865 }
3866
3867 /* Remove an event_call */
3868 int trace_remove_event_call(struct trace_event_call *call)
3869 {
3870 int ret;
3871
3872 lockdep_assert_held(&event_mutex);
3873
3874 mutex_lock(&trace_types_lock);
3875 down_write(&trace_event_sem);
3876 ret = probe_remove_event_call(call);
3877 up_write(&trace_event_sem);
3878 mutex_unlock(&trace_types_lock);
3879
3880 return ret;
3881 }
3882 EXPORT_SYMBOL_GPL(trace_remove_event_call);
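
/*
 * Illustrative sketch only: the teardown matching the registration
 * sketch above. A -EBUSY return means some trace_event_file still has
 * the event enabled (or perf holds a reference), so the caller must
 * keep the event alive and retry later. my_backend_unregister() is a
 * hypothetical name used only for this example.
 *
 *	static int my_backend_unregister(struct trace_event_call *call)
 *	{
 *		int ret;
 *
 *		mutex_lock(&event_mutex);
 *		ret = trace_remove_event_call(call);
 *		mutex_unlock(&event_mutex);
 *
 *		return ret;	/x -EBUSY if the event is still in use x/
 *	}
 */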
3883
3884 #define for_each_event(event, start, end) \
3885 for (event = start; \
3886 (unsigned long)event < (unsigned long)end; \
3887 event++)
3888
3889 #ifdef CONFIG_MODULES
3890 static void update_mod_cache(struct trace_array *tr, struct module *mod)
3891 {
3892 struct event_mod_load *event_mod, *n;
3893
3894 list_for_each_entry_safe(event_mod, n, &tr->mod_events, list) {
3895 if (strcmp(event_mod->module, mod->name) != 0)
3896 continue;
3897
3898 __ftrace_set_clr_event_nolock(tr, event_mod->match,
3899 event_mod->system,
3900 event_mod->event, 1, mod->name);
3901 free_event_mod(event_mod);
3902 }
3903 }
3904
3905 static void update_cache_events(struct module *mod)
3906 {
3907 struct trace_array *tr;
3908
3909 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3910 update_mod_cache(tr, mod);
3911 }
3912
3913 static void trace_module_add_events(struct module *mod)
3914 {
3915 struct trace_event_call **call, **start, **end;
3916
3917 if (!mod->num_trace_events)
3918 return;
3919
3920 /* Don't create trace event infrastructure for modules with bad taint */
3921 if (trace_module_has_bad_taint(mod)) {
3922 pr_err("%s: module has bad taint, not creating trace events\n",
3923 mod->name);
3924 return;
3925 }
3926
3927 start = mod->trace_events;
3928 end = mod->trace_events + mod->num_trace_events;
3929
3930 for_each_event(call, start, end) {
3931 __register_event(*call, mod);
3932 __add_event_to_tracers(*call);
3933 }
3934
3935 update_cache_events(mod);
3936 }
3937
3938 static void trace_module_remove_events(struct module *mod)
3939 {
3940 struct trace_event_call *call, *p;
3941 struct module_string *modstr, *m;
3942
3943 down_write(&trace_event_sem);
3944 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3945 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
3946 continue;
3947 if (call->module == mod)
3948 __trace_remove_event_call(call);
3949 }
3950 /* Check for any strings allocated for this module */
3951 list_for_each_entry_safe(modstr, m, &module_strings, next) {
3952 if (modstr->module != mod)
3953 continue;
3954 list_del(&modstr->next);
3955 kfree(modstr->str);
3956 kfree(modstr);
3957 }
3958 up_write(&trace_event_sem);
3959
3960 /*
3961 * It is safest to reset the ring buffer if the module being unloaded
3962 * registered any events that were used. The only worry is if
3963 * a new module gets loaded, and takes on the same id as the events
3964 * of this module. When printing out the buffer, traced events left
3965 * over from this module may be passed to the new module events and
3966 * unexpected results may occur.
3967 */
3968 tracing_reset_all_online_cpus_unlocked();
3969 }
3970
3971 static int trace_module_notify(struct notifier_block *self,
3972 unsigned long val, void *data)
3973 {
3974 struct module *mod = data;
3975
3976 mutex_lock(&event_mutex);
3977 mutex_lock(&trace_types_lock);
3978 switch (val) {
3979 case MODULE_STATE_COMING:
3980 trace_module_add_events(mod);
3981 break;
3982 case MODULE_STATE_GOING:
3983 trace_module_remove_events(mod);
3984 break;
3985 }
3986 mutex_unlock(&trace_types_lock);
3987 mutex_unlock(&event_mutex);
3988
3989 return NOTIFY_OK;
3990 }
3991
3992 static struct notifier_block trace_module_nb = {
3993 .notifier_call = trace_module_notify,
3994 .priority = 1, /* higher than trace.c module notify */
3995 };
3996 #endif /* CONFIG_MODULES */
3997
3998 /* Create a new event directory structure for a trace directory. */
3999 static void
4000 __trace_add_event_dirs(struct trace_array *tr)
4001 {
4002 struct trace_event_call *call;
4003 int ret;
4004
4005 lockdep_assert_held(&trace_event_sem);
4006
4007 list_for_each_entry(call, &ftrace_events, list) {
4008 ret = __trace_add_new_event(call, tr);
4009 if (ret < 0)
4010 pr_warn("Could not create directory for event %s\n",
4011 trace_event_name(call));
4012 }
4013 }
4014
4015 /* Returns any file that matches the system and event */
4016 struct trace_event_file *
4017 __find_event_file(struct trace_array *tr, const char *system, const char *event)
4018 {
4019 struct trace_event_file *file;
4020 struct trace_event_call *call;
4021 const char *name;
4022
4023 list_for_each_entry(file, &tr->events, list) {
4024
4025 call = file->event_call;
4026 name = trace_event_name(call);
4027
4028 if (!name || !call->class)
4029 continue;
4030
4031 if (strcmp(event, name) == 0 &&
4032 strcmp(system, call->class->system) == 0)
4033 return file;
4034 }
4035 return NULL;
4036 }
4037
4038 /* Returns a valid trace event file that matches the system and event */
4039 struct trace_event_file *
4040 find_event_file(struct trace_array *tr, const char *system, const char *event)
4041 {
4042 struct trace_event_file *file;
4043
4044 file = __find_event_file(tr, system, event);
4045 if (!file || !file->event_call->class->reg ||
4046 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
4047 return NULL;
4048
4049 return file;
4050 }
4051
4052 /**
4053 * trace_get_event_file - Find and return a trace event file
4054 * @instance: The name of the trace instance containing the event
4055 * @system: The name of the system containing the event
4056 * @event: The name of the event
4057 *
4058 * Return a trace event file given the trace instance name, trace
4059 * system, and trace event name. If the instance name is NULL, it
4060 * refers to the top-level trace array.
4061 *
4062 * This function will look it up and return it if found, after calling
4063 * trace_array_get() to prevent the instance from going away, and
4064 * increment the event's module refcount to prevent it from being
4065 * removed.
4066 *
4067 * To release the file, call trace_put_event_file(), which will call
4068 * trace_array_put() and decrement the event's module refcount.
4069 *
4070 * Return: The trace event file on success, ERR_PTR otherwise.
4071 */
4072 struct trace_event_file *trace_get_event_file(const char *instance,
4073 const char *system,
4074 const char *event)
4075 {
4076 struct trace_array *tr = top_trace_array();
4077 struct trace_event_file *file = NULL;
4078 int ret = -EINVAL;
4079
4080 if (instance) {
4081 tr = trace_array_find_get(instance);
4082 if (!tr)
4083 return ERR_PTR(-ENOENT);
4084 } else {
4085 ret = trace_array_get(tr);
4086 if (ret)
4087 return ERR_PTR(ret);
4088 }
4089
4090 guard(mutex)(&event_mutex);
4091
4092 file = find_event_file(tr, system, event);
4093 if (!file) {
4094 trace_array_put(tr);
4095 return ERR_PTR(-EINVAL);
4096 }
4097
4098 /* Don't let event modules unload while in use */
4099 ret = trace_event_try_get_ref(file->event_call);
4100 if (!ret) {
4101 trace_array_put(tr);
4102 return ERR_PTR(-EBUSY);
4103 }
4104
4105 return file;
4106 }
4107 EXPORT_SYMBOL_GPL(trace_get_event_file);
4108
4109 /**
4110 * trace_put_event_file - Release a file from trace_get_event_file()
4111 * @file: The trace event file
4112 *
4113 * If a file was retrieved using trace_get_event_file(), this should
4114 * be called when it's no longer needed. It will cancel the previous
4115 * trace_array_get() called by that function, and decrement the
4116 * event's module refcount.
4117 */
4118 void trace_put_event_file(struct trace_event_file *file)
4119 {
4120 mutex_lock(&event_mutex);
4121 trace_event_put_ref(file->event_call);
4122 mutex_unlock(&event_mutex);
4123
4124 trace_array_put(file->tr);
4125 }
4126 EXPORT_SYMBOL_GPL(trace_put_event_file);
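
/*
 * Illustrative usage of the get/put pair above; the system and event
 * names are just examples, and NULL selects the top-level trace array:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	... use file; the instance and the event's module are pinned ...
 *
 *	trace_put_event_file(file);
 */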
4127
4128 #ifdef CONFIG_DYNAMIC_FTRACE
4129 struct event_probe_data {
4130 struct trace_event_file *file;
4131 unsigned long count;
4132 int ref;
4133 bool enable;
4134 };
4135
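/*
 * Toggle the soft-disable state of the event file the probe is attached
 * to: soft-enable when the probe was created to enable the event,
 * soft-disable otherwise.
 */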
4136 static void update_event_probe(struct event_probe_data *data)
4137 {
4138 if (data->enable)
4139 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
4140 else
4141 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
4142 }
4143
4144 static void
4145 event_enable_probe(unsigned long ip, unsigned long parent_ip,
4146 struct trace_array *tr, struct ftrace_probe_ops *ops,
4147 void *data)
4148 {
4149 struct ftrace_func_mapper *mapper = data;
4150 struct event_probe_data *edata;
4151 void **pdata;
4152
4153 pdata = ftrace_func_mapper_find_ip(mapper, ip);
4154 if (!pdata || !*pdata)
4155 return;
4156
4157 edata = *pdata;
4158 update_event_probe(edata);
4159 }
4160
4161 static void
4162 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
4163 struct trace_array *tr, struct ftrace_probe_ops *ops,
4164 void *data)
4165 {
4166 struct ftrace_func_mapper *mapper = data;
4167 struct event_probe_data *edata;
4168 void **pdata;
4169
4170 pdata = ftrace_func_mapper_find_ip(mapper, ip);
4171 if (!pdata || !*pdata)
4172 return;
4173
4174 edata = *pdata;
4175
4176 if (!edata->count)
4177 return;
4178
4179 /* Skip if the event is in a state we want to switch to */
4180 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
4181 return;
4182
4183 if (edata->count != -1)
4184 (edata->count)--;
4185
4186 update_event_probe(edata);
4187 }
4188
4189 static int
4190 event_enable_print(struct seq_file *m, unsigned long ip,
4191 struct ftrace_probe_ops *ops, void *data)
4192 {
4193 struct ftrace_func_mapper *mapper = data;
4194 struct event_probe_data *edata;
4195 void **pdata;
4196
4197 pdata = ftrace_func_mapper_find_ip(mapper, ip);
4198
4199 if (WARN_ON_ONCE(!pdata || !*pdata))
4200 return 0;
4201
4202 edata = *pdata;
4203
4204 seq_printf(m, "%ps:", (void *)ip);
4205
4206 seq_printf(m, "%s:%s:%s",
4207 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
4208 edata->file->event_call->class->system,
4209 trace_event_name(edata->file->event_call));
4210
4211 if (edata->count == -1)
4212 seq_puts(m, ":unlimited\n");
4213 else
4214 seq_printf(m, ":count=%ld\n", edata->count);
4215
4216 return 0;
4217 }
4218
4219 static int
4220 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
4221 unsigned long ip, void *init_data, void **data)
4222 {
4223 struct ftrace_func_mapper *mapper = *data;
4224 struct event_probe_data *edata = init_data;
4225 int ret;
4226
4227 if (!mapper) {
4228 mapper = allocate_ftrace_func_mapper();
4229 if (!mapper)
4230 return -ENODEV;
4231 *data = mapper;
4232 }
4233
4234 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
4235 if (ret < 0)
4236 return ret;
4237
4238 edata->ref++;
4239
4240 return 0;
4241 }
4242
4243 static int free_probe_data(void *data)
4244 {
4245 struct event_probe_data *edata = data;
4246
4247 edata->ref--;
4248 if (!edata->ref) {
4249 /* Remove soft mode */
4250 __ftrace_event_enable_disable(edata->file, 0, 1);
4251 trace_event_put_ref(edata->file->event_call);
4252 kfree(edata);
4253 }
4254 return 0;
4255 }
4256
4257 static void
4258 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
4259 unsigned long ip, void *data)
4260 {
4261 struct ftrace_func_mapper *mapper = data;
4262 struct event_probe_data *edata;
4263
4264 if (!ip) {
4265 if (!mapper)
4266 return;
4267 free_ftrace_func_mapper(mapper, free_probe_data);
4268 return;
4269 }
4270
4271 edata = ftrace_func_mapper_remove_ip(mapper, ip);
4272
4273 if (WARN_ON_ONCE(!edata))
4274 return;
4275
4276 if (WARN_ON_ONCE(edata->ref <= 0))
4277 return;
4278
4279 free_probe_data(edata);
4280 }
4281
4282 static struct ftrace_probe_ops event_enable_probe_ops = {
4283 .func = event_enable_probe,
4284 .print = event_enable_print,
4285 .init = event_enable_init,
4286 .free = event_enable_free,
4287 };
4288
4289 static struct ftrace_probe_ops event_enable_count_probe_ops = {
4290 .func = event_enable_count_probe,
4291 .print = event_enable_print,
4292 .init = event_enable_init,
4293 .free = event_enable_free,
4294 };
4295
4296 static struct ftrace_probe_ops event_disable_probe_ops = {
4297 .func = event_enable_probe,
4298 .print = event_enable_print,
4299 .init = event_enable_init,
4300 .free = event_enable_free,
4301 };
4302
4303 static struct ftrace_probe_ops event_disable_count_probe_ops = {
4304 .func = event_enable_count_probe,
4305 .print = event_enable_print,
4306 .init = event_enable_init,
4307 .free = event_enable_free,
4308 };
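
/*
 * These probe ops implement the enable_event/disable_event commands of
 * set_ftrace_filter (see Documentation/trace/ftrace.rst), e.g.:
 *
 *	echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 *	echo 'schedule:enable_event:sched:sched_switch:3' > set_ftrace_filter
 *	echo '!schedule:disable_event:sched:sched_switch' > set_ftrace_filter
 *
 * The optional trailing count limits how many times the event state is
 * switched; event_enable_func() below parses the "system:event[:count]"
 * parameter and picks the matching ops.
 */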
4309
4310 static int
4311 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
4312 char *glob, char *cmd, char *param, int enabled)
4313 {
4314 struct trace_event_file *file;
4315 struct ftrace_probe_ops *ops;
4316 struct event_probe_data *data;
4317 unsigned long count = -1;
4318 const char *system;
4319 const char *event;
4320 char *number;
4321 bool enable;
4322 int ret;
4323
4324 if (!tr)
4325 return -ENODEV;
4326
4327 /* hash funcs only work with set_ftrace_filter */
4328 if (!enabled || !param)
4329 return -EINVAL;
4330
4331 system = strsep(&param, ":");
4332 if (!param)
4333 return -EINVAL;
4334
4335 event = strsep(&param, ":");
4336
4337 guard(mutex)(&event_mutex);
4338
4339 file = find_event_file(tr, system, event);
4340 if (!file)
4341 return -EINVAL;
4342
4343 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
4344
4345 if (enable)
4346 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
4347 else
4348 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
4349
4350 if (glob[0] == '!')
4351 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
4352
4353 if (param) {
4354 number = strsep(&param, ":");
4355
4356 if (!strlen(number))
4357 return -EINVAL;
4358
4359 /*
4360 * We use the callback data field (which is a pointer)
4361 * as our counter.
4362 */
4363 ret = kstrtoul(number, 0, &count);
4364 if (ret)
4365 return ret;
4366 }
4367
4368 /* Don't let event modules unload while probe registered */
4369 ret = trace_event_try_get_ref(file->event_call);
4370 if (!ret)
4371 return -EBUSY;
4372
4373 ret = __ftrace_event_enable_disable(file, 1, 1);
4374 if (ret < 0)
4375 goto out_put;
4376
4377 ret = -ENOMEM;
4378 data = kzalloc_obj(*data);
4379 if (!data)
4380 goto out_put;
4381
4382 data->enable = enable;
4383 data->count = count;
4384 data->file = file;
4385
4386 ret = register_ftrace_function_probe(glob, tr, ops, data);
4387 /*
4388 * On success, the above returns the number of functions enabled,
4389 * but it returns zero if it did not find any functions.
4390 * Consider finding no functions a failure too.
4391 */
4392
4393 /* Just return zero, not the number of enabled functions */
4394 if (ret > 0)
4395 return 0;
4396
4397 kfree(data);
4398
4399 if (!ret)
4400 ret = -ENOENT;
4401
4402 __ftrace_event_enable_disable(file, 0, 1);
4403 out_put:
4404 trace_event_put_ref(file->event_call);
4405 return ret;
4406 }
4407
4408 static struct ftrace_func_command event_enable_cmd = {
4409 .name = ENABLE_EVENT_STR,
4410 .func = event_enable_func,
4411 };
4412
4413 static struct ftrace_func_command event_disable_cmd = {
4414 .name = DISABLE_EVENT_STR,
4415 .func = event_enable_func,
4416 };
4417
4418 static __init int register_event_cmds(void)
4419 {
4420 int ret;
4421
4422 ret = register_ftrace_command(&event_enable_cmd);
4423 if (WARN_ON(ret < 0))
4424 return ret;
4425 ret = register_ftrace_command(&event_disable_cmd);
4426 if (WARN_ON(ret < 0))
4427 unregister_ftrace_command(&event_enable_cmd);
4428 return ret;
4429 }
4430 #else
4431 static inline int register_event_cmds(void) { return 0; }
4432 #endif /* CONFIG_DYNAMIC_FTRACE */
4433
4434 /*
4435 * The top level array and trace arrays created by boot-time tracing
4436 * have already had their trace_event_file descriptors created in order
4437 * to allow for early events to be recorded.
4438 * This function is called after the tracefs has been initialized,
4439 * and we now have to create the files associated to the events.
4440 */
4441 static void __trace_early_add_event_dirs(struct trace_array *tr)
4442 {
4443 struct trace_event_file *file;
4444 int ret;
4445
4447 list_for_each_entry(file, &tr->events, list) {
4448 ret = event_create_dir(tr->event_dir, file);
4449 if (ret < 0)
4450 pr_warn("Could not create directory for event %s\n",
4451 trace_event_name(file->event_call));
4452 }
4453 }
4454
4455 /*
4456 * For early boot up, the top trace array and the trace arrays created
4457 * by boot-time tracing need to have a list of events that can be
4458 * enabled. This must be done before the filesystem is set up in order
4459 * to allow events to be traced early.
4460 */
4461 void __trace_early_add_events(struct trace_array *tr)
4462 {
4463 struct trace_event_call *call;
4464 int ret;
4465
4466 list_for_each_entry(call, &ftrace_events, list) {
4467 /* Early boot up should not have any modules loaded */
4468 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
4469 WARN_ON_ONCE(call->module))
4470 continue;
4471
4472 ret = __trace_early_add_new_event(call, tr);
4473 if (ret < 0)
4474 pr_warn("Could not create early event %s\n",
4475 trace_event_name(call));
4476 }
4477 }
4478
4479 /* Remove the event directory structure for a trace directory. */
4480 static void
4481 __trace_remove_event_dirs(struct trace_array *tr)
4482 {
4483 struct trace_event_file *file, *next;
4484
4485 list_for_each_entry_safe(file, next, &tr->events, list)
4486 remove_event_file_dir(file);
4487 }
4488
4489 static void __add_event_to_tracers(struct trace_event_call *call)
4490 {
4491 struct trace_array *tr;
4492
4493 list_for_each_entry(tr, &ftrace_trace_arrays, list)
4494 __trace_add_new_event(call, tr);
4495 }
4496
4497 extern struct trace_event_call *__start_ftrace_events[];
4498 extern struct trace_event_call *__stop_ftrace_events[];
4499
4500 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
4501
4502 static __init int setup_trace_event(char *str)
4503 {
4504 if (bootup_event_buf[0] != '\0')
4505 strlcat(bootup_event_buf, ",", COMMAND_LINE_SIZE);
4506
4507 strlcat(bootup_event_buf, str, COMMAND_LINE_SIZE);
4508
4509 trace_set_ring_buffer_expanded(NULL);
4510 disable_tracing_selftest("running event tracing");
4511
4512 return 1;
4513 }
4514 __setup("trace_event=", setup_trace_event);
4515
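/*
 * eventfs callback for the top-level files of the "events" directory:
 * fill in @mode and @fops for @name and return 1 if this callback
 * handles the file, or 0 if it does not.
 */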
4516 static int events_callback(const char *name, umode_t *mode, void **data,
4517 const struct file_operations **fops)
4518 {
4519 if (strcmp(name, "enable") == 0) {
4520 *mode = TRACE_MODE_WRITE;
4521 *fops = &ftrace_tr_enable_fops;
4522 return 1;
4523 }
4524
4525 if (strcmp(name, "header_page") == 0) {
4526 *mode = TRACE_MODE_READ;
4527 *fops = &ftrace_show_header_page_fops;
4528
4529 } else if (strcmp(name, "header_event") == 0) {
4530 *mode = TRACE_MODE_READ;
4531 *fops = &ftrace_show_header_event_fops;
4532 } else
4533 return 0;
4534
4535 return 1;
4536 }
4537
4538 /* Expects to have event_mutex held when called */
4539 static int
4540 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
4541 {
4542 struct eventfs_inode *e_events;
4543 struct dentry *entry;
4544 int nr_entries;
4545 static struct eventfs_entry events_entries[] = {
4546 {
4547 .name = "header_page",
4548 .callback = events_callback,
4549 },
4550 {
4551 .name = "header_event",
4552 .callback = events_callback,
4553 },
4554 #define NR_RO_TOP_ENTRIES 2
4555 /* Readonly files must be above this line and counted by NR_RO_TOP_ENTRIES. */
4556 {
4557 .name = "enable",
4558 .callback = events_callback,
4559 },
4560 };
4561
4562 if (!trace_array_is_readonly(tr)) {
4563 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
4564 tr, &ftrace_set_event_fops);
4565 if (!entry)
4566 return -ENOMEM;
4567
4568 /* These are not as crucial; just warn if they are not created */
4569 trace_create_file("show_event_filters", TRACE_MODE_READ, parent, tr,
4570 &ftrace_show_event_filters_fops);
4571
4572 trace_create_file("show_event_triggers", TRACE_MODE_READ, parent, tr,
4573 &ftrace_show_event_triggers_fops);
4574
4575 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
4576 tr, &ftrace_set_event_pid_fops);
4577
4578 trace_create_file("set_event_notrace_pid",
4579 TRACE_MODE_WRITE, parent, tr,
4580 &ftrace_set_event_notrace_pid_fops);
4581 nr_entries = ARRAY_SIZE(events_entries);
4582 } else {
4583 nr_entries = NR_RO_TOP_ENTRIES;
4584 }
4585
4586 e_events = eventfs_create_events_dir("events", parent, events_entries,
4587 nr_entries, tr);
4588 if (IS_ERR(e_events)) {
4589 pr_warn("Could not create tracefs 'events' directory\n");
4590 return -ENOMEM;
4591 }
4592
4593 tr->event_dir = e_events;
4594
4595 return 0;
4596 }
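
/*
 * For reference, the per-instance layout this creates (the writable
 * files are omitted for read-only trace arrays):
 *
 *	set_event
 *	show_event_filters
 *	show_event_triggers
 *	set_event_pid
 *	set_event_notrace_pid
 *	events/enable
 *	events/header_page
 *	events/header_event
 */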
4597
4598 /**
4599 * event_trace_add_tracer - add an instance of a trace_array to events
4600 * @parent: The parent dentry to place the files/directories for events in
4601 * @tr: The trace array associated with these events
4602 *
4603 * When a new instance is created, it needs to set up its events
4604 * directory, as well as other files associated with events. It also
4605 * creates the event hierarchy in the @parent/events directory.
4606 *
4607 * Returns 0 on success.
4608 *
4609 * Must be called with event_mutex held.
4610 */
4611 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
4612 {
4613 int ret;
4614
4615 lockdep_assert_held(&event_mutex);
4616
4617 ret = create_event_toplevel_files(parent, tr);
4618 if (ret)
4619 goto out;
4620
4621 down_write(&trace_event_sem);
4622 /* If tr already has the event list, it was initialized in early boot. */
4623 if (unlikely(!list_empty(&tr->events)))
4624 __trace_early_add_event_dirs(tr);
4625 else
4626 __trace_add_event_dirs(tr);
4627 up_write(&trace_event_sem);
4628
4629 out:
4630 return ret;
4631 }
4632
4633 /*
4634 * The top trace array already had its file descriptors created.
4635 * Now the files themselves need to be created.
4636 */
4637 static __init int
4638 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
4639 {
4640 int ret;
4641
4642 guard(mutex)(&event_mutex);
4643
4644 ret = create_event_toplevel_files(parent, tr);
4645 if (ret)
4646 return ret;
4647
4648 down_write(&trace_event_sem);
4649 __trace_early_add_event_dirs(tr);
4650 up_write(&trace_event_sem);
4651
4652 return 0;
4653 }
4654
4655 /* Must be called with event_mutex held */
4656 int event_trace_del_tracer(struct trace_array *tr)
4657 {
4658 lockdep_assert_held(&event_mutex);
4659
4660 /* Disable any event triggers and associated soft-disabled events */
4661 clear_event_triggers(tr);
4662
4663 /* Clear the pid list */
4664 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
4665
4666 /* Disable any running events */
4667 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0, NULL);
4668
4669 /* Make sure no more events are being executed */
4670 tracepoint_synchronize_unregister();
4671
4672 down_write(&trace_event_sem);
4673 __trace_remove_event_dirs(tr);
4674 eventfs_remove_events_dir(tr->event_dir);
4675 up_write(&trace_event_sem);
4676
4677 tr->event_dir = NULL;
4678
4679 return 0;
4680 }
4681
4682 static __init int event_trace_memsetup(void)
4683 {
4684 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
4685 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
4686 return 0;
4687 }
4688
4689 /*
4690 * Helper function to enable or disable a comma-separated list of events
4691 * from the bootup buffer.
4692 */
4693 static __init void __early_set_events(struct trace_array *tr, char *buf, bool enable)
4694 {
4695 char *token;
4696
4697 while ((token = strsep(&buf, ","))) {
4698 if (*token) {
4699 if (enable) {
4700 if (ftrace_set_clr_event(tr, token, 1))
4701 pr_warn("Failed to enable trace event: %s\n", token);
4702 } else {
4703 ftrace_set_clr_event(tr, token, 0);
4704 }
4705 }
4706
4707 /* Put back the comma to allow this to be called again */
4708 if (buf)
4709 *(buf - 1) = ',';
4710 }
4711 }
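
/*
 * For example, with buf = "sched:sched_switch,irq:irq_handler_entry",
 * each token is handed to ftrace_set_clr_event() and the consumed ','
 * is restored afterwards, so early_enable_events() below can walk the
 * very same buffer twice for its disable-then-enable pass.
 */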
4712
4713 /**
4714 * early_enable_events - enable events from the bootup buffer
4715 * @tr: The trace array to enable the events in
4716 * @buf: The buffer containing the comma separated list of events
4717 * @disable_first: If true, disable all events in @buf before enabling them
4718 *
4719 * This function enables events from the bootup buffer. If @disable_first
4720 * is true, it will first disable all events in the buffer before enabling
4721 * them.
4722 *
4723 * For syscall events, which rely on a global refcount to register the
4724 * SYSCALL_WORK_SYSCALL_TRACEPOINT flag (especially for pid 1), we must
4725 * ensure the refcount hits zero before re-enabling them. A simple
4726 * "disable then enable" per-event is not enough if multiple syscalls are
4727 * used, as the refcount will stay above zero. Thus, we need a two-phase
4728 * approach: disable all, then enable all.
4729 */
4730 __init void
4731 early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
4732 {
4733 if (disable_first)
4734 __early_set_events(tr, buf, false);
4735
4736 __early_set_events(tr, buf, true);
4737 }
4738
4739 static __init int event_trace_enable(void)
4740 {
4741 struct trace_array *tr = top_trace_array();
4742 struct trace_event_call **iter, *call;
4743 int ret;
4744
4745 if (!tr)
4746 return -ENODEV;
4747
4748 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
4749
4750 call = *iter;
4751 ret = event_init(call);
4752 if (!ret)
4753 list_add(&call->list, &ftrace_events);
4754 }
4755
4756 register_trigger_cmds();
4757
4758 /*
4759 * We need the top trace array to have a working set of trace
4760 * points at early init, before the debug files and directories
4761 * are created. Create the file entries now, and attach them
4762 * to the actual file dentries later.
4763 */
4764 __trace_early_add_events(tr);
4765
4766 early_enable_events(tr, bootup_event_buf, false);
4767
4768 trace_printk_start_comm();
4769
4770 register_event_cmds();
4771
4773 return 0;
4774 }
4775
4776 /*
4777 * event_trace_enable() is called from trace_event_init() first to
4778 * initialize events and perhaps start any events that are on the
4779 * command line. Unfortunately, there are some events that will not
4780 * start this early, like the system call tracepoints that need
4781 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
4782 * event_trace_enable() is called before pid 1 starts, so this flag
4783 * is never set and the syscall tracepoints are never reached, even
4784 * though the events are enabled (and thus do nothing).
4785 */
4786 static __init int event_trace_enable_again(void)
4787 {
4788 struct trace_array *tr;
4789
4790 tr = top_trace_array();
4791 if (!tr)
4792 return -ENODEV;
4793
4794 early_enable_events(tr, bootup_event_buf, true);
4795
4796 return 0;
4797 }
4798
4799 early_initcall(event_trace_enable_again);
4800
4801 /* Initialize fields that are not related to tracefs */
4802 static __init int event_trace_init_fields(void)
4803 {
4804 if (trace_define_generic_fields())
4805 pr_warn("tracing: Failed to allocated generic fields");
4806
4807 if (trace_define_common_fields())
4808 pr_warn("tracing: Failed to allocate common fields");
4809
4810 return 0;
4811 }
4812
4813 __init int event_trace_init(void)
4814 {
4815 struct trace_array *tr;
4816 int ret;
4817
4818 tr = top_trace_array();
4819 if (!tr)
4820 return -ENODEV;
4821
4822 trace_create_file("available_events", TRACE_MODE_READ,
4823 NULL, tr, &ftrace_avail_fops);
4824
4825 ret = early_event_add_tracer(NULL, tr);
4826 if (ret)
4827 return ret;
4828
4829 #ifdef CONFIG_MODULES
4830 ret = register_module_notifier(&trace_module_nb);
4831 if (ret)
4832 pr_warn("Failed to register trace events module notifier\n");
4833 #endif
4834
4835 eventdir_initialized = true;
4836
4837 return 0;
4838 }
4839
4840 void __init trace_event_init(void)
4841 {
4842 event_trace_memsetup();
4843 init_ftrace_syscalls();
4844 event_trace_enable();
4845 event_trace_init_fields();
4846 }
4847
4848 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
4849
4850 static DEFINE_SPINLOCK(test_spinlock);
4851 static DEFINE_SPINLOCK(test_spinlock_irq);
4852 static DEFINE_MUTEX(test_mutex);
4853
4854 static __init void test_work(struct work_struct *dummy)
4855 {
4856 spin_lock(&test_spinlock);
4857 spin_lock_irq(&test_spinlock_irq);
4858 udelay(1);
4859 spin_unlock_irq(&test_spinlock_irq);
4860 spin_unlock(&test_spinlock);
4861
4862 mutex_lock(&test_mutex);
4863 msleep(1);
4864 mutex_unlock(&test_mutex);
4865 }
4866
4867 static __init int event_test_thread(void *unused)
4868 {
4869 void *test_malloc;
4870
4871 test_malloc = kmalloc(1234, GFP_KERNEL);
4872 if (!test_malloc)
4873 pr_info("failed to kmalloc\n");
4874
4875 schedule_on_each_cpu(test_work);
4876
4877 kfree(test_malloc);
4878
4879 set_current_state(TASK_INTERRUPTIBLE);
4880 while (!kthread_should_stop()) {
4881 schedule();
4882 set_current_state(TASK_INTERRUPTIBLE);
4883 }
4884 __set_current_state(TASK_RUNNING);
4885
4886 return 0;
4887 }
4888
4889 /*
4890 * Do various things that may trigger events.
4891 */
4892 static __init void event_test_stuff(void)
4893 {
4894 struct task_struct *test_thread;
4895
4896 test_thread = kthread_run(event_test_thread, NULL, "test-events");
4897 msleep(1);
4898 kthread_stop(test_thread);
4899 }
4900
4901 /*
4902 * For every trace event defined, we will test each trace point separately,
4903 * and then by groups, and finally all trace points.
4904 */
4905 static __init void event_trace_self_tests(void)
4906 {
4907 struct trace_subsystem_dir *dir;
4908 struct trace_event_file *file;
4909 struct trace_event_call *call;
4910 struct event_subsystem *system;
4911 struct trace_array *tr;
4912 int ret;
4913
4914 tr = top_trace_array();
4915 if (!tr)
4916 return;
4917
4918 pr_info("Running tests on trace events:\n");
4919
4920 list_for_each_entry(file, &tr->events, list) {
4921
4922 call = file->event_call;
4923
4924 /* Only test those that have a probe */
4925 if (!call->class || !call->class->probe)
4926 continue;
4927
4928 /*
4929 * Testing syscall events here is pretty useless, but
4930 * we still do it if configured. It is time consuming, though;
4931 * what we really need is a user thread to perform the
4932 * syscalls as we test.
4933 */
4934 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
4935 if (call->class->system &&
4936 strcmp(call->class->system, "syscalls") == 0)
4937 continue;
4938 #endif
4939
4940 pr_info("Testing event %s: ", trace_event_name(call));
4941
4942 /*
4943 * If an event is already enabled, someone is using
4944 * it and the self test should not be on.
4945 */
4946 if (file->flags & EVENT_FILE_FL_ENABLED) {
4947 pr_warn("Enabled event during self test!\n");
4948 WARN_ON_ONCE(1);
4949 continue;
4950 }
4951
4952 ftrace_event_enable_disable(file, 1);
4953 event_test_stuff();
4954 ftrace_event_enable_disable(file, 0);
4955
4956 pr_cont("OK\n");
4957 }
4958
4959 /* Now test at the sub system level */
4960
4961 pr_info("Running tests on trace event systems:\n");
4962
4963 list_for_each_entry(dir, &tr->systems, list) {
4964
4965 system = dir->subsystem;
4966
4967 /* the ftrace system is special, skip it */
4968 if (strcmp(system->name, "ftrace") == 0)
4969 continue;
4970
4971 pr_info("Testing event system %s: ", system->name);
4972
4973 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1, NULL);
4974 if (WARN_ON_ONCE(ret)) {
4975 pr_warn("error enabling system %s\n",
4976 system->name);
4977 continue;
4978 }
4979
4980 event_test_stuff();
4981
4982 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0, NULL);
4983 if (WARN_ON_ONCE(ret)) {
4984 pr_warn("error disabling system %s\n",
4985 system->name);
4986 continue;
4987 }
4988
4989 pr_cont("OK\n");
4990 }
4991
4992 /* Test with all events enabled */
4993
4994 pr_info("Running tests on all trace events:\n");
4995 pr_info("Testing all events: ");
4996
4997 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1, NULL);
4998 if (WARN_ON_ONCE(ret)) {
4999 pr_warn("error enabling all events\n");
5000 return;
5001 }
5002
5003 event_test_stuff();
5004
5005 /* Disable all events again */
5006 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0, NULL);
5007 if (WARN_ON_ONCE(ret)) {
5008 pr_warn("error disabling all events\n");
5009 return;
5010 }
5011
5012 pr_cont("OK\n");
5013 }
5014
5015 #ifdef CONFIG_FUNCTION_TRACER
5016
5017 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
5018
5019 static struct trace_event_file event_trace_file __initdata;
5020
5021 static void __init
5022 function_test_events_call(unsigned long ip, unsigned long parent_ip,
5023 struct ftrace_ops *op, struct ftrace_regs *regs)
5024 {
5025 struct trace_buffer *buffer;
5026 struct ring_buffer_event *event;
5027 struct ftrace_entry *entry;
5028 unsigned int trace_ctx;
5029 long disabled;
5030 int cpu;
5031
5032 trace_ctx = tracing_gen_ctx();
5033 preempt_disable_notrace();
5034 cpu = raw_smp_processor_id();
5035 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
5036
5037 if (disabled != 1)
5038 goto out;
5039
5040 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
5041 TRACE_FN, sizeof(*entry),
5042 trace_ctx);
5043 if (!event)
5044 goto out;
5045 entry = ring_buffer_event_data(event);
5046 entry->ip = ip;
5047 entry->parent_ip = parent_ip;
5048
5049 event_trigger_unlock_commit(&event_trace_file, buffer, event,
5050 entry, trace_ctx);
5051 out:
5052 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5053 preempt_enable_notrace();
5054 }
5055
5056 static struct ftrace_ops trace_ops __initdata =
5057 {
5058 .func = function_test_events_call,
5059 };
5060
5061 static __init void event_trace_self_test_with_function(void)
5062 {
5063 int ret;
5064
5065 event_trace_file.tr = top_trace_array();
5066 if (WARN_ON(!event_trace_file.tr))
5067 return;
5068
5069 ret = register_ftrace_function(&trace_ops);
5070 if (WARN_ON(ret < 0)) {
5071 pr_info("Failed to enable function tracer for event tests\n");
5072 return;
5073 }
5074 pr_info("Running tests again, along with the function tracer\n");
5075 event_trace_self_tests();
5076 unregister_ftrace_function(&trace_ops);
5077 }
5078 #else
5079 static __init void event_trace_self_test_with_function(void)
5080 {
5081 }
5082 #endif
5083
5084 static __init int event_trace_self_tests_init(void)
5085 {
5086 if (!tracing_selftest_disabled) {
5087 event_trace_self_tests();
5088 event_trace_self_test_with_function();
5089 }
5090
5091 return 0;
5092 }
5093
5094 late_initcall(event_trace_self_tests_init);
5095
5096 #endif
5097