/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
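
/*
 * Illustrative (hypothetical) use of trace_define_field() from an event's
 * define_fields callback; the struct and field names below are examples
 * only, not taken from this file:
 *
 *	ret = trace_define_field(call, "int", "my_value",
 *				 offsetof(struct my_entry, my_value),
 *				 sizeof(int), 1, FILTER_OTHER);
 */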

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, padding);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			call->flags &= ~TRACE_EVENT_FL_ENABLED;
			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER);
		}
		break;
	case 1:
		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->flags |= TRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void put_system(struct event_subsystem *system)
{
	mutex_lock(&event_mutex);
	__put_system(system);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
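
/*
 * Illustrative inputs (subsystem and event names hypothetical):
 *	"sched:sched_switch"	- one event in the "sched" subsystem
 *	"sched:*" or "sched:"	- every event in the "sched" subsystem
 *	"sched_switch"		- any subsystem or event matching that name
 */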

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
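
/*
 * Sketch of a caller (assumed, not from this file): a driver could enable
 * every event in a hypothetical "mydriver" subsystem at probe time with
 *
 *	trace_set_clr_event("mydriver", NULL, 1);
 *
 * and disable them again on remove by passing 0 for @set.
 */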

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * Its events cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->flags & TRACE_EVENT_FL_ENABLED)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->flags & TRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct event_subsystem *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
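
/*
 * For illustration, the resulting "format" file looks roughly like this
 * (event name, field and print fmt values are hypothetical):
 *
 *	name: my_event
 *	ID: 42
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *		field:int my_value;	offset:8;	size:4;	signed:1;
 *
 *	print fmt: "my_value=%d", REC->my_value
 */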

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	int ret;

	if (!inode->i_private)
		goto skip_search;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(system, &event_subsystems, list) {
		if (system == inode->i_private) {
			/* Don't open systems with no events */
			if (!system->nr_events) {
				system = NULL;
				break;
			}
			__get_system(system);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	if (system != inode->i_private)
		return -ENODEV;

 skip_search:
	ret = tracing_open_generic(inode, filp);
	if (ret < 0 && system)
		put_system(system);

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct event_subsystem *system = inode->i_private;

	if (system)
		put_system(system);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry, system,
			  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct list_head *head;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->class->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->class->reg)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, call->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}
	trace_create_file("filter", 0644, call->dir, call,
			  filter);

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}

static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
		       const struct file_operations *id,
		       const struct file_operations *enable,
		       const struct file_operations *filter,
		       const struct file_operations *format)
{
	struct dentry *d_events;
	int ret;

	/* The linker may leave blanks */
	if (!call->name)
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace events/%s\n",
					   call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, id, enable, filter, format);
	if (!ret)
		list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
				     &ftrace_enable_fops,
				     &ftrace_event_filter_fops,
				     &ftrace_event_format_fops);
	mutex_unlock(&event_mutex);
	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				__put_system(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__trace_add_event_call(*call, mod,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
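
/*
 * For example (event names hypothetical), booting with
 *	trace_event=sched:sched_switch,irq
 * seeds bootup_event_buf; event_trace_init() below splits the list on
 * commas and enables each entry via ftrace_set_clr_event().
 */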

static __init int event_trace_init(void)
{
	struct ftrace_event_call **call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. It is time consuming,
 * though. What we really need is a user thread to
 * perform the syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;
	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif