1 /*
2  * trace_events_filter - generic event filtering
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26 
27 #include "trace.h"
28 #include "trace_output.h"
29 
30 #define DEFAULT_SYS_FILTER_MESSAGE					\
31 	"### global filter ###\n"					\
32 	"# Use this to set filters for multiple events.\n"		\
33 	"# Only events with the given fields will be affected.\n"	\
34 	"# If no events are modified, an error message will be displayed here"
35 
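/*
 * For illustration, filters are normally written from user space via
 * tracefs; e.g. (assuming debugfs/tracefs is mounted at the usual
 * location, and using the block_rq_issue event as an arbitrary example):
 *
 *   echo 'nr_sector > 8 && common_pid != 0' > \
 *       /sys/kernel/debug/tracing/events/block/block_rq_issue/filter
 *
 * Writing "0" to the filter file clears it again (see
 * apply_event_filter() below).
 */
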
36 enum filter_op_ids
37 {
38 	OP_OR,
39 	OP_AND,
40 	OP_GLOB,
41 	OP_NE,
42 	OP_EQ,
43 	OP_LT,
44 	OP_LE,
45 	OP_GT,
46 	OP_GE,
47 	OP_NONE,
48 	OP_OPEN_PAREN,
49 };
50 
51 struct filter_op {
52 	int id;
53 	char *string;
54 	int precedence;
55 };
56 
57 static struct filter_op filter_ops[] = {
58 	{ OP_OR,	"||",		1 },
59 	{ OP_AND,	"&&",		2 },
60 	{ OP_GLOB,	"~",		4 },
61 	{ OP_NE,	"!=",		4 },
62 	{ OP_EQ,	"==",		4 },
63 	{ OP_LT,	"<",		5 },
64 	{ OP_LE,	"<=",		5 },
65 	{ OP_GT,	">",		5 },
66 	{ OP_GE,	">=",		5 },
67 	{ OP_NONE,	"OP_NONE",	0 },
68 	{ OP_OPEN_PAREN, "(",		0 },
69 };
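
/*
 * The precedence values above mean that, for example,
 *
 *   "a == 1 || b == 2 && c == 3"
 *
 * is parsed as "a == 1 || (b == 2 && c == 3)", since "&&"
 * (precedence 2) binds tighter than "||" (precedence 1), and the
 * comparison operators bind tighter than both.
 */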
70 
71 enum {
72 	FILT_ERR_NONE,
73 	FILT_ERR_INVALID_OP,
74 	FILT_ERR_UNBALANCED_PAREN,
75 	FILT_ERR_TOO_MANY_OPERANDS,
76 	FILT_ERR_OPERAND_TOO_LONG,
77 	FILT_ERR_FIELD_NOT_FOUND,
78 	FILT_ERR_ILLEGAL_FIELD_OP,
79 	FILT_ERR_ILLEGAL_INTVAL,
80 	FILT_ERR_BAD_SUBSYS_FILTER,
81 	FILT_ERR_TOO_MANY_PREDS,
82 	FILT_ERR_MISSING_FIELD,
83 	FILT_ERR_INVALID_FILTER,
84 };
85 
86 static char *err_text[] = {
87 	"No error",
88 	"Invalid operator",
89 	"Unbalanced parens",
90 	"Too many operands",
91 	"Operand too long",
92 	"Field not found",
93 	"Illegal operation for field type",
94 	"Illegal integer value",
95 	"Couldn't find or set field in one of a subsystem's events",
96 	"Too many terms in predicate expression",
97 	"Missing field name and/or value",
98 	"Meaningless filter expression",
99 };
100 
101 struct opstack_op {
102 	int op;
103 	struct list_head list;
104 };
105 
106 struct postfix_elt {
107 	int op;
108 	char *operand;
109 	struct list_head list;
110 };
111 
112 struct filter_parse_state {
113 	struct filter_op *ops;
114 	struct list_head opstack;
115 	struct list_head postfix;
116 	int lasterr;
117 	int lasterr_pos;
118 
119 	struct {
120 		char *string;
121 		unsigned int cnt;
122 		unsigned int tail;
123 	} infix;
124 
125 	struct {
126 		char string[MAX_FILTER_STR_VAL];
127 		int pos;
128 		unsigned int tail;
129 	} operand;
130 };
131 
132 struct pred_stack {
133 	struct filter_pred	**preds;
134 	int			index;
135 };
136 
137 #define DEFINE_COMPARISON_PRED(type)					\
138 static int filter_pred_##type(struct filter_pred *pred, void *event)	\
139 {									\
140 	type *addr = (type *)(event + pred->offset);			\
141 	type val = (type)pred->val;					\
142 	int match = 0;							\
143 									\
144 	switch (pred->op) {						\
145 	case OP_LT:							\
146 		match = (*addr < val);					\
147 		break;							\
148 	case OP_LE:							\
149 		match = (*addr <= val);					\
150 		break;							\
151 	case OP_GT:							\
152 		match = (*addr > val);					\
153 		break;							\
154 	case OP_GE:							\
155 		match = (*addr >= val);					\
156 		break;							\
157 	default:							\
158 		break;							\
159 	}								\
160 									\
161 	return match;							\
162 }
163 
164 #define DEFINE_EQUALITY_PRED(size)					\
165 static int filter_pred_##size(struct filter_pred *pred, void *event)	\
166 {									\
167 	u##size *addr = (u##size *)(event + pred->offset);		\
168 	u##size val = (u##size)pred->val;				\
169 	int match;							\
170 									\
171 	match = (val == *addr) ^ pred->not;				\
172 									\
173 	return match;							\
174 }
175 
176 DEFINE_COMPARISON_PRED(s64);
177 DEFINE_COMPARISON_PRED(u64);
178 DEFINE_COMPARISON_PRED(s32);
179 DEFINE_COMPARISON_PRED(u32);
180 DEFINE_COMPARISON_PRED(s16);
181 DEFINE_COMPARISON_PRED(u16);
182 DEFINE_COMPARISON_PRED(s8);
183 DEFINE_COMPARISON_PRED(u8);
184 
185 DEFINE_EQUALITY_PRED(64);
186 DEFINE_EQUALITY_PRED(32);
187 DEFINE_EQUALITY_PRED(16);
188 DEFINE_EQUALITY_PRED(8);
189 
190 /* Filter predicate for fixed sized arrays of characters */
191 static int filter_pred_string(struct filter_pred *pred, void *event)
192 {
193 	char *addr = (char *)(event + pred->offset);
194 	int cmp, match;
195 
196 	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
197 
198 	match = cmp ^ pred->not;
199 
200 	return match;
201 }
202 
203 /* Filter predicate for char * pointers */
204 static int filter_pred_pchar(struct filter_pred *pred, void *event)
205 {
206 	char **addr = (char **)(event + pred->offset);
207 	int cmp, match;
208 	int len = strlen(*addr) + 1;	/* including trailing '\0' */
209 
210 	cmp = pred->regex.match(*addr, &pred->regex, len);
211 
212 	match = cmp ^ pred->not;
213 
214 	return match;
215 }
216 
217 /*
218  * Filter predicate for dynamic sized arrays of characters.
219  * These are implemented through a list of strings at the end
220  * of the entry.
221  * Each of these strings also has a field in the entry which
222  * contains its offset from the beginning of the entry.
223  * We therefore first read this field, then add its value to the
224  * address of the entry, which finally gives us the address of the
225  * string.
226  */
227 static int filter_pred_strloc(struct filter_pred *pred, void *event)
228 {
229 	u32 str_item = *(u32 *)(event + pred->offset);
230 	int str_loc = str_item & 0xffff;
231 	int str_len = str_item >> 16;
232 	char *addr = (char *)(event + str_loc);
233 	int cmp, match;
234 
235 	cmp = pred->regex.match(addr, &pred->regex, str_len);
236 
237 	match = cmp ^ pred->not;
238 
239 	return match;
240 }
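
/*
 * Example of the encoding used above: for a "__data_loc char[]"
 * field, a value of 0x000c0040 means the string starts 0x40 bytes
 * into the entry and is 0xc bytes long (low 16 bits = offset,
 * high 16 bits = length).
 */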
241 
242 static int filter_pred_none(struct filter_pred *pred, void *event)
243 {
244 	return 0;
245 }
246 
247 /*
248  * regex_match_foo - Basic regex callbacks
249  *
250  * @str: the string to be searched
251  * @r:   the regex structure containing the pattern string
252  * @len: the length of the string to be searched (including '\0')
253  *
254  * Note:
255  * - @str might not be NULL-terminated if it's of type DYN_STRING
256  *   or STATIC_STRING
257  */
258 
259 static int regex_match_full(char *str, struct regex *r, int len)
260 {
261 	if (strncmp(str, r->pattern, len) == 0)
262 		return 1;
263 	return 0;
264 }
265 
266 static int regex_match_front(char *str, struct regex *r, int len)
267 {
268 	if (strncmp(str, r->pattern, r->len) == 0)
269 		return 1;
270 	return 0;
271 }
272 
273 static int regex_match_middle(char *str, struct regex *r, int len)
274 {
275 	if (strnstr(str, r->pattern, len))
276 		return 1;
277 	return 0;
278 }
279 
280 static int regex_match_end(char *str, struct regex *r, int len)
281 {
282 	int strlen = len - 1;
283 
284 	if (strlen >= r->len &&
285 	    memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
286 		return 1;
287 	return 0;
288 }
289 
290 /**
291  * filter_parse_regex - parse a basic regex
292  * @buff:   the raw regex
293  * @len:    length of the regex
294  * @search: will point to the beginning of the string to compare
295  * @not:    tell whether the match will have to be inverted
296  *
297  * The caller passes in a buffer containing a regex; this function
298  * will set search to point to the search part of the buffer and
299  * return the type of search it is (see enum above).
300  * Note that this modifies buff.
301  *
302  * Returns enum type.
303  *  search returns the pointer to use for comparison.
304  *  not returns 1 if buff started with a '!'
305  *     0 otherwise.
306  */
307 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
308 {
309 	int type = MATCH_FULL;
310 	int i;
311 
312 	if (buff[0] == '!') {
313 		*not = 1;
314 		buff++;
315 		len--;
316 	} else
317 		*not = 0;
318 
319 	*search = buff;
320 
321 	for (i = 0; i < len; i++) {
322 		if (buff[i] == '*') {
323 			if (!i) {
324 				*search = buff + 1;
325 				type = MATCH_END_ONLY;
326 			} else {
327 				if (type == MATCH_END_ONLY)
328 					type = MATCH_MIDDLE_ONLY;
329 				else
330 					type = MATCH_FRONT_ONLY;
331 				buff[i] = 0;
332 				break;
333 			}
334 		}
335 	}
336 
337 	return type;
338 }
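
/*
 * Examples of how the patterns are classified:
 *
 *   "glob"   -> MATCH_FULL
 *   "glob*"  -> MATCH_FRONT_ONLY  (pattern truncated to "glob")
 *   "*glob"  -> MATCH_END_ONLY    (*search points past the '*')
 *   "*glob*" -> MATCH_MIDDLE_ONLY
 *   "!glob"  -> MATCH_FULL with *not set to 1
 */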
339 
340 static void filter_build_regex(struct filter_pred *pred)
341 {
342 	struct regex *r = &pred->regex;
343 	char *search;
344 	enum regex_type type = MATCH_FULL;
345 	int not = 0;
346 
347 	if (pred->op == OP_GLOB) {
348 		type = filter_parse_regex(r->pattern, r->len, &search, &not);
349 		r->len = strlen(search);
350 		memmove(r->pattern, search, r->len+1);
351 	}
352 
353 	switch (type) {
354 	case MATCH_FULL:
355 		r->match = regex_match_full;
356 		break;
357 	case MATCH_FRONT_ONLY:
358 		r->match = regex_match_front;
359 		break;
360 	case MATCH_MIDDLE_ONLY:
361 		r->match = regex_match_middle;
362 		break;
363 	case MATCH_END_ONLY:
364 		r->match = regex_match_end;
365 		break;
366 	}
367 
368 	pred->not ^= not;
369 }
370 
371 enum move_type {
372 	MOVE_DOWN,
373 	MOVE_UP_FROM_LEFT,
374 	MOVE_UP_FROM_RIGHT
375 };
376 
377 static struct filter_pred *
378 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
379 		int index, enum move_type *move)
380 {
381 	if (pred->parent & FILTER_PRED_IS_RIGHT)
382 		*move = MOVE_UP_FROM_RIGHT;
383 	else
384 		*move = MOVE_UP_FROM_LEFT;
385 	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
386 
387 	return pred;
388 }
389 
390 enum walk_return {
391 	WALK_PRED_ABORT,
392 	WALK_PRED_PARENT,
393 	WALK_PRED_DEFAULT,
394 };
395 
396 typedef int (*filter_pred_walkcb_t) (enum move_type move,
397 				     struct filter_pred *pred,
398 				     int *err, void *data);
399 
400 static int walk_pred_tree(struct filter_pred *preds,
401 			  struct filter_pred *root,
402 			  filter_pred_walkcb_t cb, void *data)
403 {
404 	struct filter_pred *pred = root;
405 	enum move_type move = MOVE_DOWN;
406 	int done = 0;
407 
408 	if  (!preds)
409 		return -EINVAL;
410 
411 	do {
412 		int err = 0, ret;
413 
414 		ret = cb(move, pred, &err, data);
415 		if (ret == WALK_PRED_ABORT)
416 			return err;
417 		if (ret == WALK_PRED_PARENT)
418 			goto get_parent;
419 
420 		switch (move) {
421 		case MOVE_DOWN:
422 			if (pred->left != FILTER_PRED_INVALID) {
423 				pred = &preds[pred->left];
424 				continue;
425 			}
426 			goto get_parent;
427 		case MOVE_UP_FROM_LEFT:
428 			pred = &preds[pred->right];
429 			move = MOVE_DOWN;
430 			continue;
431 		case MOVE_UP_FROM_RIGHT:
432  get_parent:
433 			if (pred == root)
434 				break;
435 			pred = get_pred_parent(pred, preds,
436 					       pred->parent,
437 					       &move);
438 			continue;
439 		}
440 		done = 1;
441 	} while (!done);
442 
443 	/* We are fine. */
444 	return 0;
445 }
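
/*
 * Example walk: for the tree built from "a == 1 && b == 2",
 *
 *            AND
 *           /   \
 *      a == 1   b == 2
 *
 * a callback that always returns WALK_PRED_DEFAULT is invoked with
 * MOVE_DOWN(AND), MOVE_DOWN(a == 1), MOVE_UP_FROM_LEFT(AND),
 * MOVE_DOWN(b == 2), MOVE_UP_FROM_RIGHT(AND), and the walk then
 * terminates at the root.
 */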
446 
447 /*
448  * A series of ANDs or ORs was found together. Instead of
449  * climbing up and down the tree branches, an array of the
450  * ops was made in order of checks. We can just move across
451  * the array and short circuit if needed.
452  */
453 static int process_ops(struct filter_pred *preds,
454 		       struct filter_pred *op, void *rec)
455 {
456 	struct filter_pred *pred;
457 	int match = 0;
458 	int type;
459 	int i;
460 
461 	/*
462 	 * Micro-optimization: We set type to true if op
463 	 * is an OR and false otherwise (AND). Then we
464 	 * just need to test if the match is equal to
465 	 * the type, and if it is, we can short circuit the
466 	 * rest of the checks:
467 	 *
468 	 * if ((match && op->op == OP_OR) ||
469 	 *     (!match && op->op == OP_AND))
470 	 *	  return match;
471 	 */
472 	type = op->op == OP_OR;
473 
474 	for (i = 0; i < op->val; i++) {
475 		pred = &preds[op->ops[i]];
476 		if (!WARN_ON_ONCE(!pred->fn))
477 			match = pred->fn(pred, rec);
478 		if (!!match == type)
479 			return match;
480 	}
481 	return match;
482 }
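
/*
 * Example: for "a == 1 && b == 2 && c == 3" the three leaves are
 * folded into the top AND, so process_ops() sees op->val == 3 with
 * op->ops[] holding the three leaf indices, and returns as soon as
 * one of them evaluates to 0.
 */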
483 
484 struct filter_match_preds_data {
485 	struct filter_pred *preds;
486 	int match;
487 	void *rec;
488 };
489 
490 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
491 				 int *err, void *data)
492 {
493 	struct filter_match_preds_data *d = data;
494 
495 	*err = 0;
496 	switch (move) {
497 	case MOVE_DOWN:
498 		/* only AND and OR have children */
499 		if (pred->left != FILTER_PRED_INVALID) {
500 			/* If ops is set, then it was folded. */
501 			if (!pred->ops)
502 				return WALK_PRED_DEFAULT;
503 			/* We can treat folded ops as a leaf node */
504 			d->match = process_ops(d->preds, pred, d->rec);
505 		} else {
506 			if (!WARN_ON_ONCE(!pred->fn))
507 				d->match = pred->fn(pred, d->rec);
508 		}
509 
510 		return WALK_PRED_PARENT;
511 	case MOVE_UP_FROM_LEFT:
512 		/*
513 		 * Check for short circuits.
514 		 *
515 		 * Optimization: !!match == (pred->op == OP_OR)
516 		 *   is the same as:
517 		 * if ((match && pred->op == OP_OR) ||
518 		 *     (!match && pred->op == OP_AND))
519 		 */
520 		if (!!d->match == (pred->op == OP_OR))
521 			return WALK_PRED_PARENT;
522 		break;
523 	case MOVE_UP_FROM_RIGHT:
524 		break;
525 	}
526 
527 	return WALK_PRED_DEFAULT;
528 }
529 
530 /* return 1 if event matches, 0 otherwise (discard) */
531 int filter_match_preds(struct event_filter *filter, void *rec)
532 {
533 	struct filter_pred *preds;
534 	struct filter_pred *root;
535 	struct filter_match_preds_data data = {
536 		/* match is currently meaningless */
537 		.match = -1,
538 		.rec   = rec,
539 	};
540 	int n_preds, ret;
541 
542 	/* no filter is considered a match */
543 	if (!filter)
544 		return 1;
545 
546 	n_preds = filter->n_preds;
547 	if (!n_preds)
548 		return 1;
549 
550 	/*
551 	 * n_preds, root and filter->preds are protected with preemption disabled.
552 	 */
553 	root = rcu_dereference_sched(filter->root);
554 	if (!root)
555 		return 1;
556 
557 	data.preds = preds = rcu_dereference_sched(filter->preds);
558 	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
559 	WARN_ON(ret);
560 	return data.match;
561 }
562 EXPORT_SYMBOL_GPL(filter_match_preds);
563 
564 static void parse_error(struct filter_parse_state *ps, int err, int pos)
565 {
566 	ps->lasterr = err;
567 	ps->lasterr_pos = pos;
568 }
569 
570 static void remove_filter_string(struct event_filter *filter)
571 {
572 	if (!filter)
573 		return;
574 
575 	kfree(filter->filter_string);
576 	filter->filter_string = NULL;
577 }
578 
579 static int replace_filter_string(struct event_filter *filter,
580 				 char *filter_string)
581 {
582 	kfree(filter->filter_string);
583 	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
584 	if (!filter->filter_string)
585 		return -ENOMEM;
586 
587 	return 0;
588 }
589 
590 static int append_filter_string(struct event_filter *filter,
591 				char *string)
592 {
593 	int newlen;
594 	char *new_filter_string;
595 
596 	BUG_ON(!filter->filter_string);
597 	newlen = strlen(filter->filter_string) + strlen(string) + 1;
598 	new_filter_string = kmalloc(newlen, GFP_KERNEL);
599 	if (!new_filter_string)
600 		return -ENOMEM;
601 
602 	strcpy(new_filter_string, filter->filter_string);
603 	strcat(new_filter_string, string);
604 	kfree(filter->filter_string);
605 	filter->filter_string = new_filter_string;
606 
607 	return 0;
608 }
609 
610 static void append_filter_err(struct filter_parse_state *ps,
611 			      struct event_filter *filter)
612 {
613 	int pos = ps->lasterr_pos;
614 	char *buf, *pbuf;
615 
616 	buf = (char *)__get_free_page(GFP_TEMPORARY);
617 	if (!buf)
618 		return;
619 
620 	append_filter_string(filter, "\n");
621 	memset(buf, ' ', PAGE_SIZE);
622 	if (pos > PAGE_SIZE - 128)
623 		pos = 0;
624 	buf[pos] = '^';
625 	pbuf = &buf[pos] + 1;
626 
627 	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
628 	append_filter_string(filter, buf);
629 	free_page((unsigned long) buf);
630 }
631 
632 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
633 {
634 	struct event_filter *filter;
635 
636 	mutex_lock(&event_mutex);
637 	filter = call->filter;
638 	if (filter && filter->filter_string)
639 		trace_seq_printf(s, "%s\n", filter->filter_string);
640 	else
641 		trace_seq_printf(s, "none\n");
642 	mutex_unlock(&event_mutex);
643 }
644 
645 void print_subsystem_event_filter(struct event_subsystem *system,
646 				  struct trace_seq *s)
647 {
648 	struct event_filter *filter;
649 
650 	mutex_lock(&event_mutex);
651 	filter = system->filter;
652 	if (filter && filter->filter_string)
653 		trace_seq_printf(s, "%s\n", filter->filter_string);
654 	else
655 		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
656 	mutex_unlock(&event_mutex);
657 }
658 
659 static struct ftrace_event_field *
660 __find_event_field(struct list_head *head, char *name)
661 {
662 	struct ftrace_event_field *field;
663 
664 	list_for_each_entry(field, head, link) {
665 		if (!strcmp(field->name, name))
666 			return field;
667 	}
668 
669 	return NULL;
670 }
671 
672 static struct ftrace_event_field *
673 find_event_field(struct ftrace_event_call *call, char *name)
674 {
675 	struct ftrace_event_field *field;
676 	struct list_head *head;
677 
678 	field = __find_event_field(&ftrace_common_fields, name);
679 	if (field)
680 		return field;
681 
682 	head = trace_get_fields(call);
683 	return __find_event_field(head, name);
684 }
685 
686 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
687 {
688 	stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
689 	if (!stack->preds)
690 		return -ENOMEM;
691 	stack->index = n_preds;
692 	return 0;
693 }
694 
695 static void __free_pred_stack(struct pred_stack *stack)
696 {
697 	kfree(stack->preds);
698 	stack->index = 0;
699 }
700 
701 static int __push_pred_stack(struct pred_stack *stack,
702 			     struct filter_pred *pred)
703 {
704 	int index = stack->index;
705 
706 	if (WARN_ON(index == 0))
707 		return -ENOSPC;
708 
709 	stack->preds[--index] = pred;
710 	stack->index = index;
711 	return 0;
712 }
713 
714 static struct filter_pred *
715 __pop_pred_stack(struct pred_stack *stack)
716 {
717 	struct filter_pred *pred;
718 	int index = stack->index;
719 
720 	pred = stack->preds[index++];
721 	if (!pred)
722 		return NULL;
723 
724 	stack->index = index;
725 	return pred;
726 }
727 
728 static int filter_set_pred(struct event_filter *filter,
729 			   int idx,
730 			   struct pred_stack *stack,
731 			   struct filter_pred *src)
732 {
733 	struct filter_pred *dest = &filter->preds[idx];
734 	struct filter_pred *left;
735 	struct filter_pred *right;
736 
737 	*dest = *src;
738 	dest->index = idx;
739 
740 	if (dest->op == OP_OR || dest->op == OP_AND) {
741 		right = __pop_pred_stack(stack);
742 		left = __pop_pred_stack(stack);
743 		if (!left || !right)
744 			return -EINVAL;
745 		/*
746 		 * If both children can be folded
747 		 * and they are the same op as this op or a leaf,
748 		 * then this op can be folded.
749 		 */
750 		if (left->index & FILTER_PRED_FOLD &&
751 		    (left->op == dest->op ||
752 		     left->left == FILTER_PRED_INVALID) &&
753 		    right->index & FILTER_PRED_FOLD &&
754 		    (right->op == dest->op ||
755 		     right->left == FILTER_PRED_INVALID))
756 			dest->index |= FILTER_PRED_FOLD;
757 
758 		dest->left = left->index & ~FILTER_PRED_FOLD;
759 		dest->right = right->index & ~FILTER_PRED_FOLD;
760 		left->parent = dest->index & ~FILTER_PRED_FOLD;
761 		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
762 	} else {
763 		/*
764 		 * Make dest->left invalid to be used as a quick
765 		 * way to know this is a leaf node.
766 		 */
767 		dest->left = FILTER_PRED_INVALID;
768 
769 		/* All leafs allow folding the parent ops. */
770 		dest->index |= FILTER_PRED_FOLD;
771 	}
772 
773 	return __push_pred_stack(stack, dest);
774 }
775 
776 static void __free_preds(struct event_filter *filter)
777 {
778 	if (filter->preds) {
779 		kfree(filter->preds);
780 		filter->preds = NULL;
781 	}
782 	filter->a_preds = 0;
783 	filter->n_preds = 0;
784 }
785 
786 static void filter_disable(struct ftrace_event_call *call)
787 {
788 	call->flags &= ~TRACE_EVENT_FL_FILTERED;
789 }
790 
791 static void __free_filter(struct event_filter *filter)
792 {
793 	if (!filter)
794 		return;
795 
796 	__free_preds(filter);
797 	kfree(filter->filter_string);
798 	kfree(filter);
799 }
800 
801 /*
802  * Called when destroying the ftrace_event_call.
803  * The call is being freed, so we do not need to worry about
804  * the call being currently used. This is for module code removing
805  * the tracepoints from within it.
806  */
807 void destroy_preds(struct ftrace_event_call *call)
808 {
809 	__free_filter(call->filter);
810 	call->filter = NULL;
811 }
812 
813 static struct event_filter *__alloc_filter(void)
814 {
815 	struct event_filter *filter;
816 
817 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
818 	return filter;
819 }
820 
821 static int __alloc_preds(struct event_filter *filter, int n_preds)
822 {
823 	struct filter_pred *pred;
824 	int i;
825 
826 	if (filter->preds)
827 		__free_preds(filter);
828 
829 	filter->preds =
830 		kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
831 
832 	if (!filter->preds)
833 		return -ENOMEM;
834 
835 	filter->a_preds = n_preds;
836 	filter->n_preds = 0;
837 
838 	for (i = 0; i < n_preds; i++) {
839 		pred = &filter->preds[i];
840 		pred->fn = filter_pred_none;
841 	}
842 
843 	return 0;
844 }
845 
846 static void filter_free_subsystem_preds(struct event_subsystem *system)
847 {
848 	struct ftrace_event_call *call;
849 
850 	list_for_each_entry(call, &ftrace_events, list) {
851 		if (strcmp(call->class->system, system->name) != 0)
852 			continue;
853 
854 		filter_disable(call);
855 		remove_filter_string(call->filter);
856 	}
857 }
858 
859 static void filter_free_subsystem_filters(struct event_subsystem *system)
860 {
861 	struct ftrace_event_call *call;
862 
863 	list_for_each_entry(call, &ftrace_events, list) {
864 		if (strcmp(call->class->system, system->name) != 0)
865 			continue;
866 		__free_filter(call->filter);
867 		call->filter = NULL;
868 	}
869 }
870 
871 static int filter_add_pred(struct filter_parse_state *ps,
872 			   struct event_filter *filter,
873 			   struct filter_pred *pred,
874 			   struct pred_stack *stack)
875 {
876 	int err;
877 
878 	if (WARN_ON(filter->n_preds == filter->a_preds)) {
879 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
880 		return -ENOSPC;
881 	}
882 
883 	err = filter_set_pred(filter, filter->n_preds, stack, pred);
884 	if (err)
885 		return err;
886 
887 	filter->n_preds++;
888 
889 	return 0;
890 }
891 
892 int filter_assign_type(const char *type)
893 {
894 	if (strstr(type, "__data_loc") && strstr(type, "char"))
895 		return FILTER_DYN_STRING;
896 
897 	if (strchr(type, '[') && strstr(type, "char"))
898 		return FILTER_STATIC_STRING;
899 
900 	return FILTER_OTHER;
901 }
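
/*
 * Examples (field names are illustrative): a "__data_loc char[] name"
 * field maps to FILTER_DYN_STRING, a "char comm[16]" field to
 * FILTER_STATIC_STRING, and numeric fields such as "int prio" fall
 * through to FILTER_OTHER.
 */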
902 
903 static bool is_string_field(struct ftrace_event_field *field)
904 {
905 	return field->filter_type == FILTER_DYN_STRING ||
906 	       field->filter_type == FILTER_STATIC_STRING ||
907 	       field->filter_type == FILTER_PTR_STRING;
908 }
909 
910 static int is_legal_op(struct ftrace_event_field *field, int op)
911 {
912 	if (is_string_field(field) &&
913 	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
914 		return 0;
915 	if (!is_string_field(field) && op == OP_GLOB)
916 		return 0;
917 
918 	return 1;
919 }
920 
921 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
922 					     int field_is_signed)
923 {
924 	filter_pred_fn_t fn = NULL;
925 
926 	switch (field_size) {
927 	case 8:
928 		if (op == OP_EQ || op == OP_NE)
929 			fn = filter_pred_64;
930 		else if (field_is_signed)
931 			fn = filter_pred_s64;
932 		else
933 			fn = filter_pred_u64;
934 		break;
935 	case 4:
936 		if (op == OP_EQ || op == OP_NE)
937 			fn = filter_pred_32;
938 		else if (field_is_signed)
939 			fn = filter_pred_s32;
940 		else
941 			fn = filter_pred_u32;
942 		break;
943 	case 2:
944 		if (op == OP_EQ || op == OP_NE)
945 			fn = filter_pred_16;
946 		else if (field_is_signed)
947 			fn = filter_pred_s16;
948 		else
949 			fn = filter_pred_u16;
950 		break;
951 	case 1:
952 		if (op == OP_EQ || op == OP_NE)
953 			fn = filter_pred_8;
954 		else if (field_is_signed)
955 			fn = filter_pred_s8;
956 		else
957 			fn = filter_pred_u8;
958 		break;
959 	}
960 
961 	return fn;
962 }
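
/*
 * Example: a 4-byte field compared with "==" or "!=" gets
 * filter_pred_32(); compared with "<", "<=", ">" or ">=" it gets
 * filter_pred_s32() if the field is signed and filter_pred_u32()
 * otherwise.
 */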
963 
964 static int init_pred(struct filter_parse_state *ps,
965 		     struct ftrace_event_field *field,
966 		     struct filter_pred *pred)
967 
968 {
969 	filter_pred_fn_t fn = filter_pred_none;
970 	unsigned long long val;
971 	int ret;
972 
973 	pred->offset = field->offset;
974 
975 	if (!is_legal_op(field, pred->op)) {
976 		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
977 		return -EINVAL;
978 	}
979 
980 	if (is_string_field(field)) {
981 		filter_build_regex(pred);
982 
983 		if (field->filter_type == FILTER_STATIC_STRING) {
984 			fn = filter_pred_string;
985 			pred->regex.field_len = field->size;
986 		} else if (field->filter_type == FILTER_DYN_STRING)
987 			fn = filter_pred_strloc;
988 		else
989 			fn = filter_pred_pchar;
990 	} else {
991 		if (field->is_signed)
992 			ret = strict_strtoll(pred->regex.pattern, 0, &val);
993 		else
994 			ret = strict_strtoull(pred->regex.pattern, 0, &val);
995 		if (ret) {
996 			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
997 			return -EINVAL;
998 		}
999 		pred->val = val;
1000 
1001 		fn = select_comparison_fn(pred->op, field->size,
1002 					  field->is_signed);
1003 		if (!fn) {
1004 			parse_error(ps, FILT_ERR_INVALID_OP, 0);
1005 			return -EINVAL;
1006 		}
1007 	}
1008 
1009 	if (pred->op == OP_NE)
1010 		pred->not = 1;
1011 
1012 	pred->fn = fn;
1013 	return 0;
1014 }
1015 
1016 static void parse_init(struct filter_parse_state *ps,
1017 		       struct filter_op *ops,
1018 		       char *infix_string)
1019 {
1020 	memset(ps, '\0', sizeof(*ps));
1021 
1022 	ps->infix.string = infix_string;
1023 	ps->infix.cnt = strlen(infix_string);
1024 	ps->ops = ops;
1025 
1026 	INIT_LIST_HEAD(&ps->opstack);
1027 	INIT_LIST_HEAD(&ps->postfix);
1028 }
1029 
1030 static char infix_next(struct filter_parse_state *ps)
1031 {
1032 	ps->infix.cnt--;
1033 
1034 	return ps->infix.string[ps->infix.tail++];
1035 }
1036 
1037 static char infix_peek(struct filter_parse_state *ps)
1038 {
1039 	if (ps->infix.tail == strlen(ps->infix.string))
1040 		return 0;
1041 
1042 	return ps->infix.string[ps->infix.tail];
1043 }
1044 
1045 static void infix_advance(struct filter_parse_state *ps)
1046 {
1047 	ps->infix.cnt--;
1048 	ps->infix.tail++;
1049 }
1050 
1051 static inline int is_precedence_lower(struct filter_parse_state *ps,
1052 				      int a, int b)
1053 {
1054 	return ps->ops[a].precedence < ps->ops[b].precedence;
1055 }
1056 
1057 static inline int is_op_char(struct filter_parse_state *ps, char c)
1058 {
1059 	int i;
1060 
1061 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1062 		if (ps->ops[i].string[0] == c)
1063 			return 1;
1064 	}
1065 
1066 	return 0;
1067 }
1068 
1069 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1070 {
1071 	char nextc = infix_peek(ps);
1072 	char opstr[3];
1073 	int i;
1074 
1075 	opstr[0] = firstc;
1076 	opstr[1] = nextc;
1077 	opstr[2] = '\0';
1078 
1079 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1080 		if (!strcmp(opstr, ps->ops[i].string)) {
1081 			infix_advance(ps);
1082 			return ps->ops[i].id;
1083 		}
1084 	}
1085 
1086 	opstr[1] = '\0';
1087 
1088 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1089 		if (!strcmp(opstr, ps->ops[i].string))
1090 			return ps->ops[i].id;
1091 	}
1092 
1093 	return OP_NONE;
1094 }
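
/*
 * Example: for the input "<=", firstc is '<' and infix_peek()
 * returns '=', so the two-character "<=" (OP_LE) matches first;
 * a bare '<' only falls back to the one-character OP_LT when no
 * two-character operator matches.
 */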
1095 
1096 static inline void clear_operand_string(struct filter_parse_state *ps)
1097 {
1098 	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1099 	ps->operand.tail = 0;
1100 }
1101 
1102 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1103 {
1104 	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1105 		return -EINVAL;
1106 
1107 	ps->operand.string[ps->operand.tail++] = c;
1108 
1109 	return 0;
1110 }
1111 
1112 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1113 {
1114 	struct opstack_op *opstack_op;
1115 
1116 	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1117 	if (!opstack_op)
1118 		return -ENOMEM;
1119 
1120 	opstack_op->op = op;
1121 	list_add(&opstack_op->list, &ps->opstack);
1122 
1123 	return 0;
1124 }
1125 
1126 static int filter_opstack_empty(struct filter_parse_state *ps)
1127 {
1128 	return list_empty(&ps->opstack);
1129 }
1130 
1131 static int filter_opstack_top(struct filter_parse_state *ps)
1132 {
1133 	struct opstack_op *opstack_op;
1134 
1135 	if (filter_opstack_empty(ps))
1136 		return OP_NONE;
1137 
1138 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1139 
1140 	return opstack_op->op;
1141 }
1142 
1143 static int filter_opstack_pop(struct filter_parse_state *ps)
1144 {
1145 	struct opstack_op *opstack_op;
1146 	int op;
1147 
1148 	if (filter_opstack_empty(ps))
1149 		return OP_NONE;
1150 
1151 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1152 	op = opstack_op->op;
1153 	list_del(&opstack_op->list);
1154 
1155 	kfree(opstack_op);
1156 
1157 	return op;
1158 }
1159 
1160 static void filter_opstack_clear(struct filter_parse_state *ps)
1161 {
1162 	while (!filter_opstack_empty(ps))
1163 		filter_opstack_pop(ps);
1164 }
1165 
1166 static char *curr_operand(struct filter_parse_state *ps)
1167 {
1168 	return ps->operand.string;
1169 }
1170 
1171 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1172 {
1173 	struct postfix_elt *elt;
1174 
1175 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1176 	if (!elt)
1177 		return -ENOMEM;
1178 
1179 	elt->op = OP_NONE;
1180 	elt->operand = kstrdup(operand, GFP_KERNEL);
1181 	if (!elt->operand) {
1182 		kfree(elt);
1183 		return -ENOMEM;
1184 	}
1185 
1186 	list_add_tail(&elt->list, &ps->postfix);
1187 
1188 	return 0;
1189 }
1190 
1191 static int postfix_append_op(struct filter_parse_state *ps, int op)
1192 {
1193 	struct postfix_elt *elt;
1194 
1195 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1196 	if (!elt)
1197 		return -ENOMEM;
1198 
1199 	elt->op = op;
1200 	elt->operand = NULL;
1201 
1202 	list_add_tail(&elt->list, &ps->postfix);
1203 
1204 	return 0;
1205 }
1206 
1207 static void postfix_clear(struct filter_parse_state *ps)
1208 {
1209 	struct postfix_elt *elt;
1210 
1211 	while (!list_empty(&ps->postfix)) {
1212 		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1213 		list_del(&elt->list);
1214 		kfree(elt->operand);
1215 		kfree(elt);
1216 	}
1217 }
1218 
1219 static int filter_parse(struct filter_parse_state *ps)
1220 {
1221 	int in_string = 0;
1222 	int op, top_op;
1223 	char ch;
1224 
1225 	while ((ch = infix_next(ps))) {
1226 		if (ch == '"') {
1227 			in_string ^= 1;
1228 			continue;
1229 		}
1230 
1231 		if (in_string)
1232 			goto parse_operand;
1233 
1234 		if (isspace(ch))
1235 			continue;
1236 
1237 		if (is_op_char(ps, ch)) {
1238 			op = infix_get_op(ps, ch);
1239 			if (op == OP_NONE) {
1240 				parse_error(ps, FILT_ERR_INVALID_OP, 0);
1241 				return -EINVAL;
1242 			}
1243 
1244 			if (strlen(curr_operand(ps))) {
1245 				postfix_append_operand(ps, curr_operand(ps));
1246 				clear_operand_string(ps);
1247 			}
1248 
1249 			while (!filter_opstack_empty(ps)) {
1250 				top_op = filter_opstack_top(ps);
1251 				if (!is_precedence_lower(ps, top_op, op)) {
1252 					top_op = filter_opstack_pop(ps);
1253 					postfix_append_op(ps, top_op);
1254 					continue;
1255 				}
1256 				break;
1257 			}
1258 
1259 			filter_opstack_push(ps, op);
1260 			continue;
1261 		}
1262 
1263 		if (ch == '(') {
1264 			filter_opstack_push(ps, OP_OPEN_PAREN);
1265 			continue;
1266 		}
1267 
1268 		if (ch == ')') {
1269 			if (strlen(curr_operand(ps))) {
1270 				postfix_append_operand(ps, curr_operand(ps));
1271 				clear_operand_string(ps);
1272 			}
1273 
1274 			top_op = filter_opstack_pop(ps);
1275 			while (top_op != OP_NONE) {
1276 				if (top_op == OP_OPEN_PAREN)
1277 					break;
1278 				postfix_append_op(ps, top_op);
1279 				top_op = filter_opstack_pop(ps);
1280 			}
1281 			if (top_op == OP_NONE) {
1282 				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1283 				return -EINVAL;
1284 			}
1285 			continue;
1286 		}
1287 parse_operand:
1288 		if (append_operand_char(ps, ch)) {
1289 			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1290 			return -EINVAL;
1291 		}
1292 	}
1293 
1294 	if (strlen(curr_operand(ps)))
1295 		postfix_append_operand(ps, curr_operand(ps));
1296 
1297 	while (!filter_opstack_empty(ps)) {
1298 		top_op = filter_opstack_pop(ps);
1299 		if (top_op == OP_NONE)
1300 			break;
1301 		if (top_op == OP_OPEN_PAREN) {
1302 			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1303 			return -EINVAL;
1304 		}
1305 		postfix_append_op(ps, top_op);
1306 	}
1307 
1308 	return 0;
1309 }
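
/*
 * Example: filter_parse() converts the infix expression
 *
 *   "a == 1 && b == 2"
 *
 * into the postfix list
 *
 *   a  1  ==  b  2  ==  &&
 *
 * which replace_preds() below consumes to build the predicate tree.
 */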
1310 
1311 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1312 				       struct ftrace_event_call *call,
1313 				       int op, char *operand1, char *operand2)
1314 {
1315 	struct ftrace_event_field *field;
1316 	static struct filter_pred pred;
1317 
1318 	memset(&pred, 0, sizeof(pred));
1319 	pred.op = op;
1320 
1321 	if (op == OP_AND || op == OP_OR)
1322 		return &pred;
1323 
1324 	if (!operand1 || !operand2) {
1325 		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1326 		return NULL;
1327 	}
1328 
1329 	field = find_event_field(call, operand1);
1330 	if (!field) {
1331 		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1332 		return NULL;
1333 	}
1334 
1335 	strcpy(pred.regex.pattern, operand2);
1336 	pred.regex.len = strlen(pred.regex.pattern);
1337 
1338 #ifdef CONFIG_FTRACE_STARTUP_TEST
1339 	pred.field = field;
1340 #endif
1341 	return init_pred(ps, field, &pred) ? NULL : &pred;
1342 }
1343 
1344 static int check_preds(struct filter_parse_state *ps)
1345 {
1346 	int n_normal_preds = 0, n_logical_preds = 0;
1347 	struct postfix_elt *elt;
1348 
1349 	list_for_each_entry(elt, &ps->postfix, list) {
1350 		if (elt->op == OP_NONE)
1351 			continue;
1352 
1353 		if (elt->op == OP_AND || elt->op == OP_OR) {
1354 			n_logical_preds++;
1355 			continue;
1356 		}
1357 		n_normal_preds++;
1358 	}
1359 
1360 	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1361 		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1362 		return -EINVAL;
1363 	}
1364 
1365 	return 0;
1366 }
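
/*
 * Example: "common_pid || prio" contains a logical op but no
 * comparison predicate at all, so check_preds() rejects it as a
 * meaningless filter, while "common_pid == 0 || prio < 100" passes.
 */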
1367 
1368 static int count_preds(struct filter_parse_state *ps)
1369 {
1370 	struct postfix_elt *elt;
1371 	int n_preds = 0;
1372 
1373 	list_for_each_entry(elt, &ps->postfix, list) {
1374 		if (elt->op == OP_NONE)
1375 			continue;
1376 		n_preds++;
1377 	}
1378 
1379 	return n_preds;
1380 }
1381 
1382 struct check_pred_data {
1383 	int count;
1384 	int max;
1385 };
1386 
1387 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1388 			      int *err, void *data)
1389 {
1390 	struct check_pred_data *d = data;
1391 
1392 	if (WARN_ON(d->count++ > d->max)) {
1393 		*err = -EINVAL;
1394 		return WALK_PRED_ABORT;
1395 	}
1396 	return WALK_PRED_DEFAULT;
1397 }
1398 
1399 /*
1400  * The tree is walked at filtering of an event. If the tree is not correctly
1401  * built, it may cause an infinite loop. Check here that the tree does
1402  * indeed terminate.
1403  */
1404 static int check_pred_tree(struct event_filter *filter,
1405 			   struct filter_pred *root)
1406 {
1407 	struct check_pred_data data = {
1408 		/*
1409 		 * A node can be hit at most three times:
1410 		 * once going down, once coming up from the left, and
1411 		 * once coming up from the right. This is more than enough
1412 		 * since leafs are only hit a single time.
1413 		 */
1414 		.max   = 3 * filter->n_preds,
1415 		.count = 0,
1416 	};
1417 
1418 	return walk_pred_tree(filter->preds, root,
1419 			      check_pred_tree_cb, &data);
1420 }
1421 
1422 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1423 			  int *err, void *data)
1424 {
1425 	int *count = data;
1426 
1427 	if ((move == MOVE_DOWN) &&
1428 	    (pred->left == FILTER_PRED_INVALID))
1429 		(*count)++;
1430 
1431 	return WALK_PRED_DEFAULT;
1432 }
1433 
1434 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1435 {
1436 	int count = 0, ret;
1437 
1438 	ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1439 	WARN_ON(ret);
1440 	return count;
1441 }
1442 
1443 struct fold_pred_data {
1444 	struct filter_pred *root;
1445 	int count;
1446 	int children;
1447 };
1448 
1449 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1450 			int *err, void *data)
1451 {
1452 	struct fold_pred_data *d = data;
1453 	struct filter_pred *root = d->root;
1454 
1455 	if (move != MOVE_DOWN)
1456 		return WALK_PRED_DEFAULT;
1457 	if (pred->left != FILTER_PRED_INVALID)
1458 		return WALK_PRED_DEFAULT;
1459 
1460 	if (WARN_ON(d->count == d->children)) {
1461 		*err = -EINVAL;
1462 		return WALK_PRED_ABORT;
1463 	}
1464 
1465 	pred->index &= ~FILTER_PRED_FOLD;
1466 	root->ops[d->count++] = pred->index;
1467 	return WALK_PRED_DEFAULT;
1468 }
1469 
1470 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1471 {
1472 	struct fold_pred_data data = {
1473 		.root  = root,
1474 		.count = 0,
1475 	};
1476 	int children;
1477 
1478 	/* No need to keep the fold flag */
1479 	root->index &= ~FILTER_PRED_FOLD;
1480 
1481 	/* If the root is a leaf then do nothing */
1482 	if (root->left == FILTER_PRED_INVALID)
1483 		return 0;
1484 
1485 	/* count the children */
1486 	children = count_leafs(preds, &preds[root->left]);
1487 	children += count_leafs(preds, &preds[root->right]);
1488 
1489 	root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
1490 	if (!root->ops)
1491 		return -ENOMEM;
1492 
1493 	root->val = children;
1494 	data.children = children;
1495 	return walk_pred_tree(preds, root, fold_pred_cb, &data);
1496 }
1497 
1498 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1499 			     int *err, void *data)
1500 {
1501 	struct filter_pred *preds = data;
1502 
1503 	if (move != MOVE_DOWN)
1504 		return WALK_PRED_DEFAULT;
1505 	if (!(pred->index & FILTER_PRED_FOLD))
1506 		return WALK_PRED_DEFAULT;
1507 
1508 	*err = fold_pred(preds, pred);
1509 	if (*err)
1510 		return WALK_PRED_ABORT;
1511 
1512 	/* everything below is folded, continue with parent */
1513 	return WALK_PRED_PARENT;
1514 }
1515 
1516 /*
1517  * To optimize the processing of the ops, if we have several "ors" or
1518  * "ands" together, we can put them in an array and process them all
1519  * together speeding up the filter logic.
1520  */
1521 static int fold_pred_tree(struct event_filter *filter,
1522 			   struct filter_pred *root)
1523 {
1524 	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1525 			      filter->preds);
1526 }
1527 
1528 static int replace_preds(struct ftrace_event_call *call,
1529 			 struct event_filter *filter,
1530 			 struct filter_parse_state *ps,
1531 			 char *filter_string,
1532 			 bool dry_run)
1533 {
1534 	char *operand1 = NULL, *operand2 = NULL;
1535 	struct filter_pred *pred;
1536 	struct filter_pred *root;
1537 	struct postfix_elt *elt;
1538 	struct pred_stack stack = { }; /* init to NULL */
1539 	int err;
1540 	int n_preds = 0;
1541 
1542 	n_preds = count_preds(ps);
1543 	if (n_preds >= MAX_FILTER_PRED) {
1544 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1545 		return -ENOSPC;
1546 	}
1547 
1548 	err = check_preds(ps);
1549 	if (err)
1550 		return err;
1551 
1552 	if (!dry_run) {
1553 		err = __alloc_pred_stack(&stack, n_preds);
1554 		if (err)
1555 			return err;
1556 		err = __alloc_preds(filter, n_preds);
1557 		if (err)
1558 			goto fail;
1559 	}
1560 
1561 	n_preds = 0;
1562 	list_for_each_entry(elt, &ps->postfix, list) {
1563 		if (elt->op == OP_NONE) {
1564 			if (!operand1)
1565 				operand1 = elt->operand;
1566 			else if (!operand2)
1567 				operand2 = elt->operand;
1568 			else {
1569 				parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1570 				err = -EINVAL;
1571 				goto fail;
1572 			}
1573 			continue;
1574 		}
1575 
1576 		if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1577 			parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1578 			err = -ENOSPC;
1579 			goto fail;
1580 		}
1581 
1582 		pred = create_pred(ps, call, elt->op, operand1, operand2);
1583 		if (!pred) {
1584 			err = -EINVAL;
1585 			goto fail;
1586 		}
1587 
1588 		if (!dry_run) {
1589 			err = filter_add_pred(ps, filter, pred, &stack);
1590 			if (err)
1591 				goto fail;
1592 		}
1593 
1594 		operand1 = operand2 = NULL;
1595 	}
1596 
1597 	if (!dry_run) {
1598 		/* We should have one item left on the stack */
1599 		pred = __pop_pred_stack(&stack);
1600 		if (!pred)
1601 			return -EINVAL;
1602 		/* This item is where we start from in matching */
1603 		root = pred;
1604 		/* Make sure the stack is empty */
1605 		pred = __pop_pred_stack(&stack);
1606 		if (WARN_ON(pred)) {
1607 			err = -EINVAL;
1608 			filter->root = NULL;
1609 			goto fail;
1610 		}
1611 		err = check_pred_tree(filter, root);
1612 		if (err)
1613 			goto fail;
1614 
1615 		/* Optimize the tree */
1616 		err = fold_pred_tree(filter, root);
1617 		if (err)
1618 			goto fail;
1619 
1620 		/* We don't set root until we know it works */
1621 		barrier();
1622 		filter->root = root;
1623 	}
1624 
1625 	err = 0;
1626 fail:
1627 	__free_pred_stack(&stack);
1628 	return err;
1629 }
1630 
1631 struct filter_list {
1632 	struct list_head	list;
1633 	struct event_filter	*filter;
1634 };
1635 
1636 static int replace_system_preds(struct event_subsystem *system,
1637 				struct filter_parse_state *ps,
1638 				char *filter_string)
1639 {
1640 	struct ftrace_event_call *call;
1641 	struct filter_list *filter_item;
1642 	struct filter_list *tmp;
1643 	LIST_HEAD(filter_list);
1644 	bool fail = true;
1645 	int err;
1646 
1647 	list_for_each_entry(call, &ftrace_events, list) {
1648 
1649 		if (strcmp(call->class->system, system->name) != 0)
1650 			continue;
1651 
1652 		/*
1653 		 * Try to see if the filter can be applied
1654 		 *  (filter arg is ignored on dry_run)
1655 		 */
1656 		err = replace_preds(call, NULL, ps, filter_string, true);
1657 		if (err)
1658 			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1659 		else
1660 			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1661 	}
1662 
1663 	list_for_each_entry(call, &ftrace_events, list) {
1664 		struct event_filter *filter;
1665 
1666 		if (strcmp(call->class->system, system->name) != 0)
1667 			continue;
1668 
1669 		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
1670 			continue;
1671 
1672 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1673 		if (!filter_item)
1674 			goto fail_mem;
1675 
1676 		list_add_tail(&filter_item->list, &filter_list);
1677 
1678 		filter_item->filter = __alloc_filter();
1679 		if (!filter_item->filter)
1680 			goto fail_mem;
1681 		filter = filter_item->filter;
1682 
1683 		/* Can only fail on no memory */
1684 		err = replace_filter_string(filter, filter_string);
1685 		if (err)
1686 			goto fail_mem;
1687 
1688 		err = replace_preds(call, filter, ps, filter_string, false);
1689 		if (err) {
1690 			filter_disable(call);
1691 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1692 			append_filter_err(ps, filter);
1693 		} else
1694 			call->flags |= TRACE_EVENT_FL_FILTERED;
1695 		/*
1696 		 * Regardless of whether this returned an error, we still
1697 		 * replace the filter for the call.
1698 		 */
1699 		filter = call->filter;
1700 		rcu_assign_pointer(call->filter, filter_item->filter);
1701 		filter_item->filter = filter;
1702 
1703 		fail = false;
1704 	}
1705 
1706 	if (fail)
1707 		goto fail;
1708 
1709 	/*
1710 	 * The calls can still be using the old filters.
1711 	 * Do a synchronize_sched() to ensure all calls are
1712 	 * done with them before we free them.
1713 	 */
1714 	synchronize_sched();
1715 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1716 		__free_filter(filter_item->filter);
1717 		list_del(&filter_item->list);
1718 		kfree(filter_item);
1719 	}
1720 	return 0;
1721  fail:
1722 	/* No call succeeded */
1723 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1724 		list_del(&filter_item->list);
1725 		kfree(filter_item);
1726 	}
1727 	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1728 	return -EINVAL;
1729  fail_mem:
1730 	/* If any call succeeded, we still need to sync */
1731 	if (!fail)
1732 		synchronize_sched();
1733 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1734 		__free_filter(filter_item->filter);
1735 		list_del(&filter_item->list);
1736 		kfree(filter_item);
1737 	}
1738 	return -ENOMEM;
1739 }
1740 
1741 static int create_filter_start(char *filter_str, bool set_str,
1742 			       struct filter_parse_state **psp,
1743 			       struct event_filter **filterp)
1744 {
1745 	struct event_filter *filter;
1746 	struct filter_parse_state *ps = NULL;
1747 	int err = 0;
1748 
1749 	WARN_ON_ONCE(*psp || *filterp);
1750 
1751 	/* allocate everything, and if any fails, free all and fail */
1752 	filter = __alloc_filter();
1753 	if (filter && set_str)
1754 		err = replace_filter_string(filter, filter_str);
1755 
1756 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1757 
1758 	if (!filter || !ps || err) {
1759 		kfree(ps);
1760 		__free_filter(filter);
1761 		return -ENOMEM;
1762 	}
1763 
1764 	/* we're committed to creating a new filter */
1765 	*filterp = filter;
1766 	*psp = ps;
1767 
1768 	parse_init(ps, filter_ops, filter_str);
1769 	err = filter_parse(ps);
1770 	if (err && set_str)
1771 		append_filter_err(ps, filter);
1772 	return err;
1773 }
1774 
1775 static void create_filter_finish(struct filter_parse_state *ps)
1776 {
1777 	if (ps) {
1778 		filter_opstack_clear(ps);
1779 		postfix_clear(ps);
1780 		kfree(ps);
1781 	}
1782 }
1783 
1784 /**
1785  * create_filter - create a filter for a ftrace_event_call
1786  * @call: ftrace_event_call to create a filter for
1787  * @filter_str: filter string
1788  * @set_str: remember @filter_str and enable detailed error in filter
1789  * @filterp: out param for created filter (always updated on return)
1790  *
1791  * Creates a filter for @call with @filter_str.  If @set_str is %true,
1792  * @filter_str is copied and recorded in the new filter.
1793  *
1794  * On success, returns 0 and *@filterp points to the new filter.  On
1795  * failure, returns -errno and *@filterp may point to %NULL or to a new
1796  * filter.  In the latter case, the returned filter contains error
1797  * information if @set_str is %true and the caller is responsible for
1798  * freeing it.
1799  */
1800 static int create_filter(struct ftrace_event_call *call,
1801 			 char *filter_str, bool set_str,
1802 			 struct event_filter **filterp)
1803 {
1804 	struct event_filter *filter = NULL;
1805 	struct filter_parse_state *ps = NULL;
1806 	int err;
1807 
1808 	err = create_filter_start(filter_str, set_str, &ps, &filter);
1809 	if (!err) {
1810 		err = replace_preds(call, filter, ps, filter_str, false);
1811 		if (err && set_str)
1812 			append_filter_err(ps, filter);
1813 	}
1814 	create_filter_finish(ps);
1815 
1816 	*filterp = filter;
1817 	return err;
1818 }
1819 
1820 /**
1821  * create_system_filter - create a filter for an event_subsystem
1822  * @system: event_subsystem to create a filter for
1823  * @filter_str: filter string
1824  * @filterp: out param for created filter (always updated on return)
1825  *
1826  * Identical to create_filter() except that it creates a subsystem filter
1827  * and always remembers @filter_str.
1828  */
1829 static int create_system_filter(struct event_subsystem *system,
1830 				char *filter_str, struct event_filter **filterp)
1831 {
1832 	struct event_filter *filter = NULL;
1833 	struct filter_parse_state *ps = NULL;
1834 	int err;
1835 
1836 	err = create_filter_start(filter_str, true, &ps, &filter);
1837 	if (!err) {
1838 		err = replace_system_preds(system, ps, filter_str);
1839 		if (!err) {
1840 			/* System filters just show a default message */
1841 			kfree(filter->filter_string);
1842 			filter->filter_string = NULL;
1843 		} else {
1844 			append_filter_err(ps, filter);
1845 		}
1846 	}
1847 	create_filter_finish(ps);
1848 
1849 	*filterp = filter;
1850 	return err;
1851 }
1852 
1853 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1854 {
1855 	struct event_filter *filter;
1856 	int err = 0;
1857 
1858 	mutex_lock(&event_mutex);
1859 
1860 	if (!strcmp(strstrip(filter_string), "0")) {
1861 		filter_disable(call);
1862 		filter = call->filter;
1863 		if (!filter)
1864 			goto out_unlock;
1865 		RCU_INIT_POINTER(call->filter, NULL);
1866 		/* Make sure the filter is not being used */
1867 		synchronize_sched();
1868 		__free_filter(filter);
1869 		goto out_unlock;
1870 	}
1871 
1872 	err = create_filter(call, filter_string, true, &filter);
1873 
1874 	/*
1875 	 * Always swap the call filter with the new filter
1876 	 * even if there was an error. If there was an error
1877 	 * in the filter, we disable the filter and show the error
1878 	 * string.
1879 	 */
1880 	if (filter) {
1881 		struct event_filter *tmp = call->filter;
1882 
1883 		if (!err)
1884 			call->flags |= TRACE_EVENT_FL_FILTERED;
1885 		else
1886 			filter_disable(call);
1887 
1888 		rcu_assign_pointer(call->filter, filter);
1889 
1890 		if (tmp) {
1891 			/* Make sure the call is done with the filter */
1892 			synchronize_sched();
1893 			__free_filter(tmp);
1894 		}
1895 	}
1896 out_unlock:
1897 	mutex_unlock(&event_mutex);
1898 
1899 	return err;
1900 }
1901 
1902 int apply_subsystem_event_filter(struct event_subsystem *system,
1903 				 char *filter_string)
1904 {
1905 	struct event_filter *filter;
1906 	int err = 0;
1907 
1908 	mutex_lock(&event_mutex);
1909 
1910 	/* Make sure the system still has events */
1911 	if (!system->nr_events) {
1912 		err = -ENODEV;
1913 		goto out_unlock;
1914 	}
1915 
1916 	if (!strcmp(strstrip(filter_string), "0")) {
1917 		filter_free_subsystem_preds(system);
1918 		remove_filter_string(system->filter);
1919 		filter = system->filter;
1920 		system->filter = NULL;
1921 		/* Ensure all filters are no longer used */
1922 		synchronize_sched();
1923 		filter_free_subsystem_filters(system);
1924 		__free_filter(filter);
1925 		goto out_unlock;
1926 	}
1927 
1928 	err = create_system_filter(system, filter_string, &filter);
1929 	if (filter) {
1930 		/*
1931 		 * No event actually uses the system filter, so
1932 		 * we can free it without synchronize_sched().
1933 		 */
1934 		__free_filter(system->filter);
1935 		system->filter = filter;
1936 	}
1937 out_unlock:
1938 	mutex_unlock(&event_mutex);
1939 
1940 	return err;
1941 }
1942 
1943 #ifdef CONFIG_PERF_EVENTS
1944 
1945 void ftrace_profile_free_filter(struct perf_event *event)
1946 {
1947 	struct event_filter *filter = event->filter;
1948 
1949 	event->filter = NULL;
1950 	__free_filter(filter);
1951 }
1952 
1953 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1954 			      char *filter_str)
1955 {
1956 	int err;
1957 	struct event_filter *filter;
1958 	struct ftrace_event_call *call;
1959 
1960 	mutex_lock(&event_mutex);
1961 
1962 	call = event->tp_event;
1963 
1964 	err = -EINVAL;
1965 	if (!call)
1966 		goto out_unlock;
1967 
1968 	err = -EEXIST;
1969 	if (event->filter)
1970 		goto out_unlock;
1971 
1972 	err = create_filter(call, filter_str, false, &filter);
1973 	if (!err)
1974 		event->filter = filter;
1975 	else
1976 		__free_filter(filter);
1977 
1978 out_unlock:
1979 	mutex_unlock(&event_mutex);
1980 
1981 	return err;
1982 }
1983 
1984 #endif /* CONFIG_PERF_EVENTS */
1985 
1986 #ifdef CONFIG_FTRACE_STARTUP_TEST
1987 
1988 #include <linux/types.h>
1989 #include <linux/tracepoint.h>
1990 
1991 #define CREATE_TRACE_POINTS
1992 #include "trace_events_filter_test.h"
1993 
1994 #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
1995 { \
1996 	.filter = FILTER, \
1997 	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
1998 		    .e = ve, .f = vf, .g = vg, .h = vh }, \
1999 	.match  = m, \
2000 	.not_visited = nvisit, \
2001 }
2002 #define YES 1
2003 #define NO  0
2004 
2005 static struct test_filter_data_t {
2006 	char *filter;
2007 	struct ftrace_raw_ftrace_test_filter rec;
2008 	int match;
2009 	char *not_visited;
2010 } test_filter_data[] = {
2011 #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
2012 	       "e == 1 && f == 1 && g == 1 && h == 1"
2013 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
2014 	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
2015 	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
2016 #undef FILTER
2017 #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
2018 	       "e == 1 || f == 1 || g == 1 || h == 1"
2019 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
2020 	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2021 	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
2022 #undef FILTER
2023 #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
2024 	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
2025 	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
2026 	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2027 	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
2028 	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
2029 #undef FILTER
2030 #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
2031 	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
2032 	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
2033 	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
2034 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
2035 #undef FILTER
2036 #define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
2037 	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
2038 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
2039 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
2040 	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
2041 #undef FILTER
2042 #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
2043 	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
2044 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
2045 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
2046 	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
2047 #undef FILTER
2048 #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
2049 	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
2050 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
2051 	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
2052 	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
2053 #undef FILTER
2054 #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
2055 	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
2056 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
2057 	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2058 	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
2059 };
2060 
2061 #undef DATA_REC
2062 #undef FILTER
2063 #undef YES
2064 #undef NO
2065 
2066 #define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
2067 
2068 static int test_pred_visited;
2069 
2070 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2071 {
2072 	struct ftrace_event_field *field = pred->field;
2073 
2074 	test_pred_visited = 1;
2075 	printk(KERN_INFO "\npred visited %s\n", field->name);
2076 	return 1;
2077 }
2078 
2079 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2080 			     int *err, void *data)
2081 {
2082 	char *fields = data;
2083 
2084 	if ((move == MOVE_DOWN) &&
2085 	    (pred->left == FILTER_PRED_INVALID)) {
2086 		struct ftrace_event_field *field = pred->field;
2087 
2088 		if (!field) {
2089 			WARN(1, "all leafs should have field defined");
2090 			return WALK_PRED_DEFAULT;
2091 		}
2092 		if (!strchr(fields, *field->name))
2093 			return WALK_PRED_DEFAULT;
2094 
2095 		WARN_ON(!pred->fn);
2096 		pred->fn = test_pred_visited_fn;
2097 	}
2098 	return WALK_PRED_DEFAULT;
2099 }
2100 
2101 static __init int ftrace_test_event_filter(void)
2102 {
2103 	int i;
2104 
2105 	printk(KERN_INFO "Testing ftrace filter: ");
2106 
2107 	for (i = 0; i < DATA_CNT; i++) {
2108 		struct event_filter *filter = NULL;
2109 		struct test_filter_data_t *d = &test_filter_data[i];
2110 		int err;
2111 
2112 		err = create_filter(&event_ftrace_test_filter, d->filter,
2113 				    false, &filter);
2114 		if (err) {
2115 			printk(KERN_INFO
2116 			       "Failed to get filter for '%s', err %d\n",
2117 			       d->filter, err);
2118 			__free_filter(filter);
2119 			break;
2120 		}
2121 
2122 		/*
2123 		 * The preemption disabling is not really needed for self
2124 		 * tests, but the rcu dereference will complain without it.
2125 		 */
2126 		preempt_disable();
2127 		if (*d->not_visited)
2128 			walk_pred_tree(filter->preds, filter->root,
2129 				       test_walk_pred_cb,
2130 				       d->not_visited);
2131 
2132 		test_pred_visited = 0;
2133 		err = filter_match_preds(filter, &d->rec);
2134 		preempt_enable();
2135 
2136 		__free_filter(filter);
2137 
2138 		if (test_pred_visited) {
2139 			printk(KERN_INFO
2140 			       "Failed, unwanted pred visited for filter %s\n",
2141 			       d->filter);
2142 			break;
2143 		}
2144 
2145 		if (err != d->match) {
2146 			printk(KERN_INFO
2147 			       "Failed to match filter '%s', expected %d\n",
2148 			       d->filter, d->match);
2149 			break;
2150 		}
2151 	}
2152 
2153 	if (i == DATA_CNT)
2154 		printk(KERN_CONT "OK\n");
2155 
2156 	return 0;
2157 }
2158 
2159 late_initcall(ftrace_test_event_filter);
2160 
2161 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2162