kernel/trace/trace_events_filter.c
tracing/filter: Use static allocation for filter predicates
1 /*
2  * trace_events_filter - generic event filtering
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19  */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
30 enum filter_op_ids
31 {
32         OP_OR,
33         OP_AND,
34         OP_GLOB,
35         OP_NE,
36         OP_EQ,
37         OP_LT,
38         OP_LE,
39         OP_GT,
40         OP_GE,
41         OP_NONE,
42         OP_OPEN_PAREN,
43 };
44
45 struct filter_op {
46         int id;
47         char *string;
48         int precedence;
49 };
50
51 static struct filter_op filter_ops[] = {
52         { OP_OR,        "||",           1 },
53         { OP_AND,       "&&",           2 },
54         { OP_GLOB,      "~",            4 },
55         { OP_NE,        "!=",           4 },
56         { OP_EQ,        "==",           4 },
57         { OP_LT,        "<",            5 },
58         { OP_LE,        "<=",           5 },
59         { OP_GT,        ">",            5 },
60         { OP_GE,        ">=",           5 },
61         { OP_NONE,      "OP_NONE",      0 },
62         { OP_OPEN_PAREN, "(",           0 },
63 };
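
/*
 * Illustrative note on the precedence column above: it drives the
 * shunting-yard conversion in filter_parse(). Since "&&" (2) binds
 * tighter than "||" (1), a filter string such as
 *
 *	prio < 10 || prio > 20 && pid == 1
 *
 * is grouped as: prio < 10 || (prio > 20 && pid == 1), and the
 * relational operators (precedence 4 and 5) bind tighter still.
 */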
64
65 enum {
66         FILT_ERR_NONE,
67         FILT_ERR_INVALID_OP,
68         FILT_ERR_UNBALANCED_PAREN,
69         FILT_ERR_TOO_MANY_OPERANDS,
70         FILT_ERR_OPERAND_TOO_LONG,
71         FILT_ERR_FIELD_NOT_FOUND,
72         FILT_ERR_ILLEGAL_FIELD_OP,
73         FILT_ERR_ILLEGAL_INTVAL,
74         FILT_ERR_BAD_SUBSYS_FILTER,
75         FILT_ERR_TOO_MANY_PREDS,
76         FILT_ERR_MISSING_FIELD,
77         FILT_ERR_INVALID_FILTER,
78 };
79
80 static char *err_text[] = {
81         "No error",
82         "Invalid operator",
83         "Unbalanced parens",
84         "Too many operands",
85         "Operand too long",
86         "Field not found",
87         "Illegal operation for field type",
88         "Illegal integer value",
89         "Couldn't find or set field in one of a subsystem's events",
90         "Too many terms in predicate expression",
91         "Missing field name and/or value",
92         "Meaningless filter expression",
93 };
94
95 struct opstack_op {
96         int op;
97         struct list_head list;
98 };
99
100 struct postfix_elt {
101         int op;
102         char *operand;
103         struct list_head list;
104 };
105
106 struct filter_parse_state {
107         struct filter_op *ops;
108         struct list_head opstack;
109         struct list_head postfix;
110         int lasterr;
111         int lasterr_pos;
112
113         struct {
114                 char *string;
115                 unsigned int cnt;
116                 unsigned int tail;
117         } infix;
118
119         struct {
120                 char string[MAX_FILTER_STR_VAL];
121                 int pos;
122                 unsigned int tail;
123         } operand;
124 };
125
126 struct pred_stack {
127         struct filter_pred      **preds;
128         int                     index;
129 };
130
131 #define DEFINE_COMPARISON_PRED(type)                                    \
132 static int filter_pred_##type(struct filter_pred *pred, void *event)    \
133 {                                                                       \
134         type *addr = (type *)(event + pred->offset);                    \
135         type val = (type)pred->val;                                     \
136         int match = 0;                                                  \
137                                                                         \
138         switch (pred->op) {                                             \
139         case OP_LT:                                                     \
140                 match = (*addr < val);                                  \
141                 break;                                                  \
142         case OP_LE:                                                     \
143                 match = (*addr <= val);                                 \
144                 break;                                                  \
145         case OP_GT:                                                     \
146                 match = (*addr > val);                                  \
147                 break;                                                  \
148         case OP_GE:                                                     \
149                 match = (*addr >= val);                                 \
150                 break;                                                  \
151         default:                                                        \
152                 break;                                                  \
153         }                                                               \
154                                                                         \
155         return match;                                                   \
156 }
157
158 #define DEFINE_EQUALITY_PRED(size)                                      \
159 static int filter_pred_##size(struct filter_pred *pred, void *event)    \
160 {                                                                       \
161         u##size *addr = (u##size *)(event + pred->offset);              \
162         u##size val = (u##size)pred->val;                               \
163         int match;                                                      \
164                                                                         \
165         match = (val == *addr) ^ pred->not;                             \
166                                                                         \
167         return match;                                                   \
168 }
169
170 DEFINE_COMPARISON_PRED(s64);
171 DEFINE_COMPARISON_PRED(u64);
172 DEFINE_COMPARISON_PRED(s32);
173 DEFINE_COMPARISON_PRED(u32);
174 DEFINE_COMPARISON_PRED(s16);
175 DEFINE_COMPARISON_PRED(u16);
176 DEFINE_COMPARISON_PRED(s8);
177 DEFINE_COMPARISON_PRED(u8);
178
179 DEFINE_EQUALITY_PRED(64);
180 DEFINE_EQUALITY_PRED(32);
181 DEFINE_EQUALITY_PRED(16);
182 DEFINE_EQUALITY_PRED(8);
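
/*
 * For illustration: each DEFINE_COMPARISON_PRED(type) expansion above
 * produces a callback named filter_pred_<type>, e.g.
 *
 *	static int filter_pred_s64(struct filter_pred *pred, void *event);
 *
 * handling OP_LT/OP_LE/OP_GT/OP_GE on a signed 64-bit field, while
 * DEFINE_EQUALITY_PRED(size) produces filter_pred_64(), filter_pred_32(),
 * filter_pred_16() and filter_pred_8(), which cover OP_EQ/OP_NE via the
 * pred->not flag.
 */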
183
184 /* Filter predicate for fixed sized arrays of characters */
185 static int filter_pred_string(struct filter_pred *pred, void *event)
186 {
187         char *addr = (char *)(event + pred->offset);
188         int cmp, match;
189
190         cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
191
192         match = cmp ^ pred->not;
193
194         return match;
195 }
196
197 /* Filter predicate for char * pointers */
198 static int filter_pred_pchar(struct filter_pred *pred, void *event)
199 {
200         char **addr = (char **)(event + pred->offset);
201         int cmp, match;
202         int len = strlen(*addr) + 1;    /* including trailing '\0' */
203
204         cmp = pred->regex.match(*addr, &pred->regex, len);
205
206         match = cmp ^ pred->not;
207
208         return match;
209 }
210
211 /*
212  * Filter predicate for dynamically sized arrays of characters.
213  * These are implemented through a list of strings at the end
214  * of the entry.
215  * Each of these strings has a field in the entry which
216  * contains its offset from the beginning of the entry.
217  * We first fetch this field, then add its value to the
218  * address of the entry to obtain the address of the string
219  * (see the illustrative layout comment after the function).
220  */
221 static int filter_pred_strloc(struct filter_pred *pred, void *event)
222 {
223         u32 str_item = *(u32 *)(event + pred->offset);
224         int str_loc = str_item & 0xffff;
225         int str_len = str_item >> 16;
226         char *addr = (char *)(event + str_loc);
227         int cmp, match;
228
229         cmp = pred->regex.match(addr, &pred->regex, str_len);
230
231         match = cmp ^ pred->not;
232
233         return match;
234 }
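
/*
 * Illustrative layout of the __data_loc word read above: the u32 at
 * pred->offset packs the string's location in its low 16 bits and the
 * string's length in its high 16 bits, e.g.
 *
 *	str_item = 0x0005001c
 *	str_loc  = str_item & 0xffff  ->  0x001c  (offset from entry start)
 *	str_len  = str_item >> 16     ->  0x0005  (length handed to the regex)
 */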
235
236 static int filter_pred_none(struct filter_pred *pred, void *event)
237 {
238         return 0;
239 }
240
241 /*
242  * regex_match_foo - Basic regex callbacks
243  *
244  * @str: the string to be searched
245  * @r:   the regex structure containing the pattern string
246  * @len: the length of the string to be searched (including '\0')
247  *
248  * Note:
249  * - @str might not be NULL-terminated if it's of type DYN_STRING
250  *   or STATIC_STRING
251  */
252
253 static int regex_match_full(char *str, struct regex *r, int len)
254 {
255         if (strncmp(str, r->pattern, len) == 0)
256                 return 1;
257         return 0;
258 }
259
260 static int regex_match_front(char *str, struct regex *r, int len)
261 {
262         if (strncmp(str, r->pattern, r->len) == 0)
263                 return 1;
264         return 0;
265 }
266
267 static int regex_match_middle(char *str, struct regex *r, int len)
268 {
269         if (strnstr(str, r->pattern, len))
270                 return 1;
271         return 0;
272 }
273
274 static int regex_match_end(char *str, struct regex *r, int len)
275 {
276         int strlen = len - 1;
277
278         if (strlen >= r->len &&
279             memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
280                 return 1;
281         return 0;
282 }
283
284 /**
285  * filter_parse_regex - parse a basic regex
286  * @buff:   the raw regex
287  * @len:    length of the regex
288  * @search: will point to the beginning of the string to compare
289  * @not:    tell whether the match will have to be inverted
290  *
291  * The caller passes in a buffer containing a regex; this function
292  * sets @search to point to the search part of the buffer and
293  * returns the type of search it is (see enum regex_type).
294  * Note that this modifies @buff.
295  *
296  * Returns enum type.
297  *  search returns the pointer to use for comparison.
298  *  not returns 1 if buff started with a '!'
299  *     0 otherwise.
300  */
301 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
302 {
303         int type = MATCH_FULL;
304         int i;
305
306         if (buff[0] == '!') {
307                 *not = 1;
308                 buff++;
309                 len--;
310         } else
311                 *not = 0;
312
313         *search = buff;
314
315         for (i = 0; i < len; i++) {
316                 if (buff[i] == '*') {
317                         if (!i) {
318                                 *search = buff + 1;
319                                 type = MATCH_END_ONLY;
320                         } else {
321                                 if (type == MATCH_END_ONLY)
322                                         type = MATCH_MIDDLE_ONLY;
323                                 else
324                                         type = MATCH_FRONT_ONLY;
325                                 buff[i] = 0;
326                                 break;
327                         }
328                 }
329         }
330
331         return type;
332 }
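
/*
 * Worked examples for the parse above (illustrative):
 *
 *	"glob"    -> MATCH_FULL,        *search = "glob"
 *	"glob*"   -> MATCH_FRONT_ONLY,  *search = "glob" ('*' replaced by '\0')
 *	"*glob"   -> MATCH_END_ONLY,    *search = "glob"
 *	"*glob*"  -> MATCH_MIDDLE_ONLY, *search = "glob"
 *	"!glob"   -> *not set to 1, then parsed as "glob" (MATCH_FULL)
 */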
333
334 static void filter_build_regex(struct filter_pred *pred)
335 {
336         struct regex *r = &pred->regex;
337         char *search;
338         enum regex_type type = MATCH_FULL;
339         int not = 0;
340
341         if (pred->op == OP_GLOB) {
342                 type = filter_parse_regex(r->pattern, r->len, &search, &not);
343                 r->len = strlen(search);
344                 memmove(r->pattern, search, r->len+1);
345         }
346
347         switch (type) {
348         case MATCH_FULL:
349                 r->match = regex_match_full;
350                 break;
351         case MATCH_FRONT_ONLY:
352                 r->match = regex_match_front;
353                 break;
354         case MATCH_MIDDLE_ONLY:
355                 r->match = regex_match_middle;
356                 break;
357         case MATCH_END_ONLY:
358                 r->match = regex_match_end;
359                 break;
360         }
361
362         pred->not ^= not;
363 }
364
365 enum move_type {
366         MOVE_DOWN,
367         MOVE_UP_FROM_LEFT,
368         MOVE_UP_FROM_RIGHT
369 };
370
371 static struct filter_pred *
372 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
373                 int index, enum move_type *move)
374 {
375         if (pred->parent & FILTER_PRED_IS_RIGHT)
376                 *move = MOVE_UP_FROM_RIGHT;
377         else
378                 *move = MOVE_UP_FROM_LEFT;
379         pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
380
381         return pred;
382 }
383
384 /*
385  * A series of ANDs or ORs was found together. Instead of
386  * climbing up and down the tree branches, an array of the
387  * ops was made in the order of the checks. We can just move
388  * across the array and short circuit if needed.
389  */
390 static int process_ops(struct filter_pred *preds,
391                        struct filter_pred *op, void *rec)
392 {
393         struct filter_pred *pred;
394         int match = 0;
395         int type;
396         int i;
397
398         /*
399          * Micro-optimization: We set type to true if op
400          * is an OR and false otherwise (AND). Then we
401          * just need to test if the match is equal to
402          * the type, and if it is, we can short circuit the
403          * rest of the checks:
404          *
405          * if ((match && op->op == OP_OR) ||
406          *     (!match && op->op == OP_AND))
407          *        return match;
408          */
409         type = op->op == OP_OR;
410
411         for (i = 0; i < op->val; i++) {
412                 pred = &preds[op->ops[i]];
413                 match = pred->fn(pred, rec);
414                 if (!!match == type)
415                         return match;
416         }
417         return match;
418 }
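
/*
 * Illustrative example: for a filter like
 *
 *	pid == 1 || pid == 2 || pid == 3
 *
 * the three OP_EQ leaves are folded into a single OP_OR node whose
 * ops[] array holds their indexes and whose val is 3.  The loop above
 * then walks that array in order and returns as soon as one predicate
 * matches (type == 1 for OP_OR), instead of walking the tree.
 */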
419
420 /* return 1 if event matches, 0 otherwise (discard) */
421 int filter_match_preds(struct event_filter *filter, void *rec)
422 {
423         int match = -1;
424         enum move_type move = MOVE_DOWN;
425         struct filter_pred *preds;
426         struct filter_pred *pred;
427         struct filter_pred *root;
428         int n_preds;
429         int done = 0;
430
431         /* an event with no filter attached is considered a match */
432         if (!filter)
433                 return 1;
434
435         n_preds = filter->n_preds;
436
437         if (!n_preds)
438                 return 1;
439
440         /*
441          * n_preds, root and filter->preds are protected with preemption disabled.
442          */
443         preds = rcu_dereference_sched(filter->preds);
444         root = rcu_dereference_sched(filter->root);
445         if (!root)
446                 return 1;
447
448         pred = root;
449
450         /* match is currently meaningless */
451         match = -1;
452
453         do {
454                 switch (move) {
455                 case MOVE_DOWN:
456                         /* only AND and OR have children */
457                         if (pred->left != FILTER_PRED_INVALID) {
458                                 /* If ops is set, then it was folded. */
459                                 if (!pred->ops) {
460                                         /* keep going down the left side */
461                                         pred = &preds[pred->left];
462                                         continue;
463                                 }
464                                 /* We can treat folded ops as a leaf node */
465                                 match = process_ops(preds, pred, rec);
466                         } else
467                                 match = pred->fn(pred, rec);
468                         /* If this pred is the only pred */
469                         if (pred == root)
470                                 break;
471                         pred = get_pred_parent(pred, preds,
472                                                pred->parent, &move);
473                         continue;
474                 case MOVE_UP_FROM_LEFT:
475                         /*
476                          * Check for short circuits.
477                          *
478                          * Optimization: !!match == (pred->op == OP_OR)
479                          *   is the same as:
480                          * if ((match && pred->op == OP_OR) ||
481                          *     (!match && pred->op == OP_AND))
482                          */
483                         if (!!match == (pred->op == OP_OR)) {
484                                 if (pred == root)
485                                         break;
486                                 pred = get_pred_parent(pred, preds,
487                                                        pred->parent, &move);
488                                 continue;
489                         }
490                         /* now go down the right side of the tree. */
491                         pred = &preds[pred->right];
492                         move = MOVE_DOWN;
493                         continue;
494                 case MOVE_UP_FROM_RIGHT:
495                         /* We finished this equation. */
496                         if (pred == root)
497                                 break;
498                         pred = get_pred_parent(pred, preds,
499                                                pred->parent, &move);
500                         continue;
501                 }
502                 done = 1;
503         } while (!done);
504
505         return match;
506 }
507 EXPORT_SYMBOL_GPL(filter_match_preds);
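
/*
 * Illustrative walk (editor's sketch, with made-up field names): for
 * "(a == 1 || b == 2) && c == 3" the walk starts MOVE_DOWN at the root
 * AND and descends to its left child.  That OR of two leaves was folded,
 * so process_ops() evaluates it in one step and the walk returns
 * MOVE_UP_FROM_LEFT to the AND, where !!match == (pred->op == OP_OR)
 * short circuits a zero match; only a non-zero match sends the walk
 * MOVE_DOWN the right side to evaluate "c == 3".
 */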
508
509 static void parse_error(struct filter_parse_state *ps, int err, int pos)
510 {
511         ps->lasterr = err;
512         ps->lasterr_pos = pos;
513 }
514
515 static void remove_filter_string(struct event_filter *filter)
516 {
517         if (!filter)
518                 return;
519
520         kfree(filter->filter_string);
521         filter->filter_string = NULL;
522 }
523
524 static int replace_filter_string(struct event_filter *filter,
525                                  char *filter_string)
526 {
527         kfree(filter->filter_string);
528         filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
529         if (!filter->filter_string)
530                 return -ENOMEM;
531
532         return 0;
533 }
534
535 static int append_filter_string(struct event_filter *filter,
536                                 char *string)
537 {
538         int newlen;
539         char *new_filter_string;
540
541         BUG_ON(!filter->filter_string);
542         newlen = strlen(filter->filter_string) + strlen(string) + 1;
543         new_filter_string = kmalloc(newlen, GFP_KERNEL);
544         if (!new_filter_string)
545                 return -ENOMEM;
546
547         strcpy(new_filter_string, filter->filter_string);
548         strcat(new_filter_string, string);
549         kfree(filter->filter_string);
550         filter->filter_string = new_filter_string;
551
552         return 0;
553 }
554
555 static void append_filter_err(struct filter_parse_state *ps,
556                               struct event_filter *filter)
557 {
558         int pos = ps->lasterr_pos;
559         char *buf, *pbuf;
560
561         buf = (char *)__get_free_page(GFP_TEMPORARY);
562         if (!buf)
563                 return;
564
565         append_filter_string(filter, "\n");
566         memset(buf, ' ', PAGE_SIZE);
567         if (pos > PAGE_SIZE - 128)
568                 pos = 0;
569         buf[pos] = '^';
570         pbuf = &buf[pos] + 1;
571
572         sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
573         append_filter_string(filter, buf);
574         free_page((unsigned long) buf);
575 }
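
/*
 * Illustrative output (editor's sketch): append_filter_err() extends the
 * stored filter string with a newline, a line of spaces carrying a '^'
 * at column ps->lasterr_pos, and a final line of the form
 *
 *	parse_error: <err_text[ps->lasterr]>
 *
 * e.g. "parse_error: Invalid operator" for FILT_ERR_INVALID_OP.  This is
 * what a read of the filter file shows after a failed parse.
 */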
576
577 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
578 {
579         struct event_filter *filter;
580
581         mutex_lock(&event_mutex);
582         filter = call->filter;
583         if (filter && filter->filter_string)
584                 trace_seq_printf(s, "%s\n", filter->filter_string);
585         else
586                 trace_seq_printf(s, "none\n");
587         mutex_unlock(&event_mutex);
588 }
589
590 void print_subsystem_event_filter(struct event_subsystem *system,
591                                   struct trace_seq *s)
592 {
593         struct event_filter *filter;
594
595         mutex_lock(&event_mutex);
596         filter = system->filter;
597         if (filter && filter->filter_string)
598                 trace_seq_printf(s, "%s\n", filter->filter_string);
599         else
600                 trace_seq_printf(s, "none\n");
601         mutex_unlock(&event_mutex);
602 }
603
604 static struct ftrace_event_field *
605 __find_event_field(struct list_head *head, char *name)
606 {
607         struct ftrace_event_field *field;
608
609         list_for_each_entry(field, head, link) {
610                 if (!strcmp(field->name, name))
611                         return field;
612         }
613
614         return NULL;
615 }
616
617 static struct ftrace_event_field *
618 find_event_field(struct ftrace_event_call *call, char *name)
619 {
620         struct ftrace_event_field *field;
621         struct list_head *head;
622
623         field = __find_event_field(&ftrace_common_fields, name);
624         if (field)
625                 return field;
626
627         head = trace_get_fields(call);
628         return __find_event_field(head, name);
629 }
630
631 static void filter_free_pred(struct filter_pred *pred)
632 {
633         kfree(pred->field_name);
634 }
635
636 static void filter_clear_pred(struct filter_pred *pred)
637 {
638         kfree(pred->field_name);
639         pred->field_name = NULL;
640         pred->regex.len = 0;
641 }
642
643 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
644 {
645         stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
646         if (!stack->preds)
647                 return -ENOMEM;
648         stack->index = n_preds;
649         return 0;
650 }
651
652 static void __free_pred_stack(struct pred_stack *stack)
653 {
654         kfree(stack->preds);
655         stack->index = 0;
656 }
657
658 static int __push_pred_stack(struct pred_stack *stack,
659                              struct filter_pred *pred)
660 {
661         int index = stack->index;
662
663         if (WARN_ON(index == 0))
664                 return -ENOSPC;
665
666         stack->preds[--index] = pred;
667         stack->index = index;
668         return 0;
669 }
670
671 static struct filter_pred *
672 __pop_pred_stack(struct pred_stack *stack)
673 {
674         struct filter_pred *pred;
675         int index = stack->index;
676
677         pred = stack->preds[index++];
678         if (!pred)
679                 return NULL;
680
681         stack->index = index;
682         return pred;
683 }
684
685 static int filter_set_pred(struct event_filter *filter,
686                            int idx,
687                            struct pred_stack *stack,
688                            struct filter_pred *src,
689                            filter_pred_fn_t fn)
690 {
691         struct filter_pred *dest = &filter->preds[idx];
692         struct filter_pred *left;
693         struct filter_pred *right;
694
695         *dest = *src;
696         if (src->field_name) {
697                 dest->field_name = kstrdup(src->field_name, GFP_KERNEL);
698                 if (!dest->field_name)
699                         return -ENOMEM;
700         }
701         dest->fn = fn;
702         dest->index = idx;
703
704         if (dest->op == OP_OR || dest->op == OP_AND) {
705                 right = __pop_pred_stack(stack);
706                 left = __pop_pred_stack(stack);
707                 if (!left || !right)
708                         return -EINVAL;
709                 /*
710                  * If both children can be folded
711                  * and they are the same op as this op or a leaf,
712                  * then this op can be folded.
713                  */
714                 if (left->index & FILTER_PRED_FOLD &&
715                     (left->op == dest->op ||
716                      left->left == FILTER_PRED_INVALID) &&
717                     right->index & FILTER_PRED_FOLD &&
718                     (right->op == dest->op ||
719                      right->left == FILTER_PRED_INVALID))
720                         dest->index |= FILTER_PRED_FOLD;
721
722                 dest->left = left->index & ~FILTER_PRED_FOLD;
723                 dest->right = right->index & ~FILTER_PRED_FOLD;
724                 left->parent = dest->index & ~FILTER_PRED_FOLD;
725                 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
726         } else {
727                 /*
728                  * Make dest->left invalid to be used as a quick
729                  * way to know this is a leaf node.
730                  */
731                 dest->left = FILTER_PRED_INVALID;
732
733                 /* All leaves allow folding the parent ops. */
734                 dest->index |= FILTER_PRED_FOLD;
735         }
736
737         return __push_pred_stack(stack, dest);
738 }
739
740 static void __free_preds(struct event_filter *filter)
741 {
742         int i;
743
744         if (filter->preds) {
745                 for (i = 0; i < filter->a_preds; i++)
746                         kfree(filter->preds[i].field_name);
747                 kfree(filter->preds);
748                 filter->preds = NULL;
749         }
750         filter->a_preds = 0;
751         filter->n_preds = 0;
752 }
753
754 static void filter_disable(struct ftrace_event_call *call)
755 {
756         call->flags &= ~TRACE_EVENT_FL_FILTERED;
757 }
758
759 static void __free_filter(struct event_filter *filter)
760 {
761         if (!filter)
762                 return;
763
764         __free_preds(filter);
765         kfree(filter->filter_string);
766         kfree(filter);
767 }
768
769 /*
770  * Called when destroying the ftrace_event_call.
771  * The call is being freed, so we do not need to worry about
772  * the call being currently used. This is for module code removing
773  * the tracepoints from within it.
774  */
775 void destroy_preds(struct ftrace_event_call *call)
776 {
777         __free_filter(call->filter);
778         call->filter = NULL;
779 }
780
781 static struct event_filter *__alloc_filter(void)
782 {
783         struct event_filter *filter;
784
785         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
786         return filter;
787 }
788
789 static int __alloc_preds(struct event_filter *filter, int n_preds)
790 {
791         struct filter_pred *pred;
792         int i;
793
794         if (filter->preds)
795                 __free_preds(filter);
796
797         filter->preds =
798                 kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
799
800         if (!filter->preds)
801                 return -ENOMEM;
802
803         filter->a_preds = n_preds;
804         filter->n_preds = 0;
805
806         for (i = 0; i < n_preds; i++) {
807                 pred = &filter->preds[i];
808                 pred->fn = filter_pred_none;
809         }
810
811         return 0;
812 }
813
814 static void filter_free_subsystem_preds(struct event_subsystem *system)
815 {
816         struct ftrace_event_call *call;
817
818         list_for_each_entry(call, &ftrace_events, list) {
819                 if (strcmp(call->class->system, system->name) != 0)
820                         continue;
821
822                 filter_disable(call);
823                 remove_filter_string(call->filter);
824         }
825 }
826
827 static void filter_free_subsystem_filters(struct event_subsystem *system)
828 {
829         struct ftrace_event_call *call;
830
831         list_for_each_entry(call, &ftrace_events, list) {
832                 if (strcmp(call->class->system, system->name) != 0)
833                         continue;
834                 __free_filter(call->filter);
835                 call->filter = NULL;
836         }
837 }
838
839 static int filter_add_pred_fn(struct filter_parse_state *ps,
840                               struct ftrace_event_call *call,
841                               struct event_filter *filter,
842                               struct filter_pred *pred,
843                               struct pred_stack *stack,
844                               filter_pred_fn_t fn)
845 {
846         int idx, err;
847
848         if (WARN_ON(filter->n_preds == filter->a_preds)) {
849                 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
850                 return -ENOSPC;
851         }
852
853         idx = filter->n_preds;
854         filter_clear_pred(&filter->preds[idx]);
855         err = filter_set_pred(filter, idx, stack, pred, fn);
856         if (err)
857                 return err;
858
859         filter->n_preds++;
860
861         return 0;
862 }
863
864 int filter_assign_type(const char *type)
865 {
866         if (strstr(type, "__data_loc") && strstr(type, "char"))
867                 return FILTER_DYN_STRING;
868
869         if (strchr(type, '[') && strstr(type, "char"))
870                 return FILTER_STATIC_STRING;
871
872         return FILTER_OTHER;
873 }
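
/*
 * For illustration, typical field type strings map as follows:
 *
 *	"__data_loc char[] name"  ->  FILTER_DYN_STRING
 *	"char comm[16]"           ->  FILTER_STATIC_STRING
 *	"pid_t pid", "int prio"   ->  FILTER_OTHER
 */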
874
875 static bool is_string_field(struct ftrace_event_field *field)
876 {
877         return field->filter_type == FILTER_DYN_STRING ||
878                field->filter_type == FILTER_STATIC_STRING ||
879                field->filter_type == FILTER_PTR_STRING;
880 }
881
882 static int is_legal_op(struct ftrace_event_field *field, int op)
883 {
884         if (is_string_field(field) &&
885             (op != OP_EQ && op != OP_NE && op != OP_GLOB))
886                 return 0;
887         if (!is_string_field(field) && op == OP_GLOB)
888                 return 0;
889
890         return 1;
891 }
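
/*
 * In other words (illustrative): string fields accept only "==", "!="
 * and the glob operator "~", while non-string fields accept every
 * operator except "~".
 */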
892
893 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
894                                              int field_is_signed)
895 {
896         filter_pred_fn_t fn = NULL;
897
898         switch (field_size) {
899         case 8:
900                 if (op == OP_EQ || op == OP_NE)
901                         fn = filter_pred_64;
902                 else if (field_is_signed)
903                         fn = filter_pred_s64;
904                 else
905                         fn = filter_pred_u64;
906                 break;
907         case 4:
908                 if (op == OP_EQ || op == OP_NE)
909                         fn = filter_pred_32;
910                 else if (field_is_signed)
911                         fn = filter_pred_s32;
912                 else
913                         fn = filter_pred_u32;
914                 break;
915         case 2:
916                 if (op == OP_EQ || op == OP_NE)
917                         fn = filter_pred_16;
918                 else if (field_is_signed)
919                         fn = filter_pred_s16;
920                 else
921                         fn = filter_pred_u16;
922                 break;
923         case 1:
924                 if (op == OP_EQ || op == OP_NE)
925                         fn = filter_pred_8;
926                 else if (field_is_signed)
927                         fn = filter_pred_s8;
928                 else
929                         fn = filter_pred_u8;
930                 break;
931         }
932
933         return fn;
934 }
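
/*
 * Illustrative selections: a 4-byte signed field compared with "<" gets
 * filter_pred_s32(); the same field compared with "==" gets the
 * size-only filter_pred_32(), since equality does not depend on the
 * sign; an 8-byte unsigned field compared with ">=" gets filter_pred_u64().
 */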
935
936 static int filter_add_pred(struct filter_parse_state *ps,
937                            struct ftrace_event_call *call,
938                            struct event_filter *filter,
939                            struct filter_pred *pred,
940                            struct pred_stack *stack,
941                            bool dry_run)
942 {
943         struct ftrace_event_field *field;
944         filter_pred_fn_t fn;
945         unsigned long long val;
946         int ret;
947
948         fn = pred->fn = filter_pred_none;
949
950         if (pred->op == OP_AND)
951                 goto add_pred_fn;
952         else if (pred->op == OP_OR)
953                 goto add_pred_fn;
954
955         field = find_event_field(call, pred->field_name);
956         if (!field) {
957                 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
958                 return -EINVAL;
959         }
960
961         pred->offset = field->offset;
962
963         if (!is_legal_op(field, pred->op)) {
964                 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
965                 return -EINVAL;
966         }
967
968         if (is_string_field(field)) {
969                 filter_build_regex(pred);
970
971                 if (field->filter_type == FILTER_STATIC_STRING) {
972                         fn = filter_pred_string;
973                         pred->regex.field_len = field->size;
974                 } else if (field->filter_type == FILTER_DYN_STRING)
975                         fn = filter_pred_strloc;
976                 else
977                         fn = filter_pred_pchar;
978         } else {
979                 if (field->is_signed)
980                         ret = strict_strtoll(pred->regex.pattern, 0, &val);
981                 else
982                         ret = strict_strtoull(pred->regex.pattern, 0, &val);
983                 if (ret) {
984                         parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
985                         return -EINVAL;
986                 }
987                 pred->val = val;
988
989                 fn = select_comparison_fn(pred->op, field->size,
990                                           field->is_signed);
991                 if (!fn) {
992                         parse_error(ps, FILT_ERR_INVALID_OP, 0);
993                         return -EINVAL;
994                 }
995         }
996
997         if (pred->op == OP_NE)
998                 pred->not = 1;
999
1000 add_pred_fn:
1001         if (!dry_run)
1002                 return filter_add_pred_fn(ps, call, filter, pred, stack, fn);
1003         return 0;
1004 }
1005
1006 static void parse_init(struct filter_parse_state *ps,
1007                        struct filter_op *ops,
1008                        char *infix_string)
1009 {
1010         memset(ps, '\0', sizeof(*ps));
1011
1012         ps->infix.string = infix_string;
1013         ps->infix.cnt = strlen(infix_string);
1014         ps->ops = ops;
1015
1016         INIT_LIST_HEAD(&ps->opstack);
1017         INIT_LIST_HEAD(&ps->postfix);
1018 }
1019
1020 static char infix_next(struct filter_parse_state *ps)
1021 {
1022         ps->infix.cnt--;
1023
1024         return ps->infix.string[ps->infix.tail++];
1025 }
1026
1027 static char infix_peek(struct filter_parse_state *ps)
1028 {
1029         if (ps->infix.tail == strlen(ps->infix.string))
1030                 return 0;
1031
1032         return ps->infix.string[ps->infix.tail];
1033 }
1034
1035 static void infix_advance(struct filter_parse_state *ps)
1036 {
1037         ps->infix.cnt--;
1038         ps->infix.tail++;
1039 }
1040
1041 static inline int is_precedence_lower(struct filter_parse_state *ps,
1042                                       int a, int b)
1043 {
1044         return ps->ops[a].precedence < ps->ops[b].precedence;
1045 }
1046
1047 static inline int is_op_char(struct filter_parse_state *ps, char c)
1048 {
1049         int i;
1050
1051         for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1052                 if (ps->ops[i].string[0] == c)
1053                         return 1;
1054         }
1055
1056         return 0;
1057 }
1058
1059 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1060 {
1061         char nextc = infix_peek(ps);
1062         char opstr[3];
1063         int i;
1064
1065         opstr[0] = firstc;
1066         opstr[1] = nextc;
1067         opstr[2] = '\0';
1068
1069         for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1070                 if (!strcmp(opstr, ps->ops[i].string)) {
1071                         infix_advance(ps);
1072                         return ps->ops[i].id;
1073                 }
1074         }
1075
1076         opstr[1] = '\0';
1077
1078         for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1079                 if (!strcmp(opstr, ps->ops[i].string))
1080                         return ps->ops[i].id;
1081         }
1082
1083         return OP_NONE;
1084 }
1085
1086 static inline void clear_operand_string(struct filter_parse_state *ps)
1087 {
1088         memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1089         ps->operand.tail = 0;
1090 }
1091
1092 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1093 {
1094         if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1095                 return -EINVAL;
1096
1097         ps->operand.string[ps->operand.tail++] = c;
1098
1099         return 0;
1100 }
1101
1102 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1103 {
1104         struct opstack_op *opstack_op;
1105
1106         opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1107         if (!opstack_op)
1108                 return -ENOMEM;
1109
1110         opstack_op->op = op;
1111         list_add(&opstack_op->list, &ps->opstack);
1112
1113         return 0;
1114 }
1115
1116 static int filter_opstack_empty(struct filter_parse_state *ps)
1117 {
1118         return list_empty(&ps->opstack);
1119 }
1120
1121 static int filter_opstack_top(struct filter_parse_state *ps)
1122 {
1123         struct opstack_op *opstack_op;
1124
1125         if (filter_opstack_empty(ps))
1126                 return OP_NONE;
1127
1128         opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1129
1130         return opstack_op->op;
1131 }
1132
1133 static int filter_opstack_pop(struct filter_parse_state *ps)
1134 {
1135         struct opstack_op *opstack_op;
1136         int op;
1137
1138         if (filter_opstack_empty(ps))
1139                 return OP_NONE;
1140
1141         opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1142         op = opstack_op->op;
1143         list_del(&opstack_op->list);
1144
1145         kfree(opstack_op);
1146
1147         return op;
1148 }
1149
1150 static void filter_opstack_clear(struct filter_parse_state *ps)
1151 {
1152         while (!filter_opstack_empty(ps))
1153                 filter_opstack_pop(ps);
1154 }
1155
1156 static char *curr_operand(struct filter_parse_state *ps)
1157 {
1158         return ps->operand.string;
1159 }
1160
1161 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1162 {
1163         struct postfix_elt *elt;
1164
1165         elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1166         if (!elt)
1167                 return -ENOMEM;
1168
1169         elt->op = OP_NONE;
1170         elt->operand = kstrdup(operand, GFP_KERNEL);
1171         if (!elt->operand) {
1172                 kfree(elt);
1173                 return -ENOMEM;
1174         }
1175
1176         list_add_tail(&elt->list, &ps->postfix);
1177
1178         return 0;
1179 }
1180
1181 static int postfix_append_op(struct filter_parse_state *ps, int op)
1182 {
1183         struct postfix_elt *elt;
1184
1185         elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1186         if (!elt)
1187                 return -ENOMEM;
1188
1189         elt->op = op;
1190         elt->operand = NULL;
1191
1192         list_add_tail(&elt->list, &ps->postfix);
1193
1194         return 0;
1195 }
1196
1197 static void postfix_clear(struct filter_parse_state *ps)
1198 {
1199         struct postfix_elt *elt;
1200
1201         while (!list_empty(&ps->postfix)) {
1202                 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1203                 list_del(&elt->list);
1204                 kfree(elt->operand);
1205                 kfree(elt);
1206         }
1207 }
1208
1209 static int filter_parse(struct filter_parse_state *ps)
1210 {
1211         int in_string = 0;
1212         int op, top_op;
1213         char ch;
1214
1215         while ((ch = infix_next(ps))) {
1216                 if (ch == '"') {
1217                         in_string ^= 1;
1218                         continue;
1219                 }
1220
1221                 if (in_string)
1222                         goto parse_operand;
1223
1224                 if (isspace(ch))
1225                         continue;
1226
1227                 if (is_op_char(ps, ch)) {
1228                         op = infix_get_op(ps, ch);
1229                         if (op == OP_NONE) {
1230                                 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1231                                 return -EINVAL;
1232                         }
1233
1234                         if (strlen(curr_operand(ps))) {
1235                                 postfix_append_operand(ps, curr_operand(ps));
1236                                 clear_operand_string(ps);
1237                         }
1238
1239                         while (!filter_opstack_empty(ps)) {
1240                                 top_op = filter_opstack_top(ps);
1241                                 if (!is_precedence_lower(ps, top_op, op)) {
1242                                         top_op = filter_opstack_pop(ps);
1243                                         postfix_append_op(ps, top_op);
1244                                         continue;
1245                                 }
1246                                 break;
1247                         }
1248
1249                         filter_opstack_push(ps, op);
1250                         continue;
1251                 }
1252
1253                 if (ch == '(') {
1254                         filter_opstack_push(ps, OP_OPEN_PAREN);
1255                         continue;
1256                 }
1257
1258                 if (ch == ')') {
1259                         if (strlen(curr_operand(ps))) {
1260                                 postfix_append_operand(ps, curr_operand(ps));
1261                                 clear_operand_string(ps);
1262                         }
1263
1264                         top_op = filter_opstack_pop(ps);
1265                         while (top_op != OP_NONE) {
1266                                 if (top_op == OP_OPEN_PAREN)
1267                                         break;
1268                                 postfix_append_op(ps, top_op);
1269                                 top_op = filter_opstack_pop(ps);
1270                         }
1271                         if (top_op == OP_NONE) {
1272                                 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1273                                 return -EINVAL;
1274                         }
1275                         continue;
1276                 }
1277 parse_operand:
1278                 if (append_operand_char(ps, ch)) {
1279                         parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1280                         return -EINVAL;
1281                 }
1282         }
1283
1284         if (strlen(curr_operand(ps)))
1285                 postfix_append_operand(ps, curr_operand(ps));
1286
1287         while (!filter_opstack_empty(ps)) {
1288                 top_op = filter_opstack_pop(ps);
1289                 if (top_op == OP_NONE)
1290                         break;
1291                 if (top_op == OP_OPEN_PAREN) {
1292                         parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1293                         return -EINVAL;
1294                 }
1295                 postfix_append_op(ps, top_op);
1296         }
1297
1298         return 0;
1299 }
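
/*
 * Worked example for the conversion above (illustrative): the infix
 * string
 *
 *	pid == 28 && comm ~ "bash*"
 *
 * becomes the postfix list
 *
 *	pid, 28, ==, comm, bash*, ~, &&
 *
 * i.e. operands in order, each operator emitted once its operands and
 * any higher-precedence operators have been emitted.  The quotes only
 * toggle in_string and never reach the operand buffer.
 */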
1300
1301 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1302                                        int op, char *operand1, char *operand2)
1303 {
1304         static struct filter_pred pred;
1305
1306         memset(&pred, 0, sizeof(pred));
1307         pred.op = op;
1308
1309         if (op == OP_AND || op == OP_OR)
1310                 return &pred;
1311
1312         if (!operand1 || !operand2) {
1313                 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1314                 return NULL;
1315         }
1316
1317         pred.field_name = kstrdup(operand1, GFP_KERNEL);
1318         if (!pred.field_name)
1319                 return NULL;
1320
1321         strcpy(pred.regex.pattern, operand2);
1322         pred.regex.len = strlen(pred.regex.pattern);
1323
1324         return &pred;
1325 }
1326
1327 static int check_preds(struct filter_parse_state *ps)
1328 {
1329         int n_normal_preds = 0, n_logical_preds = 0;
1330         struct postfix_elt *elt;
1331
1332         list_for_each_entry(elt, &ps->postfix, list) {
1333                 if (elt->op == OP_NONE)
1334                         continue;
1335
1336                 if (elt->op == OP_AND || elt->op == OP_OR) {
1337                         n_logical_preds++;
1338                         continue;
1339                 }
1340                 n_normal_preds++;
1341         }
1342
1343         if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1344                 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1345                 return -EINVAL;
1346         }
1347
1348         return 0;
1349 }
1350
1351 static int count_preds(struct filter_parse_state *ps)
1352 {
1353         struct postfix_elt *elt;
1354         int n_preds = 0;
1355
1356         list_for_each_entry(elt, &ps->postfix, list) {
1357                 if (elt->op == OP_NONE)
1358                         continue;
1359                 n_preds++;
1360         }
1361
1362         return n_preds;
1363 }
1364
1365 /*
1366  * The tree is walked when filtering an event. If the tree is not correctly
1367  * built, it may cause an infinite loop. Check here that the tree does
1368  * indeed terminate.
1369  */
1370 static int check_pred_tree(struct event_filter *filter,
1371                            struct filter_pred *root)
1372 {
1373         struct filter_pred *preds;
1374         struct filter_pred *pred;
1375         enum move_type move = MOVE_DOWN;
1376         int count = 0;
1377         int done = 0;
1378         int max;
1379
1380         /*
1381          * The maximum number of times we can visit a node is three:
1382          * once going down, once coming up from the left, and
1383          * once coming up from the right. This is more than enough
1384          * since leaves are only hit a single time.
1385          */
1386         max = 3 * filter->n_preds;
1387
1388         preds = filter->preds;
1389         if  (!preds)
1390                 return -EINVAL;
1391         pred = root;
1392
1393         do {
1394                 if (WARN_ON(count++ > max))
1395                         return -EINVAL;
1396
1397                 switch (move) {
1398                 case MOVE_DOWN:
1399                         if (pred->left != FILTER_PRED_INVALID) {
1400                                 pred = &preds[pred->left];
1401                                 continue;
1402                         }
1403                         /* A leaf at the root is just a leaf in the tree */
1404                         if (pred == root)
1405                                 break;
1406                         pred = get_pred_parent(pred, preds,
1407                                                pred->parent, &move);
1408                         continue;
1409                 case MOVE_UP_FROM_LEFT:
1410                         pred = &preds[pred->right];
1411                         move = MOVE_DOWN;
1412                         continue;
1413                 case MOVE_UP_FROM_RIGHT:
1414                         if (pred == root)
1415                                 break;
1416                         pred = get_pred_parent(pred, preds,
1417                                                pred->parent, &move);
1418                         continue;
1419                 }
1420                 done = 1;
1421         } while (!done);
1422
1423         /* We are fine. */
1424         return 0;
1425 }
1426
1427 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1428 {
1429         struct filter_pred *pred;
1430         enum move_type move = MOVE_DOWN;
1431         int count = 0;
1432         int done = 0;
1433
1434         pred = root;
1435
1436         do {
1437                 switch (move) {
1438                 case MOVE_DOWN:
1439                         if (pred->left != FILTER_PRED_INVALID) {
1440                                 pred = &preds[pred->left];
1441                                 continue;
1442                         }
1443                         /* A leaf at the root is just a leaf in the tree */
1444                         if (pred == root)
1445                                 return 1;
1446                         count++;
1447                         pred = get_pred_parent(pred, preds,
1448                                                pred->parent, &move);
1449                         continue;
1450                 case MOVE_UP_FROM_LEFT:
1451                         pred = &preds[pred->right];
1452                         move = MOVE_DOWN;
1453                         continue;
1454                 case MOVE_UP_FROM_RIGHT:
1455                         if (pred == root)
1456                                 break;
1457                         pred = get_pred_parent(pred, preds,
1458                                                pred->parent, &move);
1459                         continue;
1460                 }
1461                 done = 1;
1462         } while (!done);
1463
1464         return count;
1465 }
1466
1467 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1468 {
1469         struct filter_pred *pred;
1470         enum move_type move = MOVE_DOWN;
1471         int count = 0;
1472         int children;
1473         int done = 0;
1474
1475         /* No need to keep the fold flag */
1476         root->index &= ~FILTER_PRED_FOLD;
1477
1478         /* If the root is a leaf then do nothing */
1479         if (root->left == FILTER_PRED_INVALID)
1480                 return 0;
1481
1482         /* count the children */
1483         children = count_leafs(preds, &preds[root->left]);
1484         children += count_leafs(preds, &preds[root->right]);
1485
1486         root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
1487         if (!root->ops)
1488                 return -ENOMEM;
1489
1490         root->val = children;
1491
1492         pred = root;
1493         do {
1494                 switch (move) {
1495                 case MOVE_DOWN:
1496                         if (pred->left != FILTER_PRED_INVALID) {
1497                                 pred = &preds[pred->left];
1498                                 continue;
1499                         }
1500                         if (WARN_ON(count == children))
1501                                 return -EINVAL;
1502                         pred->index &= ~FILTER_PRED_FOLD;
1503                         root->ops[count++] = pred->index;
1504                         pred = get_pred_parent(pred, preds,
1505                                                pred->parent, &move);
1506                         continue;
1507                 case MOVE_UP_FROM_LEFT:
1508                         pred = &preds[pred->right];
1509                         move = MOVE_DOWN;
1510                         continue;
1511                 case MOVE_UP_FROM_RIGHT:
1512                         if (pred == root)
1513                                 break;
1514                         pred = get_pred_parent(pred, preds,
1515                                                pred->parent, &move);
1516                         continue;
1517                 }
1518                 done = 1;
1519         } while (!done);
1520
1521         return 0;
1522 }
1523
1524 /*
1525  * To optimize the processing of the ops, if we have several "ors" or
1526  * "ands" together, we can put them in an array and process them all
1527  * together, speeding up the filter logic.
1528  */
1529 static int fold_pred_tree(struct event_filter *filter,
1530                            struct filter_pred *root)
1531 {
1532         struct filter_pred *preds;
1533         struct filter_pred *pred;
1534         enum move_type move = MOVE_DOWN;
1535         int done = 0;
1536         int err;
1537
1538         preds = filter->preds;
1539         if  (!preds)
1540                 return -EINVAL;
1541         pred = root;
1542
1543         do {
1544                 switch (move) {
1545                 case MOVE_DOWN:
1546                         if (pred->index & FILTER_PRED_FOLD) {
1547                                 err = fold_pred(preds, pred);
1548                                 if (err)
1549                                         return err;
1550                                 /* Folded nodes are like leaves */
1551                         } else if (pred->left != FILTER_PRED_INVALID) {
1552                                 pred = &preds[pred->left];
1553                                 continue;
1554                         }
1555
1556                         /* A leaf at the root is just a leaf in the tree */
1557                         if (pred == root)
1558                                 break;
1559                         pred = get_pred_parent(pred, preds,
1560                                                pred->parent, &move);
1561                         continue;
1562                 case MOVE_UP_FROM_LEFT:
1563                         pred = &preds[pred->right];
1564                         move = MOVE_DOWN;
1565                         continue;
1566                 case MOVE_UP_FROM_RIGHT:
1567                         if (pred == root)
1568                                 break;
1569                         pred = get_pred_parent(pred, preds,
1570                                                pred->parent, &move);
1571                         continue;
1572                 }
1573                 done = 1;
1574         } while (!done);
1575
1576         return 0;
1577 }
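
/*
 * Illustrative example: "prio < 10 && prio > 5 && pid == 1" builds a
 * left-deep AND tree.  filter_set_pred() marked both AND nodes
 * FILTER_PRED_FOLD (each child is a leaf or another AND), so the walk
 * above calls fold_pred() on the root, collapsing the expression into
 * one node whose ops[] lists the three leaf indexes (val == 3), ready
 * for process_ops() to evaluate with AND short-circuiting.
 */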
1578
1579 static int replace_preds(struct ftrace_event_call *call,
1580                          struct event_filter *filter,
1581                          struct filter_parse_state *ps,
1582                          char *filter_string,
1583                          bool dry_run)
1584 {
1585         char *operand1 = NULL, *operand2 = NULL;
1586         struct filter_pred *pred;
1587         struct filter_pred *root;
1588         struct postfix_elt *elt;
1589         struct pred_stack stack = { }; /* init to NULL */
1590         int err;
1591         int n_preds = 0;
1592
1593         n_preds = count_preds(ps);
1594         if (n_preds >= MAX_FILTER_PRED) {
1595                 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1596                 return -ENOSPC;
1597         }
1598
1599         err = check_preds(ps);
1600         if (err)
1601                 return err;
1602
1603         if (!dry_run) {
1604                 err = __alloc_pred_stack(&stack, n_preds);
1605                 if (err)
1606                         return err;
1607                 err = __alloc_preds(filter, n_preds);
1608                 if (err)
1609                         goto fail;
1610         }
1611
1612         n_preds = 0;
1613         list_for_each_entry(elt, &ps->postfix, list) {
1614                 if (elt->op == OP_NONE) {
1615                         if (!operand1)
1616                                 operand1 = elt->operand;
1617                         else if (!operand2)
1618                                 operand2 = elt->operand;
1619                         else {
1620                                 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1621                                 err = -EINVAL;
1622                                 goto fail;
1623                         }
1624                         continue;
1625                 }
1626
1627                 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1628                         parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1629                         err = -ENOSPC;
1630                         goto fail;
1631                 }
1632
1633                 pred = create_pred(ps, elt->op, operand1, operand2);
1634                 if (!pred) {
1635                         err = -ENOMEM;
1636                         goto fail;
1637                 }
1638                 err = filter_add_pred(ps, call, filter, pred, &stack, dry_run);
1639                 filter_free_pred(pred);
1640                 if (err)
1641                         goto fail;
1642
1643                 operand1 = operand2 = NULL;
1644         }
1645
1646         if (!dry_run) {
1647                 /* We should have one item left on the stack */
1648                 pred = __pop_pred_stack(&stack);
1649                 if (!pred) {
1650                         err = -EINVAL;
1651                         goto fail;      /* free the pred stack on the way out */
1652                 }
1651                 /* This item is where we start from in matching */
1652                 root = pred;
1653                 /* Make sure the stack is empty */
1654                 pred = __pop_pred_stack(&stack);
1655                 if (WARN_ON(pred)) {
1656                         err = -EINVAL;
1657                         filter->root = NULL;
1658                         goto fail;
1659                 }
1660                 err = check_pred_tree(filter, root);
1661                 if (err)
1662                         goto fail;
1663
1664                 /* Optimize the tree */
1665                 err = fold_pred_tree(filter, root);
1666                 if (err)
1667                         goto fail;
1668
1669                 /* We don't set root until we know it works */
1670                 barrier();
1671                 filter->root = root;
1672         }
1673
1674         err = 0;
1675 fail:
1676         __free_pred_stack(&stack);
1677         return err;
1678 }
1679
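/*
 * Scratch list node used by replace_system_preds() to collect the
 * event filters that have to be freed once it is certain that no
 * tracepoint is still using them.
 */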
1680 struct filter_list {
1681         struct list_head        list;
1682         struct event_filter     *filter;
1683 };
1684
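/*
 * replace_system_preds - apply one filter string to every event in a
 * subsystem.
 *
 * The first pass dry-runs replace_preds() against each event; if the
 * expression does not apply cleanly to every event in the subsystem,
 * the whole operation is rejected.  The second pass builds a fresh
 * event_filter per event, swaps it in and parks the old filter on a
 * local list.  After all events have been switched over, a single
 * synchronize_sched() ensures no tracepoint is still walking the old
 * predicate trees before they are freed.
 */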
1685 static int replace_system_preds(struct event_subsystem *system,
1686                                 struct filter_parse_state *ps,
1687                                 char *filter_string)
1688 {
1689         struct ftrace_event_call *call;
1690         struct filter_list *filter_item;
1691         struct filter_list *tmp;
1692         LIST_HEAD(filter_list);
1693         bool fail = true;
1694         int err;
1695
1696         list_for_each_entry(call, &ftrace_events, list) {
1697
1698                 if (strcmp(call->class->system, system->name) != 0)
1699                         continue;
1700
1701                 /*
1702                  * Check whether the filter can be applied to this
1703                  * event (the filter argument is ignored on a dry run).
1704                  */
1705                 err = replace_preds(call, NULL, ps, filter_string, true);
1706                 if (err)
1707                         goto fail;
1708         }
1709
1710         list_for_each_entry(call, &ftrace_events, list) {
1711                 struct event_filter *filter;
1712
1713                 if (strcmp(call->class->system, system->name) != 0)
1714                         continue;
1715
1716                 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1717                 if (!filter_item)
1718                         goto fail_mem;
1719
1720                 list_add_tail(&filter_item->list, &filter_list);
1721
1722                 filter_item->filter = __alloc_filter();
1723                 if (!filter_item->filter)
1724                         goto fail_mem;
1725                 filter = filter_item->filter;
1726
1727                 /* Can only fail due to lack of memory */
1728                 err = replace_filter_string(filter, filter_string);
1729                 if (err)
1730                         goto fail_mem;
1731
1732                 err = replace_preds(call, filter, ps, filter_string, false);
1733                 if (err) {
1734                         filter_disable(call);
1735                         parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1736                         append_filter_err(ps, filter);
1737                 } else
1738                         call->flags |= TRACE_EVENT_FL_FILTERED;
1739                 /*
1740                  * Regardless of whether this returned an error, we still
1741                  * replace the filter for the call.
1742                  */
1743                 filter = call->filter;
1744                 call->filter = filter_item->filter;
1745                 filter_item->filter = filter;
1746
1747                 fail = false;
1748         }
1749
1750         if (fail)
1751                 goto fail;
1752
1753         /*
1754          * The calls can still be using the old filters.
1755          * Do a synchronize_sched() to ensure all calls are
1756          * done with them before we free them.
1757          */
1758         synchronize_sched();
1759         list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1760                 __free_filter(filter_item->filter);
1761                 list_del(&filter_item->list);
1762                 kfree(filter_item);
1763         }
1764         return 0;
1765  fail:
1766         /* No call succeeded */
1767         list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1768                 list_del(&filter_item->list);
1769                 kfree(filter_item);
1770         }
1771         parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1772         return -EINVAL;
1773  fail_mem:
1774         /* If any call succeeded, we still need to sync */
1775         if (!fail)
1776                 synchronize_sched();
1777         list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1778                 __free_filter(filter_item->filter);
1779                 list_del(&filter_item->list);
1780                 kfree(filter_item);
1781         }
1782         return -ENOMEM;
1783 }
1784
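/*
 * apply_event_filter - set or clear the filter of a single event
 *
 * Typically reached when a filter string is written to an event's
 * "filter" file, e.g. (illustrative only):
 *
 *      echo 'common_pid != 0' > events/<subsys>/<event>/filter
 *      echo 0 > events/<subsys>/<event>/filter        # clear
 *
 * Writing "0" disables the event's filter and frees it.  Any other
 * string is parsed into a predicate tree; the new filter is swapped in
 * under event_mutex and the old one is freed only after a
 * synchronize_sched(), since a tracepoint may still be evaluating it.
 * On a parse error the event is disabled but the new filter is kept so
 * the error string can be read back.
 */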
1785 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1786 {
1787         struct filter_parse_state *ps;
1788         struct event_filter *filter;
1789         struct event_filter *tmp;
1790         int err = 0;
1791
1792         mutex_lock(&event_mutex);
1793
1794         if (!strcmp(strstrip(filter_string), "0")) {
1795                 filter_disable(call);
1796                 filter = call->filter;
1797                 if (!filter)
1798                         goto out_unlock;
1799                 call->filter = NULL;
1800                 /* Make sure the filter is not being used */
1801                 synchronize_sched();
1802                 __free_filter(filter);
1803                 goto out_unlock;
1804         }
1805
1806         err = -ENOMEM;
1807         ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1808         if (!ps)
1809                 goto out_unlock;
1810
1811         filter = __alloc_filter();
1812         if (!filter) {
1813                 kfree(ps);
1814                 goto out_unlock;
1815         }
1816
1817         replace_filter_string(filter, filter_string);
1818
1819         parse_init(ps, filter_ops, filter_string);
1820         err = filter_parse(ps);
1821         if (err) {
1822                 append_filter_err(ps, filter);
1823                 goto out;
1824         }
1825
1826         err = replace_preds(call, filter, ps, filter_string, false);
1827         if (err) {
1828                 filter_disable(call);
1829                 append_filter_err(ps, filter);
1830         } else
1831                 call->flags |= TRACE_EVENT_FL_FILTERED;
1832 out:
1833         /*
1834          * Always swap the call filter with the new filter,
1835          * even if there was an error. If there was an error
1836          * in the filter, we disable the filter and show the
1837          * error string instead.
1838          */
1839         tmp = call->filter;
1840         call->filter = filter;
1841         if (tmp) {
1842                 /* Make sure the call is done with the filter */
1843                 synchronize_sched();
1844                 __free_filter(tmp);
1845         }
1846         filter_opstack_clear(ps);
1847         postfix_clear(ps);
1848         kfree(ps);
1849 out_unlock:
1850         mutex_unlock(&event_mutex);
1851
1852         return err;
1853 }
1854
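/*
 * apply_subsystem_event_filter - set or clear the filter for all
 * events in a subsystem, e.g. via the subsystem-level "filter" file
 * (illustrative):
 *
 *      echo 'common_pid == 0' > events/<subsys>/filter
 *
 * Writing "0" removes the subsystem filter and frees every per-event
 * filter derived from it.  Otherwise the string is parsed once and
 * handed to replace_system_preds(); the subsystem's own event_filter
 * only stores the filter string and any error text for display, as no
 * event evaluates it directly.
 */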
1855 int apply_subsystem_event_filter(struct event_subsystem *system,
1856                                  char *filter_string)
1857 {
1858         struct filter_parse_state *ps;
1859         struct event_filter *filter;
1860         int err = 0;
1861
1862         mutex_lock(&event_mutex);
1863
1864         /* Make sure the system still has events */
1865         if (!system->nr_events) {
1866                 err = -ENODEV;
1867                 goto out_unlock;
1868         }
1869
1870         if (!strcmp(strstrip(filter_string), "0")) {
1871                 filter_free_subsystem_preds(system);
1872                 remove_filter_string(system->filter);
1873                 filter = system->filter;
1874                 system->filter = NULL;
1875                 /* Ensure all filters are no longer used */
1876                 synchronize_sched();
1877                 filter_free_subsystem_filters(system);
1878                 __free_filter(filter);
1879                 goto out_unlock;
1880         }
1881
1882         err = -ENOMEM;
1883         ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1884         if (!ps)
1885                 goto out_unlock;
1886
1887         filter = __alloc_filter();
1888         if (!filter)
1889                 goto out;
1890
1891         replace_filter_string(filter, filter_string);
1892         /*
1893          * No event actually uses the system filter itself,
1894          * so we can free it without synchronize_sched().
1895          */
1896         __free_filter(system->filter);
1897         system->filter = filter;
1898
1899         parse_init(ps, filter_ops, filter_string);
1900         err = filter_parse(ps);
1901         if (err) {
1902                 append_filter_err(ps, system->filter);
1903                 goto out;
1904         }
1905
1906         err = replace_system_preds(system, ps, filter_string);
1907         if (err)
1908                 append_filter_err(ps, system->filter);
1909
1910 out:
1911         filter_opstack_clear(ps);
1912         postfix_clear(ps);
1913         kfree(ps);
1914 out_unlock:
1915         mutex_unlock(&event_mutex);
1916
1917         return err;
1918 }
1919
1920 #ifdef CONFIG_PERF_EVENTS
1921
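/*
 * Perf tracepoint events can carry their own filter, set through the
 * PERF_EVENT_IOC_SET_FILTER ioctl.  It lives in event->filter,
 * independent of the filters attached to the ftrace_event_call; the
 * helpers below allocate and free it.
 */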
1922 void ftrace_profile_free_filter(struct perf_event *event)
1923 {
1924         struct event_filter *filter = event->filter;
1925
1926         event->filter = NULL;
1927         __free_filter(filter);
1928 }
1929
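/*
 * ftrace_profile_set_filter - parse filter_str and attach the result
 * to a perf event.
 *
 * The trace event is looked up by its type id.  Only one filter may be
 * set per perf event (-EEXIST otherwise); on any parse or predicate
 * error the partially built filter is freed and the error is returned.
 */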
1930 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1931                               char *filter_str)
1932 {
1933         int err;
1934         struct event_filter *filter;
1935         struct filter_parse_state *ps;
1936         struct ftrace_event_call *call = NULL;
1937
1938         mutex_lock(&event_mutex);
1939
1940         list_for_each_entry(call, &ftrace_events, list) {
1941                 if (call->event.type == event_id)
1942                         break;
1943         }
1944
1945         err = -EINVAL;
1946         if (&call->list == &ftrace_events)
1947                 goto out_unlock;
1948
1949         err = -EEXIST;
1950         if (event->filter)
1951                 goto out_unlock;
1952
1953         filter = __alloc_filter();
1954         if (!filter) {
1955                 err = -ENOMEM;  /* __alloc_filter() returns NULL on failure */
1956                 goto out_unlock;
1957         }
1958
1959         err = -ENOMEM;
1960         ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1961         if (!ps)
1962                 goto free_filter;
1963
1964         parse_init(ps, filter_ops, filter_str);
1965         err = filter_parse(ps);
1966         if (err)
1967                 goto free_ps;
1968
1969         err = replace_preds(call, filter, ps, filter_str, false);
1970         if (!err)
1971                 event->filter = filter;
1972
1973 free_ps:
1974         filter_opstack_clear(ps);
1975         postfix_clear(ps);
1976         kfree(ps);
1977
1978 free_filter:
1979         if (err)
1980                 __free_filter(filter);
1981
1982 out_unlock:
1983         mutex_unlock(&event_mutex);
1984
1985         return err;
1986 }
1987
1988 #endif /* CONFIG_PERF_EVENTS */
1989