]> Pileus Git - ~andy/linux/blob - kernel/trace/trace_events_filter.c
tracing/filter: Change fold_pred_tree function to use walk_pred_tree
[~andy/linux] / kernel / trace / trace_events_filter.c
1 /*
2  * trace_events_filter - generic event filtering
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19  */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26
27 #include "trace.h"
28 #include "trace_output.h"
29
30 enum filter_op_ids
31 {
32         OP_OR,
33         OP_AND,
34         OP_GLOB,
35         OP_NE,
36         OP_EQ,
37         OP_LT,
38         OP_LE,
39         OP_GT,
40         OP_GE,
41         OP_NONE,
42         OP_OPEN_PAREN,
43 };
44
45 struct filter_op {
46         int id;
47         char *string;
48         int precedence;
49 };
50
51 static struct filter_op filter_ops[] = {
52         { OP_OR,        "||",           1 },
53         { OP_AND,       "&&",           2 },
54         { OP_GLOB,      "~",            4 },
55         { OP_NE,        "!=",           4 },
56         { OP_EQ,        "==",           4 },
57         { OP_LT,        "<",            5 },
58         { OP_LE,        "<=",           5 },
59         { OP_GT,        ">",            5 },
60         { OP_GE,        ">=",           5 },
61         { OP_NONE,      "OP_NONE",      0 },
62         { OP_OPEN_PAREN, "(",           0 },
63 };
64
65 enum {
66         FILT_ERR_NONE,
67         FILT_ERR_INVALID_OP,
68         FILT_ERR_UNBALANCED_PAREN,
69         FILT_ERR_TOO_MANY_OPERANDS,
70         FILT_ERR_OPERAND_TOO_LONG,
71         FILT_ERR_FIELD_NOT_FOUND,
72         FILT_ERR_ILLEGAL_FIELD_OP,
73         FILT_ERR_ILLEGAL_INTVAL,
74         FILT_ERR_BAD_SUBSYS_FILTER,
75         FILT_ERR_TOO_MANY_PREDS,
76         FILT_ERR_MISSING_FIELD,
77         FILT_ERR_INVALID_FILTER,
78 };
79
80 static char *err_text[] = {
81         "No error",
82         "Invalid operator",
83         "Unbalanced parens",
84         "Too many operands",
85         "Operand too long",
86         "Field not found",
87         "Illegal operation for field type",
88         "Illegal integer value",
89         "Couldn't find or set field in one of a subsystem's events",
90         "Too many terms in predicate expression",
91         "Missing field name and/or value",
92         "Meaningless filter expression",
93 };
94
95 struct opstack_op {
96         int op;
97         struct list_head list;
98 };
99
100 struct postfix_elt {
101         int op;
102         char *operand;
103         struct list_head list;
104 };
105
106 struct filter_parse_state {
107         struct filter_op *ops;
108         struct list_head opstack;
109         struct list_head postfix;
110         int lasterr;
111         int lasterr_pos;
112
113         struct {
114                 char *string;
115                 unsigned int cnt;
116                 unsigned int tail;
117         } infix;
118
119         struct {
120                 char string[MAX_FILTER_STR_VAL];
121                 int pos;
122                 unsigned int tail;
123         } operand;
124 };
125
126 struct pred_stack {
127         struct filter_pred      **preds;
128         int                     index;
129 };
130
131 #define DEFINE_COMPARISON_PRED(type)                                    \
132 static int filter_pred_##type(struct filter_pred *pred, void *event)    \
133 {                                                                       \
134         type *addr = (type *)(event + pred->offset);                    \
135         type val = (type)pred->val;                                     \
136         int match = 0;                                                  \
137                                                                         \
138         switch (pred->op) {                                             \
139         case OP_LT:                                                     \
140                 match = (*addr < val);                                  \
141                 break;                                                  \
142         case OP_LE:                                                     \
143                 match = (*addr <= val);                                 \
144                 break;                                                  \
145         case OP_GT:                                                     \
146                 match = (*addr > val);                                  \
147                 break;                                                  \
148         case OP_GE:                                                     \
149                 match = (*addr >= val);                                 \
150                 break;                                                  \
151         default:                                                        \
152                 break;                                                  \
153         }                                                               \
154                                                                         \
155         return match;                                                   \
156 }
157
158 #define DEFINE_EQUALITY_PRED(size)                                      \
159 static int filter_pred_##size(struct filter_pred *pred, void *event)    \
160 {                                                                       \
161         u##size *addr = (u##size *)(event + pred->offset);              \
162         u##size val = (u##size)pred->val;                               \
163         int match;                                                      \
164                                                                         \
165         match = (val == *addr) ^ pred->not;                             \
166                                                                         \
167         return match;                                                   \
168 }
169
170 DEFINE_COMPARISON_PRED(s64);
171 DEFINE_COMPARISON_PRED(u64);
172 DEFINE_COMPARISON_PRED(s32);
173 DEFINE_COMPARISON_PRED(u32);
174 DEFINE_COMPARISON_PRED(s16);
175 DEFINE_COMPARISON_PRED(u16);
176 DEFINE_COMPARISON_PRED(s8);
177 DEFINE_COMPARISON_PRED(u8);
178
179 DEFINE_EQUALITY_PRED(64);
180 DEFINE_EQUALITY_PRED(32);
181 DEFINE_EQUALITY_PRED(16);
182 DEFINE_EQUALITY_PRED(8);
183
184 /* Filter predicate for fixed sized arrays of characters */
185 static int filter_pred_string(struct filter_pred *pred, void *event)
186 {
187         char *addr = (char *)(event + pred->offset);
188         int cmp, match;
189
190         cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
191
192         match = cmp ^ pred->not;
193
194         return match;
195 }
196
197 /* Filter predicate for char * pointers */
198 static int filter_pred_pchar(struct filter_pred *pred, void *event)
199 {
200         char **addr = (char **)(event + pred->offset);
201         int cmp, match;
202         int len = strlen(*addr) + 1;    /* including tailing '\0' */
203
204         cmp = pred->regex.match(*addr, &pred->regex, len);
205
206         match = cmp ^ pred->not;
207
208         return match;
209 }
210
211 /*
212  * Filter predicate for dynamic sized arrays of characters.
213  * These are implemented through a list of strings at the end
214  * of the entry.
215  * Also each of these strings have a field in the entry which
216  * contains its offset from the beginning of the entry.
217  * We have then first to get this field, dereference it
218  * and add it to the address of the entry, and at last we have
219  * the address of the string.
220  */
221 static int filter_pred_strloc(struct filter_pred *pred, void *event)
222 {
223         u32 str_item = *(u32 *)(event + pred->offset);
224         int str_loc = str_item & 0xffff;
225         int str_len = str_item >> 16;
226         char *addr = (char *)(event + str_loc);
227         int cmp, match;
228
229         cmp = pred->regex.match(addr, &pred->regex, str_len);
230
231         match = cmp ^ pred->not;
232
233         return match;
234 }
235
236 static int filter_pred_none(struct filter_pred *pred, void *event)
237 {
238         return 0;
239 }
240
241 /*
242  * regex_match_foo - Basic regex callbacks
243  *
244  * @str: the string to be searched
245  * @r:   the regex structure containing the pattern string
246  * @len: the length of the string to be searched (including '\0')
247  *
248  * Note:
249  * - @str might not be NULL-terminated if it's of type DYN_STRING
250  *   or STATIC_STRING
251  */
252
253 static int regex_match_full(char *str, struct regex *r, int len)
254 {
255         if (strncmp(str, r->pattern, len) == 0)
256                 return 1;
257         return 0;
258 }
259
260 static int regex_match_front(char *str, struct regex *r, int len)
261 {
262         if (strncmp(str, r->pattern, r->len) == 0)
263                 return 1;
264         return 0;
265 }
266
267 static int regex_match_middle(char *str, struct regex *r, int len)
268 {
269         if (strnstr(str, r->pattern, len))
270                 return 1;
271         return 0;
272 }
273
274 static int regex_match_end(char *str, struct regex *r, int len)
275 {
276         int strlen = len - 1;
277
278         if (strlen >= r->len &&
279             memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
280                 return 1;
281         return 0;
282 }
283
284 /**
285  * filter_parse_regex - parse a basic regex
286  * @buff:   the raw regex
287  * @len:    length of the regex
288  * @search: will point to the beginning of the string to compare
289  * @not:    tell whether the match will have to be inverted
290  *
291  * This passes in a buffer containing a regex and this function will
292  * set search to point to the search part of the buffer and
293  * return the type of search it is (see enum above).
294  * This does modify buff.
295  *
296  * Returns enum type.
297  *  search returns the pointer to use for comparison.
298  *  not returns 1 if buff started with a '!'
299  *     0 otherwise.
300  */
301 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
302 {
303         int type = MATCH_FULL;
304         int i;
305
306         if (buff[0] == '!') {
307                 *not = 1;
308                 buff++;
309                 len--;
310         } else
311                 *not = 0;
312
313         *search = buff;
314
315         for (i = 0; i < len; i++) {
316                 if (buff[i] == '*') {
317                         if (!i) {
318                                 *search = buff + 1;
319                                 type = MATCH_END_ONLY;
320                         } else {
321                                 if (type == MATCH_END_ONLY)
322                                         type = MATCH_MIDDLE_ONLY;
323                                 else
324                                         type = MATCH_FRONT_ONLY;
325                                 buff[i] = 0;
326                                 break;
327                         }
328                 }
329         }
330
331         return type;
332 }
333
334 static void filter_build_regex(struct filter_pred *pred)
335 {
336         struct regex *r = &pred->regex;
337         char *search;
338         enum regex_type type = MATCH_FULL;
339         int not = 0;
340
341         if (pred->op == OP_GLOB) {
342                 type = filter_parse_regex(r->pattern, r->len, &search, &not);
343                 r->len = strlen(search);
344                 memmove(r->pattern, search, r->len+1);
345         }
346
347         switch (type) {
348         case MATCH_FULL:
349                 r->match = regex_match_full;
350                 break;
351         case MATCH_FRONT_ONLY:
352                 r->match = regex_match_front;
353                 break;
354         case MATCH_MIDDLE_ONLY:
355                 r->match = regex_match_middle;
356                 break;
357         case MATCH_END_ONLY:
358                 r->match = regex_match_end;
359                 break;
360         }
361
362         pred->not ^= not;
363 }
364
365 enum move_type {
366         MOVE_DOWN,
367         MOVE_UP_FROM_LEFT,
368         MOVE_UP_FROM_RIGHT
369 };
370
371 static struct filter_pred *
372 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
373                 int index, enum move_type *move)
374 {
375         if (pred->parent & FILTER_PRED_IS_RIGHT)
376                 *move = MOVE_UP_FROM_RIGHT;
377         else
378                 *move = MOVE_UP_FROM_LEFT;
379         pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
380
381         return pred;
382 }
383
384 enum walk_return {
385         WALK_PRED_ABORT,
386         WALK_PRED_PARENT,
387         WALK_PRED_DEFAULT,
388 };
389
390 typedef int (*filter_pred_walkcb_t) (enum move_type move,
391                                      struct filter_pred *pred,
392                                      int *err, void *data);
393
394 static int walk_pred_tree(struct filter_pred *preds,
395                           struct filter_pred *root,
396                           filter_pred_walkcb_t cb, void *data)
397 {
398         struct filter_pred *pred = root;
399         enum move_type move = MOVE_DOWN;
400         int done = 0;
401
402         if  (!preds)
403                 return -EINVAL;
404
405         do {
406                 int err = 0, ret;
407
408                 ret = cb(move, pred, &err, data);
409                 if (ret == WALK_PRED_ABORT)
410                         return err;
411                 if (ret == WALK_PRED_PARENT)
412                         goto get_parent;
413
414                 switch (move) {
415                 case MOVE_DOWN:
416                         if (pred->left != FILTER_PRED_INVALID) {
417                                 pred = &preds[pred->left];
418                                 continue;
419                         }
420                         goto get_parent;
421                 case MOVE_UP_FROM_LEFT:
422                         pred = &preds[pred->right];
423                         move = MOVE_DOWN;
424                         continue;
425                 case MOVE_UP_FROM_RIGHT:
426  get_parent:
427                         if (pred == root)
428                                 break;
429                         pred = get_pred_parent(pred, preds,
430                                                pred->parent,
431                                                &move);
432                         continue;
433                 }
434                 done = 1;
435         } while (!done);
436
437         /* We are fine. */
438         return 0;
439 }
440
441 /*
442  * A series of AND or ORs where found together. Instead of
443  * climbing up and down the tree branches, an array of the
444  * ops were made in order of checks. We can just move across
445  * the array and short circuit if needed.
446  */
447 static int process_ops(struct filter_pred *preds,
448                        struct filter_pred *op, void *rec)
449 {
450         struct filter_pred *pred;
451         int match = 0;
452         int type;
453         int i;
454
455         /*
456          * Micro-optimization: We set type to true if op
457          * is an OR and false otherwise (AND). Then we
458          * just need to test if the match is equal to
459          * the type, and if it is, we can short circuit the
460          * rest of the checks:
461          *
462          * if ((match && op->op == OP_OR) ||
463          *     (!match && op->op == OP_AND))
464          *        return match;
465          */
466         type = op->op == OP_OR;
467
468         for (i = 0; i < op->val; i++) {
469                 pred = &preds[op->ops[i]];
470                 match = pred->fn(pred, rec);
471                 if (!!match == type)
472                         return match;
473         }
474         return match;
475 }
476
477 /* return 1 if event matches, 0 otherwise (discard) */
478 int filter_match_preds(struct event_filter *filter, void *rec)
479 {
480         int match = -1;
481         enum move_type move = MOVE_DOWN;
482         struct filter_pred *preds;
483         struct filter_pred *pred;
484         struct filter_pred *root;
485         int n_preds;
486         int done = 0;
487
488         /* no filter is considered a match */
489         if (!filter)
490                 return 1;
491
492         n_preds = filter->n_preds;
493
494         if (!n_preds)
495                 return 1;
496
497         /*
498          * n_preds, root and filter->preds are protect with preemption disabled.
499          */
500         preds = rcu_dereference_sched(filter->preds);
501         root = rcu_dereference_sched(filter->root);
502         if (!root)
503                 return 1;
504
505         pred = root;
506
507         /* match is currently meaningless */
508         match = -1;
509
510         do {
511                 switch (move) {
512                 case MOVE_DOWN:
513                         /* only AND and OR have children */
514                         if (pred->left != FILTER_PRED_INVALID) {
515                                 /* If ops is set, then it was folded. */
516                                 if (!pred->ops) {
517                                         /* keep going to down the left side */
518                                         pred = &preds[pred->left];
519                                         continue;
520                                 }
521                                 /* We can treat folded ops as a leaf node */
522                                 match = process_ops(preds, pred, rec);
523                         } else
524                                 match = pred->fn(pred, rec);
525                         /* If this pred is the only pred */
526                         if (pred == root)
527                                 break;
528                         pred = get_pred_parent(pred, preds,
529                                                pred->parent, &move);
530                         continue;
531                 case MOVE_UP_FROM_LEFT:
532                         /*
533                          * Check for short circuits.
534                          *
535                          * Optimization: !!match == (pred->op == OP_OR)
536                          *   is the same as:
537                          * if ((match && pred->op == OP_OR) ||
538                          *     (!match && pred->op == OP_AND))
539                          */
540                         if (!!match == (pred->op == OP_OR)) {
541                                 if (pred == root)
542                                         break;
543                                 pred = get_pred_parent(pred, preds,
544                                                        pred->parent, &move);
545                                 continue;
546                         }
547                         /* now go down the right side of the tree. */
548                         pred = &preds[pred->right];
549                         move = MOVE_DOWN;
550                         continue;
551                 case MOVE_UP_FROM_RIGHT:
552                         /* We finished this equation. */
553                         if (pred == root)
554                                 break;
555                         pred = get_pred_parent(pred, preds,
556                                                pred->parent, &move);
557                         continue;
558                 }
559                 done = 1;
560         } while (!done);
561
562         return match;
563 }
564 EXPORT_SYMBOL_GPL(filter_match_preds);
565
566 static void parse_error(struct filter_parse_state *ps, int err, int pos)
567 {
568         ps->lasterr = err;
569         ps->lasterr_pos = pos;
570 }
571
572 static void remove_filter_string(struct event_filter *filter)
573 {
574         if (!filter)
575                 return;
576
577         kfree(filter->filter_string);
578         filter->filter_string = NULL;
579 }
580
581 static int replace_filter_string(struct event_filter *filter,
582                                  char *filter_string)
583 {
584         kfree(filter->filter_string);
585         filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
586         if (!filter->filter_string)
587                 return -ENOMEM;
588
589         return 0;
590 }
591
592 static int append_filter_string(struct event_filter *filter,
593                                 char *string)
594 {
595         int newlen;
596         char *new_filter_string;
597
598         BUG_ON(!filter->filter_string);
599         newlen = strlen(filter->filter_string) + strlen(string) + 1;
600         new_filter_string = kmalloc(newlen, GFP_KERNEL);
601         if (!new_filter_string)
602                 return -ENOMEM;
603
604         strcpy(new_filter_string, filter->filter_string);
605         strcat(new_filter_string, string);
606         kfree(filter->filter_string);
607         filter->filter_string = new_filter_string;
608
609         return 0;
610 }
611
612 static void append_filter_err(struct filter_parse_state *ps,
613                               struct event_filter *filter)
614 {
615         int pos = ps->lasterr_pos;
616         char *buf, *pbuf;
617
618         buf = (char *)__get_free_page(GFP_TEMPORARY);
619         if (!buf)
620                 return;
621
622         append_filter_string(filter, "\n");
623         memset(buf, ' ', PAGE_SIZE);
624         if (pos > PAGE_SIZE - 128)
625                 pos = 0;
626         buf[pos] = '^';
627         pbuf = &buf[pos] + 1;
628
629         sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
630         append_filter_string(filter, buf);
631         free_page((unsigned long) buf);
632 }
633
634 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
635 {
636         struct event_filter *filter;
637
638         mutex_lock(&event_mutex);
639         filter = call->filter;
640         if (filter && filter->filter_string)
641                 trace_seq_printf(s, "%s\n", filter->filter_string);
642         else
643                 trace_seq_printf(s, "none\n");
644         mutex_unlock(&event_mutex);
645 }
646
647 void print_subsystem_event_filter(struct event_subsystem *system,
648                                   struct trace_seq *s)
649 {
650         struct event_filter *filter;
651
652         mutex_lock(&event_mutex);
653         filter = system->filter;
654         if (filter && filter->filter_string)
655                 trace_seq_printf(s, "%s\n", filter->filter_string);
656         else
657                 trace_seq_printf(s, "none\n");
658         mutex_unlock(&event_mutex);
659 }
660
661 static struct ftrace_event_field *
662 __find_event_field(struct list_head *head, char *name)
663 {
664         struct ftrace_event_field *field;
665
666         list_for_each_entry(field, head, link) {
667                 if (!strcmp(field->name, name))
668                         return field;
669         }
670
671         return NULL;
672 }
673
674 static struct ftrace_event_field *
675 find_event_field(struct ftrace_event_call *call, char *name)
676 {
677         struct ftrace_event_field *field;
678         struct list_head *head;
679
680         field = __find_event_field(&ftrace_common_fields, name);
681         if (field)
682                 return field;
683
684         head = trace_get_fields(call);
685         return __find_event_field(head, name);
686 }
687
688 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
689 {
690         stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
691         if (!stack->preds)
692                 return -ENOMEM;
693         stack->index = n_preds;
694         return 0;
695 }
696
697 static void __free_pred_stack(struct pred_stack *stack)
698 {
699         kfree(stack->preds);
700         stack->index = 0;
701 }
702
703 static int __push_pred_stack(struct pred_stack *stack,
704                              struct filter_pred *pred)
705 {
706         int index = stack->index;
707
708         if (WARN_ON(index == 0))
709                 return -ENOSPC;
710
711         stack->preds[--index] = pred;
712         stack->index = index;
713         return 0;
714 }
715
716 static struct filter_pred *
717 __pop_pred_stack(struct pred_stack *stack)
718 {
719         struct filter_pred *pred;
720         int index = stack->index;
721
722         pred = stack->preds[index++];
723         if (!pred)
724                 return NULL;
725
726         stack->index = index;
727         return pred;
728 }
729
730 static int filter_set_pred(struct event_filter *filter,
731                            int idx,
732                            struct pred_stack *stack,
733                            struct filter_pred *src)
734 {
735         struct filter_pred *dest = &filter->preds[idx];
736         struct filter_pred *left;
737         struct filter_pred *right;
738
739         *dest = *src;
740         dest->index = idx;
741
742         if (dest->op == OP_OR || dest->op == OP_AND) {
743                 right = __pop_pred_stack(stack);
744                 left = __pop_pred_stack(stack);
745                 if (!left || !right)
746                         return -EINVAL;
747                 /*
748                  * If both children can be folded
749                  * and they are the same op as this op or a leaf,
750                  * then this op can be folded.
751                  */
752                 if (left->index & FILTER_PRED_FOLD &&
753                     (left->op == dest->op ||
754                      left->left == FILTER_PRED_INVALID) &&
755                     right->index & FILTER_PRED_FOLD &&
756                     (right->op == dest->op ||
757                      right->left == FILTER_PRED_INVALID))
758                         dest->index |= FILTER_PRED_FOLD;
759
760                 dest->left = left->index & ~FILTER_PRED_FOLD;
761                 dest->right = right->index & ~FILTER_PRED_FOLD;
762                 left->parent = dest->index & ~FILTER_PRED_FOLD;
763                 right->parent = dest->index | FILTER_PRED_IS_RIGHT;
764         } else {
765                 /*
766                  * Make dest->left invalid to be used as a quick
767                  * way to know this is a leaf node.
768                  */
769                 dest->left = FILTER_PRED_INVALID;
770
771                 /* All leafs allow folding the parent ops. */
772                 dest->index |= FILTER_PRED_FOLD;
773         }
774
775         return __push_pred_stack(stack, dest);
776 }
777
778 static void __free_preds(struct event_filter *filter)
779 {
780         if (filter->preds) {
781                 kfree(filter->preds);
782                 filter->preds = NULL;
783         }
784         filter->a_preds = 0;
785         filter->n_preds = 0;
786 }
787
788 static void filter_disable(struct ftrace_event_call *call)
789 {
790         call->flags &= ~TRACE_EVENT_FL_FILTERED;
791 }
792
793 static void __free_filter(struct event_filter *filter)
794 {
795         if (!filter)
796                 return;
797
798         __free_preds(filter);
799         kfree(filter->filter_string);
800         kfree(filter);
801 }
802
803 /*
804  * Called when destroying the ftrace_event_call.
805  * The call is being freed, so we do not need to worry about
806  * the call being currently used. This is for module code removing
807  * the tracepoints from within it.
808  */
809 void destroy_preds(struct ftrace_event_call *call)
810 {
811         __free_filter(call->filter);
812         call->filter = NULL;
813 }
814
815 static struct event_filter *__alloc_filter(void)
816 {
817         struct event_filter *filter;
818
819         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
820         return filter;
821 }
822
823 static int __alloc_preds(struct event_filter *filter, int n_preds)
824 {
825         struct filter_pred *pred;
826         int i;
827
828         if (filter->preds)
829                 __free_preds(filter);
830
831         filter->preds =
832                 kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
833
834         if (!filter->preds)
835                 return -ENOMEM;
836
837         filter->a_preds = n_preds;
838         filter->n_preds = 0;
839
840         for (i = 0; i < n_preds; i++) {
841                 pred = &filter->preds[i];
842                 pred->fn = filter_pred_none;
843         }
844
845         return 0;
846 }
847
848 static void filter_free_subsystem_preds(struct event_subsystem *system)
849 {
850         struct ftrace_event_call *call;
851
852         list_for_each_entry(call, &ftrace_events, list) {
853                 if (strcmp(call->class->system, system->name) != 0)
854                         continue;
855
856                 filter_disable(call);
857                 remove_filter_string(call->filter);
858         }
859 }
860
861 static void filter_free_subsystem_filters(struct event_subsystem *system)
862 {
863         struct ftrace_event_call *call;
864
865         list_for_each_entry(call, &ftrace_events, list) {
866                 if (strcmp(call->class->system, system->name) != 0)
867                         continue;
868                 __free_filter(call->filter);
869                 call->filter = NULL;
870         }
871 }
872
873 static int filter_add_pred(struct filter_parse_state *ps,
874                            struct event_filter *filter,
875                            struct filter_pred *pred,
876                            struct pred_stack *stack)
877 {
878         int err;
879
880         if (WARN_ON(filter->n_preds == filter->a_preds)) {
881                 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
882                 return -ENOSPC;
883         }
884
885         err = filter_set_pred(filter, filter->n_preds, stack, pred);
886         if (err)
887                 return err;
888
889         filter->n_preds++;
890
891         return 0;
892 }
893
894 int filter_assign_type(const char *type)
895 {
896         if (strstr(type, "__data_loc") && strstr(type, "char"))
897                 return FILTER_DYN_STRING;
898
899         if (strchr(type, '[') && strstr(type, "char"))
900                 return FILTER_STATIC_STRING;
901
902         return FILTER_OTHER;
903 }
904
905 static bool is_string_field(struct ftrace_event_field *field)
906 {
907         return field->filter_type == FILTER_DYN_STRING ||
908                field->filter_type == FILTER_STATIC_STRING ||
909                field->filter_type == FILTER_PTR_STRING;
910 }
911
912 static int is_legal_op(struct ftrace_event_field *field, int op)
913 {
914         if (is_string_field(field) &&
915             (op != OP_EQ && op != OP_NE && op != OP_GLOB))
916                 return 0;
917         if (!is_string_field(field) && op == OP_GLOB)
918                 return 0;
919
920         return 1;
921 }
922
923 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
924                                              int field_is_signed)
925 {
926         filter_pred_fn_t fn = NULL;
927
928         switch (field_size) {
929         case 8:
930                 if (op == OP_EQ || op == OP_NE)
931                         fn = filter_pred_64;
932                 else if (field_is_signed)
933                         fn = filter_pred_s64;
934                 else
935                         fn = filter_pred_u64;
936                 break;
937         case 4:
938                 if (op == OP_EQ || op == OP_NE)
939                         fn = filter_pred_32;
940                 else if (field_is_signed)
941                         fn = filter_pred_s32;
942                 else
943                         fn = filter_pred_u32;
944                 break;
945         case 2:
946                 if (op == OP_EQ || op == OP_NE)
947                         fn = filter_pred_16;
948                 else if (field_is_signed)
949                         fn = filter_pred_s16;
950                 else
951                         fn = filter_pred_u16;
952                 break;
953         case 1:
954                 if (op == OP_EQ || op == OP_NE)
955                         fn = filter_pred_8;
956                 else if (field_is_signed)
957                         fn = filter_pred_s8;
958                 else
959                         fn = filter_pred_u8;
960                 break;
961         }
962
963         return fn;
964 }
965
966 static int init_pred(struct filter_parse_state *ps,
967                      struct ftrace_event_field *field,
968                      struct filter_pred *pred)
969
970 {
971         filter_pred_fn_t fn = filter_pred_none;
972         unsigned long long val;
973         int ret;
974
975         pred->offset = field->offset;
976
977         if (!is_legal_op(field, pred->op)) {
978                 parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
979                 return -EINVAL;
980         }
981
982         if (is_string_field(field)) {
983                 filter_build_regex(pred);
984
985                 if (field->filter_type == FILTER_STATIC_STRING) {
986                         fn = filter_pred_string;
987                         pred->regex.field_len = field->size;
988                 } else if (field->filter_type == FILTER_DYN_STRING)
989                         fn = filter_pred_strloc;
990                 else
991                         fn = filter_pred_pchar;
992         } else {
993                 if (field->is_signed)
994                         ret = strict_strtoll(pred->regex.pattern, 0, &val);
995                 else
996                         ret = strict_strtoull(pred->regex.pattern, 0, &val);
997                 if (ret) {
998                         parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
999                         return -EINVAL;
1000                 }
1001                 pred->val = val;
1002
1003                 fn = select_comparison_fn(pred->op, field->size,
1004                                           field->is_signed);
1005                 if (!fn) {
1006                         parse_error(ps, FILT_ERR_INVALID_OP, 0);
1007                         return -EINVAL;
1008                 }
1009         }
1010
1011         if (pred->op == OP_NE)
1012                 pred->not = 1;
1013
1014         pred->fn = fn;
1015         return 0;
1016 }
1017
1018 static void parse_init(struct filter_parse_state *ps,
1019                        struct filter_op *ops,
1020                        char *infix_string)
1021 {
1022         memset(ps, '\0', sizeof(*ps));
1023
1024         ps->infix.string = infix_string;
1025         ps->infix.cnt = strlen(infix_string);
1026         ps->ops = ops;
1027
1028         INIT_LIST_HEAD(&ps->opstack);
1029         INIT_LIST_HEAD(&ps->postfix);
1030 }
1031
1032 static char infix_next(struct filter_parse_state *ps)
1033 {
1034         ps->infix.cnt--;
1035
1036         return ps->infix.string[ps->infix.tail++];
1037 }
1038
1039 static char infix_peek(struct filter_parse_state *ps)
1040 {
1041         if (ps->infix.tail == strlen(ps->infix.string))
1042                 return 0;
1043
1044         return ps->infix.string[ps->infix.tail];
1045 }
1046
1047 static void infix_advance(struct filter_parse_state *ps)
1048 {
1049         ps->infix.cnt--;
1050         ps->infix.tail++;
1051 }
1052
1053 static inline int is_precedence_lower(struct filter_parse_state *ps,
1054                                       int a, int b)
1055 {
1056         return ps->ops[a].precedence < ps->ops[b].precedence;
1057 }
1058
1059 static inline int is_op_char(struct filter_parse_state *ps, char c)
1060 {
1061         int i;
1062
1063         for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1064                 if (ps->ops[i].string[0] == c)
1065                         return 1;
1066         }
1067
1068         return 0;
1069 }
1070
1071 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1072 {
1073         char nextc = infix_peek(ps);
1074         char opstr[3];
1075         int i;
1076
1077         opstr[0] = firstc;
1078         opstr[1] = nextc;
1079         opstr[2] = '\0';
1080
1081         for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1082                 if (!strcmp(opstr, ps->ops[i].string)) {
1083                         infix_advance(ps);
1084                         return ps->ops[i].id;
1085                 }
1086         }
1087
1088         opstr[1] = '\0';
1089
1090         for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1091                 if (!strcmp(opstr, ps->ops[i].string))
1092                         return ps->ops[i].id;
1093         }
1094
1095         return OP_NONE;
1096 }
1097
1098 static inline void clear_operand_string(struct filter_parse_state *ps)
1099 {
1100         memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1101         ps->operand.tail = 0;
1102 }
1103
1104 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1105 {
1106         if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1107                 return -EINVAL;
1108
1109         ps->operand.string[ps->operand.tail++] = c;
1110
1111         return 0;
1112 }
1113
1114 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1115 {
1116         struct opstack_op *opstack_op;
1117
1118         opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1119         if (!opstack_op)
1120                 return -ENOMEM;
1121
1122         opstack_op->op = op;
1123         list_add(&opstack_op->list, &ps->opstack);
1124
1125         return 0;
1126 }
1127
1128 static int filter_opstack_empty(struct filter_parse_state *ps)
1129 {
1130         return list_empty(&ps->opstack);
1131 }
1132
1133 static int filter_opstack_top(struct filter_parse_state *ps)
1134 {
1135         struct opstack_op *opstack_op;
1136
1137         if (filter_opstack_empty(ps))
1138                 return OP_NONE;
1139
1140         opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1141
1142         return opstack_op->op;
1143 }
1144
1145 static int filter_opstack_pop(struct filter_parse_state *ps)
1146 {
1147         struct opstack_op *opstack_op;
1148         int op;
1149
1150         if (filter_opstack_empty(ps))
1151                 return OP_NONE;
1152
1153         opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1154         op = opstack_op->op;
1155         list_del(&opstack_op->list);
1156
1157         kfree(opstack_op);
1158
1159         return op;
1160 }
1161
1162 static void filter_opstack_clear(struct filter_parse_state *ps)
1163 {
1164         while (!filter_opstack_empty(ps))
1165                 filter_opstack_pop(ps);
1166 }
1167
1168 static char *curr_operand(struct filter_parse_state *ps)
1169 {
1170         return ps->operand.string;
1171 }
1172
1173 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1174 {
1175         struct postfix_elt *elt;
1176
1177         elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1178         if (!elt)
1179                 return -ENOMEM;
1180
1181         elt->op = OP_NONE;
1182         elt->operand = kstrdup(operand, GFP_KERNEL);
1183         if (!elt->operand) {
1184                 kfree(elt);
1185                 return -ENOMEM;
1186         }
1187
1188         list_add_tail(&elt->list, &ps->postfix);
1189
1190         return 0;
1191 }
1192
1193 static int postfix_append_op(struct filter_parse_state *ps, int op)
1194 {
1195         struct postfix_elt *elt;
1196
1197         elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1198         if (!elt)
1199                 return -ENOMEM;
1200
1201         elt->op = op;
1202         elt->operand = NULL;
1203
1204         list_add_tail(&elt->list, &ps->postfix);
1205
1206         return 0;
1207 }
1208
1209 static void postfix_clear(struct filter_parse_state *ps)
1210 {
1211         struct postfix_elt *elt;
1212
1213         while (!list_empty(&ps->postfix)) {
1214                 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1215                 list_del(&elt->list);
1216                 kfree(elt->operand);
1217                 kfree(elt);
1218         }
1219 }
1220
1221 static int filter_parse(struct filter_parse_state *ps)
1222 {
1223         int in_string = 0;
1224         int op, top_op;
1225         char ch;
1226
1227         while ((ch = infix_next(ps))) {
1228                 if (ch == '"') {
1229                         in_string ^= 1;
1230                         continue;
1231                 }
1232
1233                 if (in_string)
1234                         goto parse_operand;
1235
1236                 if (isspace(ch))
1237                         continue;
1238
1239                 if (is_op_char(ps, ch)) {
1240                         op = infix_get_op(ps, ch);
1241                         if (op == OP_NONE) {
1242                                 parse_error(ps, FILT_ERR_INVALID_OP, 0);
1243                                 return -EINVAL;
1244                         }
1245
1246                         if (strlen(curr_operand(ps))) {
1247                                 postfix_append_operand(ps, curr_operand(ps));
1248                                 clear_operand_string(ps);
1249                         }
1250
1251                         while (!filter_opstack_empty(ps)) {
1252                                 top_op = filter_opstack_top(ps);
1253                                 if (!is_precedence_lower(ps, top_op, op)) {
1254                                         top_op = filter_opstack_pop(ps);
1255                                         postfix_append_op(ps, top_op);
1256                                         continue;
1257                                 }
1258                                 break;
1259                         }
1260
1261                         filter_opstack_push(ps, op);
1262                         continue;
1263                 }
1264
1265                 if (ch == '(') {
1266                         filter_opstack_push(ps, OP_OPEN_PAREN);
1267                         continue;
1268                 }
1269
1270                 if (ch == ')') {
1271                         if (strlen(curr_operand(ps))) {
1272                                 postfix_append_operand(ps, curr_operand(ps));
1273                                 clear_operand_string(ps);
1274                         }
1275
1276                         top_op = filter_opstack_pop(ps);
1277                         while (top_op != OP_NONE) {
1278                                 if (top_op == OP_OPEN_PAREN)
1279                                         break;
1280                                 postfix_append_op(ps, top_op);
1281                                 top_op = filter_opstack_pop(ps);
1282                         }
1283                         if (top_op == OP_NONE) {
1284                                 parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1285                                 return -EINVAL;
1286                         }
1287                         continue;
1288                 }
1289 parse_operand:
1290                 if (append_operand_char(ps, ch)) {
1291                         parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1292                         return -EINVAL;
1293                 }
1294         }
1295
1296         if (strlen(curr_operand(ps)))
1297                 postfix_append_operand(ps, curr_operand(ps));
1298
1299         while (!filter_opstack_empty(ps)) {
1300                 top_op = filter_opstack_pop(ps);
1301                 if (top_op == OP_NONE)
1302                         break;
1303                 if (top_op == OP_OPEN_PAREN) {
1304                         parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1305                         return -EINVAL;
1306                 }
1307                 postfix_append_op(ps, top_op);
1308         }
1309
1310         return 0;
1311 }
1312
1313 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1314                                        struct ftrace_event_call *call,
1315                                        int op, char *operand1, char *operand2)
1316 {
1317         struct ftrace_event_field *field;
1318         static struct filter_pred pred;
1319
1320         memset(&pred, 0, sizeof(pred));
1321         pred.op = op;
1322
1323         if (op == OP_AND || op == OP_OR)
1324                 return &pred;
1325
1326         if (!operand1 || !operand2) {
1327                 parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1328                 return NULL;
1329         }
1330
1331         field = find_event_field(call, operand1);
1332         if (!field) {
1333                 parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1334                 return NULL;
1335         }
1336
1337         strcpy(pred.regex.pattern, operand2);
1338         pred.regex.len = strlen(pred.regex.pattern);
1339
1340         return init_pred(ps, field, &pred) ? NULL : &pred;
1341 }
1342
1343 static int check_preds(struct filter_parse_state *ps)
1344 {
1345         int n_normal_preds = 0, n_logical_preds = 0;
1346         struct postfix_elt *elt;
1347
1348         list_for_each_entry(elt, &ps->postfix, list) {
1349                 if (elt->op == OP_NONE)
1350                         continue;
1351
1352                 if (elt->op == OP_AND || elt->op == OP_OR) {
1353                         n_logical_preds++;
1354                         continue;
1355                 }
1356                 n_normal_preds++;
1357         }
1358
1359         if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1360                 parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1361                 return -EINVAL;
1362         }
1363
1364         return 0;
1365 }
1366
1367 static int count_preds(struct filter_parse_state *ps)
1368 {
1369         struct postfix_elt *elt;
1370         int n_preds = 0;
1371
1372         list_for_each_entry(elt, &ps->postfix, list) {
1373                 if (elt->op == OP_NONE)
1374                         continue;
1375                 n_preds++;
1376         }
1377
1378         return n_preds;
1379 }
1380
1381 struct check_pred_data {
1382         int count;
1383         int max;
1384 };
1385
1386 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1387                               int *err, void *data)
1388 {
1389         struct check_pred_data *d = data;
1390
1391         if (WARN_ON(d->count++ > d->max)) {
1392                 *err = -EINVAL;
1393                 return WALK_PRED_ABORT;
1394         }
1395         return WALK_PRED_DEFAULT;
1396 }
1397
1398 /*
1399  * The tree is walked at filtering of an event. If the tree is not correctly
1400  * built, it may cause an infinite loop. Check here that the tree does
1401  * indeed terminate.
1402  */
1403 static int check_pred_tree(struct event_filter *filter,
1404                            struct filter_pred *root)
1405 {
1406         struct check_pred_data data = {
1407                 /*
1408                  * The max that we can hit a node is three times.
1409                  * Once going down, once coming up from left, and
1410                  * once coming up from right. This is more than enough
1411                  * since leafs are only hit a single time.
1412                  */
1413                 .max   = 3 * filter->n_preds,
1414                 .count = 0,
1415         };
1416
1417         return walk_pred_tree(filter->preds, root,
1418                               check_pred_tree_cb, &data);
1419 }
1420
1421 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1422                           int *err, void *data)
1423 {
1424         int *count = data;
1425
1426         if ((move == MOVE_DOWN) &&
1427             (pred->left == FILTER_PRED_INVALID))
1428                 (*count)++;
1429
1430         return WALK_PRED_DEFAULT;
1431 }
1432
1433 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1434 {
1435         int count = 0, ret;
1436
1437         ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1438         WARN_ON(ret);
1439         return count;
1440 }
1441
1442 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1443 {
1444         struct filter_pred *pred;
1445         enum move_type move = MOVE_DOWN;
1446         int count = 0;
1447         int children;
1448         int done = 0;
1449
1450         /* No need to keep the fold flag */
1451         root->index &= ~FILTER_PRED_FOLD;
1452
1453         /* If the root is a leaf then do nothing */
1454         if (root->left == FILTER_PRED_INVALID)
1455                 return 0;
1456
1457         /* count the children */
1458         children = count_leafs(preds, &preds[root->left]);
1459         children += count_leafs(preds, &preds[root->right]);
1460
1461         root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
1462         if (!root->ops)
1463                 return -ENOMEM;
1464
1465         root->val = children;
1466
1467         pred = root;
1468         do {
1469                 switch (move) {
1470                 case MOVE_DOWN:
1471                         if (pred->left != FILTER_PRED_INVALID) {
1472                                 pred = &preds[pred->left];
1473                                 continue;
1474                         }
1475                         if (WARN_ON(count == children))
1476                                 return -EINVAL;
1477                         pred->index &= ~FILTER_PRED_FOLD;
1478                         root->ops[count++] = pred->index;
1479                         pred = get_pred_parent(pred, preds,
1480                                                pred->parent, &move);
1481                         continue;
1482                 case MOVE_UP_FROM_LEFT:
1483                         pred = &preds[pred->right];
1484                         move = MOVE_DOWN;
1485                         continue;
1486                 case MOVE_UP_FROM_RIGHT:
1487                         if (pred == root)
1488                                 break;
1489                         pred = get_pred_parent(pred, preds,
1490                                                pred->parent, &move);
1491                         continue;
1492                 }
1493                 done = 1;
1494         } while (!done);
1495
1496         return 0;
1497 }
1498
1499 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1500                              int *err, void *data)
1501 {
1502         struct filter_pred *preds = data;
1503
1504         if (move != MOVE_DOWN)
1505                 return WALK_PRED_DEFAULT;
1506         if (!(pred->index & FILTER_PRED_FOLD))
1507                 return WALK_PRED_DEFAULT;
1508
1509         *err = fold_pred(preds, pred);
1510         if (*err)
1511                 return WALK_PRED_ABORT;
1512
1513         /* eveyrhing below is folded, continue with parent */
1514         return WALK_PRED_PARENT;
1515 }
1516
1517 /*
1518  * To optimize the processing of the ops, if we have several "ors" or
1519  * "ands" together, we can put them in an array and process them all
1520  * together speeding up the filter logic.
1521  */
1522 static int fold_pred_tree(struct event_filter *filter,
1523                            struct filter_pred *root)
1524 {
1525         return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1526                               filter->preds);
1527 }
1528
1529 static int replace_preds(struct ftrace_event_call *call,
1530                          struct event_filter *filter,
1531                          struct filter_parse_state *ps,
1532                          char *filter_string,
1533                          bool dry_run)
1534 {
1535         char *operand1 = NULL, *operand2 = NULL;
1536         struct filter_pred *pred;
1537         struct filter_pred *root;
1538         struct postfix_elt *elt;
1539         struct pred_stack stack = { }; /* init to NULL */
1540         int err;
1541         int n_preds = 0;
1542
1543         n_preds = count_preds(ps);
1544         if (n_preds >= MAX_FILTER_PRED) {
1545                 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1546                 return -ENOSPC;
1547         }
1548
1549         err = check_preds(ps);
1550         if (err)
1551                 return err;
1552
1553         if (!dry_run) {
1554                 err = __alloc_pred_stack(&stack, n_preds);
1555                 if (err)
1556                         return err;
1557                 err = __alloc_preds(filter, n_preds);
1558                 if (err)
1559                         goto fail;
1560         }
1561
1562         n_preds = 0;
1563         list_for_each_entry(elt, &ps->postfix, list) {
1564                 if (elt->op == OP_NONE) {
1565                         if (!operand1)
1566                                 operand1 = elt->operand;
1567                         else if (!operand2)
1568                                 operand2 = elt->operand;
1569                         else {
1570                                 parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1571                                 err = -EINVAL;
1572                                 goto fail;
1573                         }
1574                         continue;
1575                 }
1576
1577                 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1578                         parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1579                         err = -ENOSPC;
1580                         goto fail;
1581                 }
1582
1583                 pred = create_pred(ps, call, elt->op, operand1, operand2);
1584                 if (!pred) {
1585                         err = -EINVAL;
1586                         goto fail;
1587                 }
1588
1589                 if (!dry_run) {
1590                         err = filter_add_pred(ps, filter, pred, &stack);
1591                         if (err)
1592                                 goto fail;
1593                 }
1594
1595                 operand1 = operand2 = NULL;
1596         }
1597
1598         if (!dry_run) {
1599                 /* We should have one item left on the stack */
1600                 pred = __pop_pred_stack(&stack);
1601                 if (!pred)
1602                         return -EINVAL;
1603                 /* This item is where we start from in matching */
1604                 root = pred;
1605                 /* Make sure the stack is empty */
1606                 pred = __pop_pred_stack(&stack);
1607                 if (WARN_ON(pred)) {
1608                         err = -EINVAL;
1609                         filter->root = NULL;
1610                         goto fail;
1611                 }
1612                 err = check_pred_tree(filter, root);
1613                 if (err)
1614                         goto fail;
1615
1616                 /* Optimize the tree */
1617                 err = fold_pred_tree(filter, root);
1618                 if (err)
1619                         goto fail;
1620
1621                 /* We don't set root until we know it works */
1622                 barrier();
1623                 filter->root = root;
1624         }
1625
1626         err = 0;
1627 fail:
1628         __free_pred_stack(&stack);
1629         return err;
1630 }
1631
1632 struct filter_list {
1633         struct list_head        list;
1634         struct event_filter     *filter;
1635 };
1636
1637 static int replace_system_preds(struct event_subsystem *system,
1638                                 struct filter_parse_state *ps,
1639                                 char *filter_string)
1640 {
1641         struct ftrace_event_call *call;
1642         struct filter_list *filter_item;
1643         struct filter_list *tmp;
1644         LIST_HEAD(filter_list);
1645         bool fail = true;
1646         int err;
1647
1648         list_for_each_entry(call, &ftrace_events, list) {
1649
1650                 if (strcmp(call->class->system, system->name) != 0)
1651                         continue;
1652
1653                 /*
1654                  * Try to see if the filter can be applied
1655                  *  (filter arg is ignored on dry_run)
1656                  */
1657                 err = replace_preds(call, NULL, ps, filter_string, true);
1658                 if (err)
1659                         goto fail;
1660         }
1661
1662         list_for_each_entry(call, &ftrace_events, list) {
1663                 struct event_filter *filter;
1664
1665                 if (strcmp(call->class->system, system->name) != 0)
1666                         continue;
1667
1668                 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1669                 if (!filter_item)
1670                         goto fail_mem;
1671
1672                 list_add_tail(&filter_item->list, &filter_list);
1673
1674                 filter_item->filter = __alloc_filter();
1675                 if (!filter_item->filter)
1676                         goto fail_mem;
1677                 filter = filter_item->filter;
1678
1679                 /* Can only fail on no memory */
1680                 err = replace_filter_string(filter, filter_string);
1681                 if (err)
1682                         goto fail_mem;
1683
1684                 err = replace_preds(call, filter, ps, filter_string, false);
1685                 if (err) {
1686                         filter_disable(call);
1687                         parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1688                         append_filter_err(ps, filter);
1689                 } else
1690                         call->flags |= TRACE_EVENT_FL_FILTERED;
1691                 /*
1692                  * Regardless of if this returned an error, we still
1693                  * replace the filter for the call.
1694                  */
1695                 filter = call->filter;
1696                 call->filter = filter_item->filter;
1697                 filter_item->filter = filter;
1698
1699                 fail = false;
1700         }
1701
1702         if (fail)
1703                 goto fail;
1704
1705         /*
1706          * The calls can still be using the old filters.
1707          * Do a synchronize_sched() to ensure all calls are
1708          * done with them before we free them.
1709          */
1710         synchronize_sched();
1711         list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1712                 __free_filter(filter_item->filter);
1713                 list_del(&filter_item->list);
1714                 kfree(filter_item);
1715         }
1716         return 0;
1717  fail:
1718         /* No call succeeded */
1719         list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1720                 list_del(&filter_item->list);
1721                 kfree(filter_item);
1722         }
1723         parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1724         return -EINVAL;
1725  fail_mem:
1726         /* If any call succeeded, we still need to sync */
1727         if (!fail)
1728                 synchronize_sched();
1729         list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1730                 __free_filter(filter_item->filter);
1731                 list_del(&filter_item->list);
1732                 kfree(filter_item);
1733         }
1734         return -ENOMEM;
1735 }
1736
1737 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1738 {
1739         struct filter_parse_state *ps;
1740         struct event_filter *filter;
1741         struct event_filter *tmp;
1742         int err = 0;
1743
1744         mutex_lock(&event_mutex);
1745
1746         if (!strcmp(strstrip(filter_string), "0")) {
1747                 filter_disable(call);
1748                 filter = call->filter;
1749                 if (!filter)
1750                         goto out_unlock;
1751                 call->filter = NULL;
1752                 /* Make sure the filter is not being used */
1753                 synchronize_sched();
1754                 __free_filter(filter);
1755                 goto out_unlock;
1756         }
1757
1758         err = -ENOMEM;
1759         ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1760         if (!ps)
1761                 goto out_unlock;
1762
1763         filter = __alloc_filter();
1764         if (!filter) {
1765                 kfree(ps);
1766                 goto out_unlock;
1767         }
1768
1769         replace_filter_string(filter, filter_string);
1770
1771         parse_init(ps, filter_ops, filter_string);
1772         err = filter_parse(ps);
1773         if (err) {
1774                 append_filter_err(ps, filter);
1775                 goto out;
1776         }
1777
1778         err = replace_preds(call, filter, ps, filter_string, false);
1779         if (err) {
1780                 filter_disable(call);
1781                 append_filter_err(ps, filter);
1782         } else
1783                 call->flags |= TRACE_EVENT_FL_FILTERED;
1784 out:
1785         /*
1786          * Always swap the call filter with the new filter
1787          * even if there was an error. If there was an error
1788          * in the filter, we disable the filter and show the error
1789          * string
1790          */
1791         tmp = call->filter;
1792         call->filter = filter;
1793         if (tmp) {
1794                 /* Make sure the call is done with the filter */
1795                 synchronize_sched();
1796                 __free_filter(tmp);
1797         }
1798         filter_opstack_clear(ps);
1799         postfix_clear(ps);
1800         kfree(ps);
1801 out_unlock:
1802         mutex_unlock(&event_mutex);
1803
1804         return err;
1805 }
1806
1807 int apply_subsystem_event_filter(struct event_subsystem *system,
1808                                  char *filter_string)
1809 {
1810         struct filter_parse_state *ps;
1811         struct event_filter *filter;
1812         int err = 0;
1813
1814         mutex_lock(&event_mutex);
1815
1816         /* Make sure the system still has events */
1817         if (!system->nr_events) {
1818                 err = -ENODEV;
1819                 goto out_unlock;
1820         }
1821
1822         if (!strcmp(strstrip(filter_string), "0")) {
1823                 filter_free_subsystem_preds(system);
1824                 remove_filter_string(system->filter);
1825                 filter = system->filter;
1826                 system->filter = NULL;
1827                 /* Ensure all filters are no longer used */
1828                 synchronize_sched();
1829                 filter_free_subsystem_filters(system);
1830                 __free_filter(filter);
1831                 goto out_unlock;
1832         }
1833
1834         err = -ENOMEM;
1835         ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1836         if (!ps)
1837                 goto out_unlock;
1838
1839         filter = __alloc_filter();
1840         if (!filter)
1841                 goto out;
1842
1843         replace_filter_string(filter, filter_string);
1844         /*
1845          * No event actually uses the system filter
1846          * we can free it without synchronize_sched().
1847          */
1848         __free_filter(system->filter);
1849         system->filter = filter;
1850
1851         parse_init(ps, filter_ops, filter_string);
1852         err = filter_parse(ps);
1853         if (err) {
1854                 append_filter_err(ps, system->filter);
1855                 goto out;
1856         }
1857
1858         err = replace_system_preds(system, ps, filter_string);
1859         if (err)
1860                 append_filter_err(ps, system->filter);
1861
1862 out:
1863         filter_opstack_clear(ps);
1864         postfix_clear(ps);
1865         kfree(ps);
1866 out_unlock:
1867         mutex_unlock(&event_mutex);
1868
1869         return err;
1870 }
1871
1872 #ifdef CONFIG_PERF_EVENTS
1873
1874 void ftrace_profile_free_filter(struct perf_event *event)
1875 {
1876         struct event_filter *filter = event->filter;
1877
1878         event->filter = NULL;
1879         __free_filter(filter);
1880 }
1881
1882 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1883                               char *filter_str)
1884 {
1885         int err;
1886         struct event_filter *filter;
1887         struct filter_parse_state *ps;
1888         struct ftrace_event_call *call;
1889
1890         mutex_lock(&event_mutex);
1891
1892         call = event->tp_event;
1893
1894         err = -EINVAL;
1895         if (!call)
1896                 goto out_unlock;
1897
1898         err = -EEXIST;
1899         if (event->filter)
1900                 goto out_unlock;
1901
1902         filter = __alloc_filter();
1903         if (!filter) {
1904                 err = PTR_ERR(filter);
1905                 goto out_unlock;
1906         }
1907
1908         err = -ENOMEM;
1909         ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1910         if (!ps)
1911                 goto free_filter;
1912
1913         parse_init(ps, filter_ops, filter_str);
1914         err = filter_parse(ps);
1915         if (err)
1916                 goto free_ps;
1917
1918         err = replace_preds(call, filter, ps, filter_str, false);
1919         if (!err)
1920                 event->filter = filter;
1921
1922 free_ps:
1923         filter_opstack_clear(ps);
1924         postfix_clear(ps);
1925         kfree(ps);
1926
1927 free_filter:
1928         if (err)
1929                 __free_filter(filter);
1930
1931 out_unlock:
1932         mutex_unlock(&event_mutex);
1933
1934         return err;
1935 }
1936
1937 #endif /* CONFIG_PERF_EVENTS */
1938