ftrace: Synchronize setting function_trace_op with ftrace_trace_function
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 22fa556967609465a0dbd98e238ee1d14b52e328..0ffb811cbb1fa86575e415632f01c3765a5a29a1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,23 @@ static void update_global_ops(void)
        global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+       /*
+        * This function is just a stub to implement a hard force
+        * of synchronize_sched(). This requires synchronizing
+        * tasks even in userspace and idle.
+        *
+        * Yes, function tracing is rude.
+        */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+       /* Probably not needed, but do it anyway */
+       smp_rmb();
+}
+
 static void update_ftrace_function(void)
 {
        ftrace_func_t func;
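
A note on these two stubs: scheduling the empty ftrace_sync() on every
CPU forces each CPU through a context switch, a harder guarantee than
synchronize_sched() alone since it also covers CPUs sitting in idle or
in userspace. ftrace_sync_ipi() is the IPI-side counterpart: calling it
via smp_call_function() executes an smp_rmb() on every other CPU. A
minimal sketch of the first idiom, assuming an illustrative noop_work()
rather than tree code:

	static void noop_work(struct work_struct *work) { }

	/* Blocks until every CPU has run the (empty) work item,
	 * i.e. until every CPU has scheduled at least once. */
	schedule_on_each_cpu(noop_work);
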
@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
-                       function_trace_op = ftrace_global_list;
+                       set_function_trace_op = ftrace_global_list;
                else
-                       function_trace_op = ftrace_ops_list;
+                       set_function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
-               function_trace_op = &ftrace_list_end;
+               set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }
 
+       /* If there's no change, then do nothing more here */
+       if (ftrace_trace_function == func)
+               return;
+
+       /*
+        * If we are using the list function, it doesn't care
+        * about the function_trace_op.
+        */
+       if (func == ftrace_ops_list_func) {
+               ftrace_trace_function = func;
+               /*
+                * Don't even bother setting function_trace_op,
+                * it would be racy to do so anyway.
+                */
+               return;
+       }
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+       /*
+        * For static tracing, we need to be a bit more careful.
+        * The function change takes effect immediately. Thus,
+        * we need to coordinate the setting of the function_trace_op
+        * with the setting of the ftrace_trace_function.
+        *
+        * Set the function to the list ops, which will call the
+        * function we want, albeit indirectly; it handles the
+        * ftrace_ops itself and doesn't depend on function_trace_op.
+        */
+       ftrace_trace_function = ftrace_ops_list_func;
+       /*
+        * Make sure all CPUs see this. Yes, this is slow, but static
+        * tracing is slow and nasty to have enabled.
+        */
+       schedule_on_each_cpu(ftrace_sync);
+       /* Now all CPUs are using the list ops. */
+       function_trace_op = set_function_trace_op;
+       /* Make sure the function_trace_op is visible on all CPUs */
+       smp_wmb();
+       /* Nasty way to force an rmb on all CPUs */
+       smp_call_function(ftrace_sync_ipi, NULL, 1);
+       /* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
        ftrace_trace_function = func;
 }
 
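
To see why the dance matters on static ftrace, consider a hedged sketch
of what an arch trampoline effectively does (sketch_mcount is a
hypothetical name, not tree code): it performs two separate loads, and
without the synchronization above they can come from different
generations of the update.

	static void sketch_mcount(unsigned long ip, unsigned long parent_ip)
	{
		ftrace_func_t fn = ftrace_trace_function;  /* may be new...   */
		struct ftrace_ops *op = function_trace_op; /* ...op still old */

		fn(ip, parent_ip, op, NULL); /* callback handed the wrong ops */
	}

Parking every CPU on ftrace_ops_list_func first is safe precisely
because that function ignores function_trace_op and walks the ops list
itself.
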
@@ -367,9 +429,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
-
        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;
 
@@ -413,24 +472,10 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        return 0;
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-       /*
-        * This function is just a stub to implement a hard force
-        * of synchronize_sched(). This requires synchronizing
-        * tasks even in userspace and idle.
-        *
-        * Yes, function tracing is rude.
-        */
-}
-
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
 
-       if (ftrace_disabled)
-               return -ENODEV;
-
        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;
 
@@ -1088,19 +1133,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
-       loff_t ret;
-
-       if (file->f_mode & FMODE_READ)
-               ret = seq_lseek(file, offset, whence);
-       else
-               file->f_pos = ret = 1;
-
-       return ret;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -1998,8 +2030,14 @@ void ftrace_modify_all_code(int command)
        else if (command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);
 
-       if (update && ftrace_trace_function != ftrace_ops_list_func)
+       if (update && ftrace_trace_function != ftrace_ops_list_func) {
+               function_trace_op = set_function_trace_op;
+               smp_wmb();
+               /* If irqs are disabled, we are in stop machine */
+               if (!irqs_disabled())
+                       smp_call_function(ftrace_sync_ipi, NULL, 1);
                ftrace_update_ftrace_func(ftrace_trace_function);
+       }
 
        if (command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
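
The dynamic-ftrace path needs the same publish ordering, in condensed
form. As a sketch, the guarantee this hunk establishes is:

	/*
	 *   updating CPU                       every other CPU
	 *   ------------                       ---------------
	 *   function_trace_op = set_...;
	 *   smp_wmb();
	 *   smp_call_function(...)  --IPI-->   smp_rmb() in ftrace_sync_ipi()
	 *   ftrace_update_ftrace_func(func);   seeing new func implies new op
	 */

The irqs_disabled() test covers the stop_machine() case: the other CPUs
are spinning with interrupts off, so a waiting smp_call_function() could
never complete there, and stop_machine() already prevents them from
running the trampoline concurrently.
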
@@ -2088,10 +2126,15 @@ static void ftrace_startup_enable(int command)
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
        bool hash_enable = true;
+       int ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       ret = __register_ftrace_function(ops);
+       if (ret)
+               return ret;
+
        ftrace_start_up++;
        command |= FTRACE_UPDATE_CALLS;
 
@@ -2113,12 +2156,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
        return 0;
 }
 
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
        bool hash_disable = true;
+       int ret;
 
        if (unlikely(ftrace_disabled))
-               return;
+               return -ENODEV;
+
+       ret = __unregister_ftrace_function(ops);
+       if (ret)
+               return ret;
 
        ftrace_start_up--;
        /*
@@ -2153,9 +2201,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
        }
 
        if (!command || !ftrace_enabled)
-               return;
+               return 0;
 
        ftrace_run_update_code(command);
+       return 0;
 }
 
 static void ftrace_startup_sysctl(void)
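
The register/startup split is now symmetric: ftrace_startup() both
registers and enables an ops, ftrace_shutdown() both unregisters and
disables one, and each reports failure. In sketch form, the contract
callers rely on from here on (illustrative, not tree code):

	ret = ftrace_startup(ops, 0);   /* register + enable */
	if (ret)
		return ret;             /* ops was never registered */
	/* ... tracing ... */
	ret = ftrace_shutdown(ops, 0);  /* unregister + disable */
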
@@ -2734,7 +2783,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * tracing_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
  */
 int
@@ -3060,16 +3109,13 @@ static void __enable_ftrace_function_probe(void)
        if (i == FTRACE_FUNC_HASHSIZE)
                return;
 
-       ret = __register_ftrace_function(&trace_probe_ops);
-       if (!ret)
-               ret = ftrace_startup(&trace_probe_ops, 0);
+       ret = ftrace_startup(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
-       int ret;
        int i;
 
        if (!ftrace_probe_registered)
@@ -3082,9 +3128,7 @@ static void __disable_ftrace_function_probe(void)
        }
 
        /* no more funcs left */
-       ret = __unregister_ftrace_function(&trace_probe_ops);
-       if (!ret)
-               ftrace_shutdown(&trace_probe_ops, 0);
+       ftrace_shutdown(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 0;
 }
@@ -3767,7 +3811,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_filter_lseek,
+       .llseek = tracing_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3775,7 +3819,7 @@ static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = seq_read,
        .write = ftrace_notrace_write,
-       .llseek = ftrace_filter_lseek,
+       .llseek = tracing_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -4038,7 +4082,7 @@ static const struct file_operations ftrace_graph_fops = {
        .open           = ftrace_graph_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_graph_release,
 };
 
@@ -4046,7 +4090,7 @@ static const struct file_operations ftrace_graph_notrace_fops = {
        .open           = ftrace_graph_notrace_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -4366,12 +4410,15 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)                  \
-       ({                                              \
-               (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
-               0;                                      \
+# define ftrace_startup(ops, command)                                  \
+       ({                                                              \
+               int ___ret = __register_ftrace_function(ops);           \
+               if (!___ret)                                            \
+                       (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
+               ___ret;                                                 \
        })
-# define ftrace_shutdown(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+
 # define ftrace_startup_sysctl()       do { } while (0)
 # define ftrace_shutdown_sysctl()      do { } while (0)
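
For !CONFIG_DYNAMIC_FTRACE the macros keep the same error-returning
contract without defining real functions. Since ftrace_startup() is a
GCC statement expression, a call site expands, in effect, to:

	/* ret = ftrace_startup(ops, 0); behaves like: */
	ret = ({
		int ___ret = __register_ftrace_function(ops);
		if (!___ret)
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;
		___ret;		/* value of the whole expression */
	});
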
 
@@ -4716,7 +4763,7 @@ static const struct file_operations ftrace_pid_fops = {
        .open           = ftrace_pid_open,
        .write          = ftrace_pid_write,
        .read           = seq_read,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_pid_release,
 };
 
@@ -4780,9 +4827,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_lock);
 
-       ret = __register_ftrace_function(ops);
-       if (!ret)
-               ret = ftrace_startup(ops, 0);
+       ret = ftrace_startup(ops, 0);
 
        mutex_unlock(&ftrace_lock);
 
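
End to end, a user of the public API now reaches
__register_ftrace_function() only through ftrace_startup(). A minimal
sketch with illustrative names (my_ops and my_callback are not tree
code):

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* runs for every traced function */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
	};

	ret = register_ftrace_function(&my_ops);   /* -> ftrace_startup()  */
	/* ... */
	ret = unregister_ftrace_function(&my_ops); /* -> ftrace_shutdown() */
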
@@ -4801,9 +4846,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
        int ret;
 
        mutex_lock(&ftrace_lock);
-       ret = __unregister_ftrace_function(ops);
-       if (!ret)
-               ftrace_shutdown(ops, 0);
+       ret = ftrace_shutdown(ops, 0);
        mutex_unlock(&ftrace_lock);
 
        return ret;
@@ -4997,6 +5040,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
        return NOTIFY_DONE;
 }
 
+/* Just a placeholder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+       .func           = ftrace_stub,
+       .flags          = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
+                               FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
 {
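
Since ftrace_startup() now registers whatever ops it is handed, the
graph tracer can no longer borrow global_ops without registering it as
a side effect; the stub fgraph_ops keeps its accounting separate. For
reference, a hypothetical caller of the graph API (my_entry and
my_return are illustrative, not tree code):

	static int my_entry(struct ftrace_graph_ent *trace) { return 1; }
	static void my_return(struct ftrace_graph_ret *trace) { }

	ret = register_ftrace_graph(my_return, my_entry);
	/* ... */
	unregister_ftrace_graph();
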
@@ -5023,7 +5073,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_graph_return = retfunc;
        ftrace_graph_entry = entryfunc;
 
-       ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+       ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
 out:
        mutex_unlock(&ftrace_lock);
@@ -5040,7 +5090,7 @@ void unregister_ftrace_graph(void)
        ftrace_graph_active--;
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
-       ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+       ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);