ftrace: Synchronize setting function_trace_op with ftrace_trace_function
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0e9f9eaade2f6a2dd0e729cd2d3bb38b4f6f8ec0..0ffb811cbb1fa86575e415632f01c3765a5a29a1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
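
The new global stages the update: the next ops pointer is prepared in
set_function_trace_op and only published to the live function_trace_op once
every CPU can safely observe it (see update_ftrace_function() below). A
minimal sketch of the discipline, with new_ops standing in for whatever ops
the updater computed:

        set_function_trace_op = new_ops;              /* stage: only the updater reads this */
        /* ... switch callers to a safe callback, sync all CPUs ... */
        function_trace_op = set_function_trace_op;    /* publish to the trampolines */
        smp_wmb();                                    /* paired with the rmb forced by IPI below */
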
@@ -278,6 +280,23 @@ static void update_global_ops(void)
        global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+       /*
+        * This function is just a stub to implement a hard force
+        * of synchronize_sched(). This requires synchronizing
+        * tasks even in userspace and idle.
+        *
+        * Yes, function tracing is rude.
+        */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+       /* Probably not needed, but do it anyway */
+       smp_rmb();
+}
+
 static void update_ftrace_function(void)
 {
        ftrace_func_t func;
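
These two stubs are the synchronization primitives for the switch:
schedule_on_each_cpu(ftrace_sync) forces every CPU through a context switch
(a heavyweight stand-in for synchronize_sched() that also covers userspace
and idle), while smp_call_function(ftrace_sync_ipi, NULL, 1) runs an
smp_rmb() on every other CPU and waits for it. A sketch of the barrier
pairing, illustrative rather than a verbatim excerpt:

        /* updater, one CPU */
        function_trace_op = set_function_trace_op;      /* store the new op */
        smp_wmb();                                      /* order the store before the IPI */
        smp_call_function(ftrace_sync_ipi, NULL, 1);    /* wait for an rmb everywhere */

        /* every other CPU, inside the IPI handler */
        smp_rmb();      /* from here on, the new function_trace_op is visible */
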
@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
-                       function_trace_op = ftrace_global_list;
+                       set_function_trace_op = ftrace_global_list;
                else
-                       function_trace_op = ftrace_ops_list;
+                       set_function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
-               function_trace_op = &ftrace_list_end;
+               set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }
 
+       /* If there's no change, then do nothing more here */
+       if (ftrace_trace_function == func)
+               return;
+
+       /*
+        * If we are using the list function, it doesn't care
+        * about the function_trace_op.
+        */
+       if (func == ftrace_ops_list_func) {
+               ftrace_trace_function = func;
+               /*
+                * Don't even bother setting function_trace_op,
+                * it would be racy to do so anyway.
+                */
+               return;
+       }
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+       /*
+        * For static tracing, we need to be a bit more careful.
+        * The function change takes effect immediately. Thus,
+        * we need to coordinate the setting of the function_trace_op
+        * with the setting of the ftrace_trace_function.
+        *
+        * Set the function to the list ops, which will call the
+        * function we want, albeit indirectly, but it handles the
+        * ftrace_ops and doesn't depend on function_trace_op.
+        */
+       ftrace_trace_function = ftrace_ops_list_func;
+       /*
+        * Make sure all CPUs see this. Yes, this is slow, but static
+        * tracing is slow and nasty to have enabled.
+        */
+       schedule_on_each_cpu(ftrace_sync);
+       /* Now all CPUs are using the list ops. */
+       function_trace_op = set_function_trace_op;
+       /* Make sure the function_trace_op is visible on all CPUs */
+       smp_wmb();
+       /* Nasty way to force an rmb on all CPUs */
+       smp_call_function(ftrace_sync_ipi, NULL, 1);
+       /* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
        ftrace_trace_function = func;
 }
 
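Why all the ceremony: without CONFIG_DYNAMIC_FTRACE the callback switch
takes effect on the very next traced call, so updating both pointers back to
back would let a CPU run the new function against the old ops. An
illustrative interleaving of the race being closed:

        CPU 0 (updater)                         CPU 1 (tracing)
        ---------------                         ---------------
        ftrace_trace_function = new_func;
                                                new_func(ip, parent_ip, op, regs);
                                                    /* op = function_trace_op,
                                                       still the OLD ops here */
        function_trace_op = set_function_trace_op;

Detouring through ftrace_ops_list_func first is safe because the list
function walks ftrace_ops_list itself and never looks at function_trace_op.
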
@@ -410,17 +472,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        return 0;
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-       /*
-        * This function is just a stub to implement a hard force
-        * of synchronize_sched(). This requires synchronizing
-        * tasks even in userspace and idle.
-        *
-        * Yes, function tracing is rude.
-        */
-}
-
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
@@ -1082,19 +1133,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
-       loff_t ret;
-
-       if (file->f_mode & FMODE_READ)
-               ret = seq_lseek(file, offset, whence);
-       else
-               file->f_pos = ret = 1;
-
-       return ret;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
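
The deleted helper is not lost: every caller below is switched to
tracing_lseek(), by all appearances the same function under a generic name,
moved out of this file (its new home, presumably kernel/trace/trace.c, is
not visible in this diff). A sketch with the body copied from the removal
above:

        loff_t
        tracing_lseek(struct file *file, loff_t offset, int whence)
        {
                loff_t ret;

                /* only files opened for reading are seekable */
                if (file->f_mode & FMODE_READ)
                        ret = seq_lseek(file, offset, whence);
                else
                        file->f_pos = ret = 1;

                return ret;
        }
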
@@ -1992,8 +2030,14 @@ void ftrace_modify_all_code(int command)
        else if (command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);
 
-       if (update && ftrace_trace_function != ftrace_ops_list_func)
+       if (update && ftrace_trace_function != ftrace_ops_list_func) {
+               function_trace_op = set_function_trace_op;
+               smp_wmb();
+               /* If irqs are disabled, we are in stop machine */
+               if (!irqs_disabled())
+                       smp_call_function(ftrace_sync_ipi, NULL, 1);
                ftrace_update_ftrace_func(ftrace_trace_function);
+       }
 
        if (command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
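
The dynamic path repeats the publish-then-IPI pattern just before
ftrace_update_ftrace_func() patches the call sites, with one twist: when
interrupts are disabled we are running under stop_machine, every other CPU
is spinning with interrupts off, so no trampoline can be executing and
smp_call_function() would deadlock waiting for IPI handlers that never run.
A sketch of the consumer this ordering protects, modeled loosely on an arch
ftrace trampoline (illustrative C, not any real arch's code):

        static void ftrace_caller_sketch(unsigned long ip, unsigned long parent_ip)
        {
                /*
                 * Arch trampolines load function_trace_op and hand it to the
                 * callback; the smp_wmb() plus IPI above guarantee the load
                 * is fresh before the newly patched callback can run.
                 */
                struct ftrace_ops *op = function_trace_op;

                ftrace_trace_function(ip, parent_ip, op, NULL);
        }
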
@@ -2739,7 +2783,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * tracing_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
  */
 int
@@ -3767,7 +3811,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_filter_lseek,
+       .llseek = tracing_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3775,7 +3819,7 @@ static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = seq_read,
        .write = ftrace_notrace_write,
-       .llseek = ftrace_filter_lseek,
+       .llseek = tracing_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -4038,7 +4082,7 @@ static const struct file_operations ftrace_graph_fops = {
        .open           = ftrace_graph_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_graph_release,
 };
 
@@ -4046,7 +4090,7 @@ static const struct file_operations ftrace_graph_notrace_fops = {
        .open           = ftrace_graph_notrace_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -4719,7 +4763,7 @@ static const struct file_operations ftrace_pid_fops = {
        .open           = ftrace_pid_open,
        .write          = ftrace_pid_write,
        .read           = seq_read,
-       .llseek         = ftrace_filter_lseek,
+       .llseek         = tracing_lseek,
        .release        = ftrace_pid_release,
 };