diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index fdff65dff1bb5e2101970185e04072f34dc0cd57..483162a9f9080258fa08b76247e40103d8e5a6c5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,6 +13,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,6 +76,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
 }
 
+/* Our two options */
+enum {
+       TRACE_FUNC_OPT_STACK    = 0x1,
+       TRACE_FUNC_OPT_PSTORE   = 0x2,
+};
+
+static struct tracer_flags func_flags;
+
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -100,6 +109,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1)) {
+               /*
+                * So far tracing doesn't support multiple buffers, so
+                * we make an explicit call for now.
+                */
+               if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
+                       pstore_ftrace_call(ip, parent_ip);
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }
@@ -162,14 +177,12 @@ static struct ftrace_ops trace_stack_ops __read_mostly =
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
-/* Our two options */
-enum {
-       TRACE_FUNC_OPT_STACK = 0x1,
-};
-
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+#ifdef CONFIG_PSTORE_FTRACE
+       { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
 #endif
        { } /* Always set a last empty entry */
 };
@@ -208,10 +221,11 @@ static void tracing_stop_function_trace(void)
 
 static int func_set_flag(u32 old_flags, u32 bit, int set)
 {
-       if (bit == TRACE_FUNC_OPT_STACK) {
+       switch (bit) {
+       case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-                       return 0;
+                       break;
 
                if (set) {
                        unregister_ftrace_function(&trace_ops);
@@ -221,10 +235,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
                        register_ftrace_function(&trace_ops);
                }
 
-               return 0;
+               break;
+       case TRACE_FUNC_OPT_PSTORE:
+               break;
+       default:
+               return -EINVAL;
        }
 
-       return -EINVAL;
+       return 0;
 }
 
 static struct tracer function_trace __read_mostly =
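
For reference, a minimal sketch (an assumption, not part of the patch shown above) of how the forward-declared func_flags is presumably defined further down in this file. The tracer core lists each TRACER_OPT entry from func_flags.opts under debugfs tracing/options/ and, once func_set_flag() accepts a toggle, updates the matching bit in func_flags.val, which is the value tested by both function_trace_call() and func_set_flag().

static struct tracer_flags func_flags = {
        .val  = 0,              /* all options disabled by default */
        .opts = func_opts,      /* TRACE_FUNC_OPT_STACK / TRACE_FUNC_OPT_PSTORE */
};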