tracing: Consolidate updating of count for traceon/off

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8e3ad8082ab7074fde4ea9da403961afd2eadc45..38cfb290ecd9516d8292264ed987e4c886224e03 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -28,7 +28,7 @@ static void tracing_stop_function_trace(void);
 static int function_trace_init(struct trace_array *tr)
 {
        func_trace = tr;
-       tr->cpu = get_cpu();
+       tr->trace_buffer.cpu = get_cpu();
        put_cpu();
 
        tracing_start_cmdline_record();
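
This blobdiff spans more than the titled commit: the first hunks adapt the file to the trace_buffer split, in which per-CPU state that used to live directly in struct trace_array (tr->cpu, tr->data[cpu]) moved into an embedded struct trace_buffer. A rough sketch of the layout these hunks assume, per kernel/trace/trace.h of this kernel generation:

	struct trace_buffer {
		struct trace_array		*tr;
		struct ring_buffer		*buffer;	/* the ring buffer itself */
		struct trace_array_cpu __percpu	*data;		/* per-CPU state, e.g. ->disabled */
		cycle_t				time_start;
		int				cpu;		/* CPU recorded when tracing starts */
	};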
@@ -44,35 +44,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-       tracing_reset_online_cpus(tr);
-}
-
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-       struct trace_array *tr = func_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
-       int cpu;
-       int pc;
-
-       if (unlikely(!ftrace_function_enabled))
-               return;
-
-       pc = preempt_count();
-       preempt_disable_notrace();
-       local_save_flags(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1))
-               trace_function(tr, ip, parent_ip, flags, pc);
-
-       atomic_dec(&data->disabled);
-       preempt_enable_notrace();
+       tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
 /* Our option */
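
The removed function_trace_call_preempt_only() was the lighter-weight variant selected when the TRACE_ITER_PREEMPTONLY trace flag was set (see the tracing_start_function_trace() hunk further down). Its preempt-disable approach becomes the only behavior: the next hunk folds it into function_trace_call() together with the per-context recursion guard. A minimal sketch of that guard, using the helpers from kernel/trace/trace.h:

	int bit;

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		return;			/* this context is already inside the tracer */
	/* ... record the event ... */
	trace_clear_recursion(bit);

Each context (normal, softirq, irq, NMI) owns one bit in current->trace_recursion, so an interrupt that arrives while tracing is in progress can itself be traced, while genuine same-context recursion is silently dropped.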
@@ -85,34 +57,34 @@ static struct tracer_flags func_flags;
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
-
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
-       long disabled;
+       int bit;
        int cpu;
        int pc;
 
        if (unlikely(!ftrace_function_enabled))
                return;
 
-       /*
-        * Need to use raw, since this must be called before the
-        * recursive protection is performed.
-        */
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
+       pc = preempt_count();
+       preempt_disable_notrace();
 
-       if (likely(disabled == 1)) {
-               pc = preempt_count();
+       bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+       if (bit < 0)
+               goto out;
+
+       cpu = smp_processor_id();
+       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+       if (!atomic_read(&data->disabled)) {
+               local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
+       trace_clear_recursion(bit);
 
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+ out:
+       preempt_enable_notrace();
 }
 
 static void
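
function_stack_trace_call() below keeps its heavier local_irq_save()/atomic_inc_return() guard; this hunk only converts the per-CPU lookup to the new layout. per_cpu_ptr() is the standard accessor for dynamically allocated per-CPU memory, replacing the old static tr->data[cpu] array indexing. Condensed, the guarded body now reads:

	struct trace_array_cpu *data;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (atomic_inc_return(&data->disabled) == 1)
		trace_function(tr, ip, parent_ip, flags, pc);
	atomic_dec(&data->disabled);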
@@ -135,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
+       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1)) {
@@ -185,11 +157,6 @@ static void tracing_start_function_trace(void)
 {
        ftrace_function_enabled = 0;
 
-       if (trace_flags & TRACE_ITER_PREEMPTONLY)
-               trace_ops.func = function_trace_call_preempt_only;
-       else
-               trace_ops.func = function_trace_call;
-
        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
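
With only one callback left, tracing_start_function_trace() no longer needs to pick trace_ops.func at runtime; the remaining decision, visible in the surviving context lines, is which ftrace_ops to register, driven by the TRACE_FUNC_OPT_STACK bit that users toggle through the func_stack_trace entry in the tracing options directory.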
@@ -247,38 +214,37 @@ static struct tracer function_trace __read_mostly =
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-static void
-ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+static int update_count(void **data)
 {
-       long *count = (long *)data;
-
-       if (tracing_is_on())
-               return;
+       unsigned long *count = (long *)data;
 
        if (!*count)
-               return;
+               return 0;
 
        if (*count != -1)
                (*count)--;
 
-       tracing_on();
+       return 1;
 }
 
 static void
-ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
 {
-       long *count = (long *)data;
-
-       if (!tracing_is_on())
+       if (tracing_is_on())
                return;
 
-       if (!*count)
-               return;
+       if (update_count(data))
+               tracing_on();
+}
 
-       if (*count != -1)
-               (*count)--;
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+       if (!tracing_is_on())
+               return;
 
-       tracing_off();
+       if (update_count(data))
+               tracing_off();
 }
 
 static int
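
The final hunk is the change named in the title: ftrace_traceon() and ftrace_traceoff() carried identical countdown logic, now factored into update_count(). The count is stored directly in the void * data slot that the function-probe code hands to the callback: 0 means the trigger is used up, -1 means fire forever, and any other value is decremented once per hit, with a nonzero return meaning "act now". A hedged usage sketch (the (void *)5 encoding mirrors how the probe code stores the parsed count):

	void *data = (void *)5;		/* e.g. from "schedule:traceon:5" in set_ftrace_filter */

	if (update_count(&data))	/* returns 1; the slot now holds (void *)4 */
		tracing_on();

After five hits the slot reaches zero and update_count() returns 0, so the trigger goes quiet.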