ftrace: move function tracer functions out of trace.c
author    Steven Rostedt <srostedt@redhat.com>
          Fri, 16 Jan 2009 01:40:23 +0000 (20:40 -0500)
committer Ingo Molnar <mingo@elte.hu>
          Fri, 16 Jan 2009 11:17:10 +0000 (12:17 +0100)
Impact: clean up of trace.c

The function tracer functions were put in trace.c because they needed
to share static variables that lived in trace.c.  Since then, those
variables have become global for various reasons. This patch moves
the function tracer functions into trace_functions.c, where they belong.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
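
For orientation, a minimal sketch (not part of the patch; names taken from the
diff below) of how trace_functions.c ends up wired after the move: the callbacks
read the file-local func_trace pointer instead of the global_trace array they
used in trace.c, and start_function_trace() records the trace_array before the
ftrace_ops are registered.

    static struct trace_array *func_trace;       /* file-local in trace_functions.c */

    static void function_trace_call(unsigned long ip, unsigned long parent_ip)
    {
            struct trace_array *tr = func_trace; /* was &global_trace in trace.c */

            /* per-CPU disable, trace_function(), re-enable as in the hunk below */
    }

    static void start_function_trace(struct trace_array *tr)
    {
            func_trace = tr;        /* assignment moved here from function_trace_init() */
            tr->cpu = get_cpu();
            tracing_reset_online_cpus(tr);
            put_cpu();
            /* ...assumed to finish by calling tracing_start_function_trace(),
               which selects trace_ops.func and registers the ops */
    }
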
kernel/trace/trace.c
kernel/trace/trace_functions.c

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c54cb12522892a6a771b899feb10ecd71a43361..2585ffb6c6b5fd05e7110c27408eeb77a4589d7f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1046,65 +1046,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
        local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_TRACER
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
-{
-       struct trace_array *tr = &global_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
-       int cpu, resched;
-       int pc;
-
-       if (unlikely(!ftrace_function_enabled))
-               return;
-
-       pc = preempt_count();
-       resched = ftrace_preempt_disable();
-       local_save_flags(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1))
-               trace_function(tr, data, ip, parent_ip, flags, pc);
-
-       atomic_dec(&data->disabled);
-       ftrace_preempt_enable(resched);
-}
-
-static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
-       struct trace_array *tr = &global_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
-       int cpu;
-       int pc;
-
-       if (unlikely(!ftrace_function_enabled))
-               return;
-
-       /*
-        * Need to use raw, since this must be called before the
-        * recursive protection is performed.
-        */
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1)) {
-               pc = preempt_count();
-               trace_function(tr, data, ip, parent_ip, flags, pc);
-       }
-
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
-}
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
@@ -1162,31 +1103,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-       .func = function_trace_call,
-};
-
-void tracing_start_function_trace(void)
-{
-       ftrace_function_enabled = 0;
-
-       if (trace_flags & TRACE_ITER_PREEMPTONLY)
-               trace_ops.func = function_trace_call_preempt_only;
-       else
-               trace_ops.func = function_trace_call;
-
-       register_ftrace_function(&trace_ops);
-       ftrace_function_enabled = 1;
-}
-
-void tracing_stop_function_trace(void)
-{
-       ftrace_function_enabled = 0;
-       unregister_ftrace_function(&trace_ops);
-}
-#endif
-
 enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3a5fa08cedb0a78caacd7ff3d96cf3333f4e04f0..2dce3c7370d13a420042f7ae77e05d5613753517 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -20,6 +20,7 @@ static struct trace_array     *func_trace;
 
 static void start_function_trace(struct trace_array *tr)
 {
+       func_trace = tr;
        tr->cpu = get_cpu();
        tracing_reset_online_cpus(tr);
        put_cpu();
@@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
-       func_trace = tr;
        start_function_trace(tr);
        return 0;
 }
@@ -51,6 +51,64 @@ static void function_trace_start(struct trace_array *tr)
        tracing_reset_online_cpus(tr);
 }
 
+static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = func_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu, resched;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       pc = preempt_count();
+       resched = ftrace_preempt_disable();
+       local_save_flags(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               trace_function(tr, data, ip, parent_ip, flags, pc);
+
+       atomic_dec(&data->disabled);
+       ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = func_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       /*
+        * Need to use raw, since this must be called before the
+        * recursive protection is performed.
+        */
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               trace_function(tr, data, ip, parent_ip, flags, pc);
+       }
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
@@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
        local_irq_restore(flags);
 }
 
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+       .func = function_trace_call,
+};
+
+void tracing_start_function_trace(void)
+{
+       ftrace_function_enabled = 0;
+
+       if (trace_flags & TRACE_ITER_PREEMPTONLY)
+               trace_ops.func = function_trace_call_preempt_only;
+       else
+               trace_ops.func = function_trace_call;
+
+       register_ftrace_function(&trace_ops);
+       ftrace_function_enabled = 1;
+}
+
+void tracing_stop_function_trace(void)
+{
+       ftrace_function_enabled = 0;
+       unregister_ftrace_function(&trace_ops);
+}
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
        .func = function_stack_trace_call,