Merge commit 'linus/master' into tracing/kprobes
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 55a25c933d159d5a289265e604d608d96277e63c..e812f1c1264cffb4ca36803dd0b11fc87f90d450 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,6 +8,57 @@
 #include <linux/module.h>
 #include "trace.h"
 
+/*
+ * alloc_percpu() takes a type, not a size, so define a dummy
+ * type whose size matches the desired buffer size.
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char           *trace_profile_buf;
+EXPORT_SYMBOL_GPL(trace_profile_buf);
+
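+/*
+ * NMI context gets its own buffer: an NMI can fire while the regular
+ * buffer is in use on the same CPU.
+ */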
+char           *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
+
+/* Count the events in use (per event id, not per instance) */
+static int     total_profile_count;
+
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+       char *buf;
+       int ret = -ENOMEM;
+
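+       /*
+        * profile_count rests at -1, so only the first enabler of this
+        * event sees zero here and performs the real setup.
+        */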
+       if (atomic_inc_return(&event->profile_count))
+               return 0;
+
+       if (!total_profile_count) {
+               buf = (char *)alloc_percpu(profile_buf_t);
+               if (!buf)
+                       goto fail_buf;
+
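+               /*
+                * Publish the buffer with rcu_assign_pointer() so a reader
+                * that sees a non-NULL pointer also sees the allocation.
+                */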
+               rcu_assign_pointer(trace_profile_buf, buf);
+
+               buf = (char *)alloc_percpu(profile_buf_t);
+               if (!buf)
+                       goto fail_buf_nmi;
+
+               rcu_assign_pointer(trace_profile_buf_nmi, buf);
+       }
+
+       ret = event->profile_enable(event);
+       if (!ret) {
+               total_profile_count++;
+               return 0;
+       }
+
+fail_buf_nmi:
+       if (!total_profile_count) {
+               free_percpu(trace_profile_buf_nmi);
+               free_percpu(trace_profile_buf);
+               trace_profile_buf_nmi = NULL;
+               trace_profile_buf = NULL;
+       }
+fail_buf:
+       atomic_dec(&event->profile_count);
+
+       return ret;
+}
+
 int ftrace_profile_enable(int event_id)
 {
        struct ftrace_event_call *event;
@@ -17,7 +68,7 @@ int ftrace_profile_enable(int event_id)
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
-                       ret = event->profile_enable(event);
+                       ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
@@ -26,6 +77,33 @@ int ftrace_profile_enable(int event_id)
        return ret;
 }
 
+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+       char *buf, *nmi_buf;
+
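+       /*
+        * The count falls back to -1 when the last enabler of this event
+        * goes away; only then tear the profiling state down.
+        */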
+       if (!atomic_add_negative(-1, &event->profile_count))
+               return;
+
+       event->profile_disable(event);
+
+       if (!--total_profile_count) {
+               buf = trace_profile_buf;
+               rcu_assign_pointer(trace_profile_buf, NULL);
+
+               nmi_buf = trace_profile_buf_nmi;
+               rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+               /*
+                * Ensure all profiling uses of the buffers have finished
+                * before releasing them.
+                */
+               synchronize_sched();
+
+               free_percpu(buf);
+               free_percpu(nmi_buf);
+       }
+}
+
 void ftrace_profile_disable(int event_id)
 {
        struct ftrace_event_call *event;
@@ -33,7 +111,7 @@ void ftrace_profile_disable(int event_id)
        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
-                       event->profile_disable(event);
+                       ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
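
For context, a short sketch of the consumer side these buffers serve,
modeled on the syscall profiling code of the same kernel series; the
function name profile_sample_sketch is illustrative and not part of
this patch. A sampler disables interrupts so it cannot migrate or be
preempted, picks the NMI or regular buffer, and writes only to its own
per-cpu slice; the synchronize_sched() in the disable path above waits
for exactly such sections before the buffers are freed.

static void profile_sample_sketch(void)
{
	unsigned long flags;
	char *buf, *raw_data;

	/*
	 * Interrupts off: we stay on this CPU and cannot be preempted,
	 * which is the condition synchronize_sched() above waits out.
	 */
	local_irq_save(flags);

	/* NMIs may interrupt a sampler mid-write, hence their own buffer. */
	if (in_nmi())
		buf = rcu_dereference(trace_profile_buf_nmi);
	else
		buf = rcu_dereference(trace_profile_buf);

	/* NULL means profiling was (or is being) disabled. */
	if (!buf)
		goto out;

	/* Each CPU writes only to its own FTRACE_MAX_PROFILE_SIZE slice. */
	raw_data = per_cpu_ptr(buf, smp_processor_id());

	/* ... fill raw_data and hand the sample to perf ... */
out:
	local_irq_restore(flags);
}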