kernel/trace/trace_event_profile.c

/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

/*
 * alloc_percpu() takes a type rather than a size, so create a dummy
 * type whose size matches the desired buffer size.
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;

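/*
 * Per-cpu scratch buffers that the profiling probes copy raw event data
 * into.  A separate buffer is kept for NMI context so that an NMI which
 * interrupts an event in progress does not clobber its data.  Both are
 * published and retired under RCU (see the synchronize_sched() in the
 * disable path below).
 */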
char            *trace_profile_buf;
char            *trace_profile_buf_nmi;

/* Count the events in use (per event id, not per instance) */
static int      total_profile_count;

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

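        /*
         * event->profile_count is a -1 based refcount: the first enable
         * takes it from -1 to 0, so atomic_inc_return() yields 0 and we
         * fall through; any later enable just bumps it and returns.
         */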
        if (atomic_inc_return(&event->profile_count))
                return 0;

        if (!total_profile_count++) {
                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(trace_profile_buf, buf);

                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(trace_profile_buf_nmi, buf);
        }

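        /*
         * Register this event's profiling probe.  Once this succeeds, the
         * probe may start writing into the buffers published above.
         */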
        ret = event->profile_enable();
        if (!ret)
                return 0;

        /*
         * The buffers are per-cpu allocations: release them with
         * free_percpu(), not kfree(), and only if we were the first
         * user, since other enabled events may still be using them.
         */
        if (total_profile_count == 1) {
                free_percpu(trace_profile_buf_nmi);
                trace_profile_buf_nmi = NULL;
        }
fail_buf_nmi:
        if (total_profile_count == 1) {
                free_percpu(trace_profile_buf);
                trace_profile_buf = NULL;
        }
fail_buf:
        total_profile_count--;
        atomic_dec(&event->profile_count);

        return ret;
}

int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

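        /*
         * Look the event up by id under event_mutex, which protects the
         * ftrace_events list.  try_module_get() pins the module providing
         * the event while profiling is enabled; the matching module_put()
         * is done in ftrace_profile_disable().
         */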
        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

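        /*
         * Counterpart of the -1 based refcount in the enable path: only
         * the drop from 0 back to -1 makes the result negative, so only
         * the last user of this event id tears the profiling down.
         */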
        if (!atomic_add_negative(-1, &event->profile_count))
                return;

        event->profile_disable();

        if (!--total_profile_count) {
                buf = trace_profile_buf;
                rcu_assign_pointer(trace_profile_buf, NULL);

                nmi_buf = trace_profile_buf_nmi;
                rcu_assign_pointer(trace_profile_buf_nmi, NULL);

                /*
                 * Ensure every probe still using the buffers has finished
                 * before releasing them.  The probes run with preemption
                 * disabled, so synchronize_sched() waits for all of them
                 * to complete.
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}

void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}
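
/*
 * For reference, a sketch (not part of this file) of how the generated
 * per-event probe is expected to consume the buffers above; the real
 * code lives in the TRACE_EVENT() templates.  The function name and the
 * "fill and hand off to perf" step are illustrative assumptions:
 *
 *      static void probe_sketch(void)
 *      {
 *              unsigned long irq_flags;
 *              char *raw_data;
 *
 *              local_irq_save(irq_flags);
 *              if (in_nmi())
 *                      raw_data = rcu_dereference(trace_profile_buf_nmi);
 *              else
 *                      raw_data = rcu_dereference(trace_profile_buf);
 *              if (!raw_data)
 *                      goto end;
 *              raw_data = per_cpu_ptr(raw_data, smp_processor_id());
 *              // fill raw_data with the event record (bounded by
 *              // FTRACE_MAX_PROFILE_SIZE) and hand it to perf
 * end:
 *              local_irq_restore(irq_flags);
 *      }
 *
 * Because the probe runs with irqs (and thus preemption) disabled, the
 * synchronize_sched() in ftrace_profile_disable_event() is sufficient
 * before the buffers are freed.
 */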