#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct tracer;
struct dentry;

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

struct trace_print_flags_u64 {
	unsigned long long	mask;
	const char		*name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
					 unsigned long long val,
					 const struct trace_print_flags_u64
								 *symbol_array);
#endif

const char *ftrace_print_hex_seq(struct trace_seq *p,
				 const unsigned char *buf, int len);
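
/*
 * Illustrative sketch (not part of the original header): a symbol table as
 * consumed by ftrace_print_symbols_seq().  TRACE_EVENT() users normally
 * reach this helper through __print_symbolic(), which also appends the
 * terminating { -1, NULL } entry.  The state names below are made up.
 *
 *	static const struct trace_print_flags my_states[] = {
 *		{ 0, "IDLE"	},
 *		{ 1, "RUNNING"	},
 *		{ 2, "BLOCKED"	},
 *		{ -1, NULL	}
 *	};
 *
 *	p = ftrace_print_symbols_seq(&iter->tmp_seq, state, my_states);
 */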

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			padding;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
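
/*
 * With the 16-bit "type" field above, FTRACE_MAX_EVENT evaluates to
 * (1 << 16) - 1 = 65535, the largest event type id that fits in a
 * trace entry.
 */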

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};

struct trace_event;

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
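
/*
 * Illustrative sketch (not part of the original header): a minimal
 * print_line callback wired into a struct trace_event and registered
 * with register_ftrace_event().  The names and the output text are
 * made up for the example.
 *
 *	static enum print_line_t
 *	example_trace_output(struct trace_iterator *iter, int flags,
 *			     struct trace_event *event)
 *	{
 *		if (!trace_seq_printf(&iter->seq, "example event\n"))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 *	static struct trace_event_functions example_funcs = {
 *		.trace	= example_trace_output,
 *	};
 *
 *	static struct trace_event example_event = {
 *		.funcs	= &example_funcs,
 *	};
 *
 *	// returns the assigned event type id (0 on failure)
 *	int ret = register_ftrace_event(&example_event);
 */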

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);
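
/*
 * Illustrative sketch (not part of the original header): the usual
 * reserve/fill/commit sequence for writing one record into the current
 * trace buffer.  MY_EVENT_TYPE and struct my_entry are made-up names.
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer, MY_EVENT_TYPE,
 *						  sizeof(*entry),
 *						  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;	// fill in the event payload
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */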

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;

struct ftrace_event_class {
	char			*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct ftrace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	*(*get_fields)(struct ftrace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
			    enum trace_reg type, void *data);

enum {
	TRACE_EVENT_FL_ENABLED_BIT,
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_RECORDED_CMD_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
};

enum {
	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
};

struct ftrace_event_call {
	struct list_head	list;
	struct ftrace_event_class *class;
	char			*name;
	struct dentry		*dir;
	struct trace_event	event;
	const char		*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;

	/*
	 * 32 bit flags:
	 *   bit 1:		enabled
	 *   bit 2:		filter_active
	 *   bit 3:		enabled cmd record
	 *   bit 4:		allow trace by non root (cap any)
	 *   bit 5:		failed to apply filter
	 *   bit 6:		ftrace internal event (do not enable)
	 *
	 * Changes to flags must hold the event_mutex.
	 *
	 * Note: Reads of flags do not hold the event_mutex since
	 * they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such.
	 */
	unsigned int		flags;

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
#endif
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags = value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);
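
/*
 * Illustrative sketch (not part of the original header): event definitions
 * normally reach the macro above through TRACE_EVENT_FLAGS(), e.g. to let
 * non-root perf users open the event.  "my_event" is a made-up event name.
 *
 *	TRACE_EVENT_FLAGS(my_event, TRACE_EVENT_FL_CAP_ANY)
 */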

#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);
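
/*
 * Illustrative sketch (not part of the original header): generated trace
 * probes call filter_current_check_discard() right before committing; if
 * the filter rejects the record, the reserved event is discarded instead
 * of committed.  buffer/event/entry follow the reserve sketch earlier in
 * this file, and event_call is the event's struct ftrace_event_call.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer, event,
 *						   irq_flags, pc);
 */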

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
};

#define EVENT_STORAGE_SIZE 128
extern struct mutex event_storage_mutex;
extern char event_storage[EVENT_STORAGE_SIZE];

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)	(((type)(-1)) < (type)0)
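
/*
 * Illustrative sketch (not part of the original header): a hand-rolled
 * define_fields callback describing one integer field of a made-up
 * struct my_entry, the same way the TRACE_EVENT() macros do it.
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		int			my_field;
 *	};
 *
 *	static int my_event_define_fields(struct ftrace_event_call *call)
 *	{
 *		return trace_define_field(call, "int", "my_field",
 *					  offsetof(struct my_entry, my_field),
 *					  sizeof(int), is_signed_type(int),
 *					  FILTER_OTHER);
 *	}
 */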

int trace_set_clr_event(const char *system, const char *event, int set);
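
/*
 * Illustrative sketch (not part of the original header): enabling and
 * disabling events from kernel code.  The system/event names are only
 * examples; passing a NULL event name acts on the whole system.
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *	trace_set_clr_event("sched", NULL, 0);
 */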

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when it is not a
 * constant, even though the outer if statement would optimize it out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
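
/*
 * Illustrative sketch (not part of the original header): event_trace_printk()
 * takes the caller's instruction pointer plus a printk-style format, e.g.:
 *
 *	event_trace_printk(_THIS_IP_, "request %d completed\n", req_id);
 *
 * "req_id" is a made-up variable; _THIS_IP_ comes from <linux/kernel.h>.
 */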

#ifdef CONFIG_PERF_EVENTS

struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, struct pt_regs *regs, void *head,
		       struct task_struct *task)
{
	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
#endif
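
/*
 * Illustrative sketch (not part of the original header): the perf probe
 * path allocates a scratch buffer, fills it, and hands it to perf:
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *	// fill in entry ...
 *	head = this_cpu_ptr(event_call->perf_events);
 *	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 *
 * Here "size", "regs", and "event_call" are assumed to be provided by the
 * caller, as in the code generated by the TRACE_EVENT() perf hooks.
 */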

#endif /* _LINUX_FTRACE_EVENT_H */