/*
 * kernel/trace/trace_output.c
 * (snapshot from commit: "tracing: add lock depth to entries")
 */
1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
/* must be a power of 2 */
#define EVENT_HASHSIZE	128

/* Protects event_hash and the registered-event list below. */
DECLARE_RWSEM(trace_event_mutex);

/* Per-cpu scratch trace_seq for event print helpers. */
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

/* Hash of registered trace_events, keyed by event type number. */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

/* Next dynamic event type to hand out (above the static types). */
static int next_event_type = __TRACE_LAST_TYPE + 1;
25
26 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
27 {
28         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
29
30         seq_write(m, s->buffer, len);
31
32         trace_seq_init(s);
33 }
34
35 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
36 {
37         struct trace_seq *s = &iter->seq;
38         struct trace_entry *entry = iter->ent;
39         struct bprint_entry *field;
40         int ret;
41
42         trace_assign_type(field, entry);
43
44         ret = trace_seq_bprintf(s, field->fmt, field->buf);
45         if (!ret)
46                 return TRACE_TYPE_PARTIAL_LINE;
47
48         return TRACE_TYPE_HANDLED;
49 }
50
51 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
52 {
53         struct trace_seq *s = &iter->seq;
54         struct trace_entry *entry = iter->ent;
55         struct print_entry *field;
56         int ret;
57
58         trace_assign_type(field, entry);
59
60         ret = trace_seq_printf(s, "%s", field->buf);
61         if (!ret)
62                 return TRACE_TYPE_PARTIAL_LINE;
63
64         return TRACE_TYPE_HANDLED;
65 }
66
/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 *
 * Returns 0 when the formatted text does not fit (nothing is
 * committed to the buffer in that case), otherwise the non-zero
 * amount of room that was available.  Callers treat the result
 * as a boolean.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	/* Room left, reserving one byte for a terminating NUL. */
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
101
/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 * @args: the va_list holding the format arguments
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_vprintf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 *
 * Returns 0 when the formatted text does not fit (nothing is
 * committed in that case), otherwise the non-zero room available.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	/* Room left, reserving one byte for a terminating NUL. */
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (!len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);
133
134 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
135 {
136         int len = (PAGE_SIZE - 1) - s->len;
137         int ret;
138
139         if (!len)
140                 return 0;
141
142         ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
143
144         /* If we can't write it all, don't bother writing anything */
145         if (ret >= len)
146                 return 0;
147
148         s->len += ret;
149
150         return len;
151 }
152
153 /**
154  * trace_seq_puts - trace sequence printing of simple string
155  * @s: trace sequence descriptor
156  * @str: simple string to record
157  *
158  * The tracer may use either the sequence operations or its own
159  * copy to user routines. This function records a simple string
160  * into a special buffer (@s) for later retrieval by a sequencer
161  * or other mechanism.
162  */
163 int trace_seq_puts(struct trace_seq *s, const char *str)
164 {
165         int len = strlen(str);
166
167         if (len > ((PAGE_SIZE - 1) - s->len))
168                 return 0;
169
170         memcpy(s->buffer + s->len, str, len);
171         s->len += len;
172
173         return len;
174 }
175
176 int trace_seq_putc(struct trace_seq *s, unsigned char c)
177 {
178         if (s->len >= (PAGE_SIZE - 1))
179                 return 0;
180
181         s->buffer[s->len++] = c;
182
183         return 1;
184 }
185
186 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
187 {
188         if (len > ((PAGE_SIZE - 1) - s->len))
189                 return 0;
190
191         memcpy(s->buffer + s->len, mem, len);
192         s->len += len;
193
194         return len;
195 }
196
/*
 * Append the bytes of @mem to @s as hex digits plus one trailing space.
 * Bytes are emitted most-significant first (source order on big-endian,
 * reversed on little-endian) so the output reads as one big-endian
 * number.
 *
 * NOTE(review): @len is not checked against HEX_CHARS; callers are
 * presumably limited to small fixed-size fields — confirm that
 * 2 * len + 1 <= HEX_CHARS for every caller.
 */
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
215
216 void *trace_seq_reserve(struct trace_seq *s, size_t len)
217 {
218         void *ret;
219
220         if (len > ((PAGE_SIZE - 1) - s->len))
221                 return NULL;
222
223         ret = s->buffer + s->len;
224         s->len += len;
225
226         return ret;
227 }
228
/*
 * Append the pathname of @path to @s, with embedded '\n' characters
 * mangled (via mangle_path) so a path cannot fake a record break in
 * the output.  A single '?' is written when d_path() fails.  Returns
 * 1 on success, 0 when the buffer is full or mangling fails.
 */
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	/* d_path writes the name at the END of the supplied window. */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			/* p is one past the copied name. */
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}
249
/*
 * Decode @flags into @p as a @delim-separated list of names from
 * @flag_array; any remaining bits with no name are appended as a hex
 * number.  The result is NUL-terminated.  Returns a pointer to the
 * start of the decoded string inside @p's buffer.
 */
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0;  flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		/* Clear the handled bits so leftovers can be detected. */
		flags &= ~mask;
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
285
286 const char *
287 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
288                          const struct trace_print_flags *symbol_array)
289 {
290         int i;
291         const char *ret = p->buffer + p->len;
292
293         for (i = 0;  symbol_array[i].name; i++) {
294
295                 if (val != symbol_array[i].mask)
296                         continue;
297
298                 trace_seq_puts(p, symbol_array[i].name);
299                 break;
300         }
301
302         if (!p->len)
303                 trace_seq_printf(p, "0x%lx", val);
304                 
305         trace_seq_putc(p, 0);
306
307         return ret;
308 }
309 EXPORT_SYMBOL(ftrace_print_symbols_seq);
310
#ifdef CONFIG_KRETPROBES
/*
 * Kretprobes replace a function's return address with the trampoline,
 * so a symbol lookup on such a frame yields "kretprobe_trampoline";
 * report that explicitly instead of the misleading symbol.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	/* size includes the NUL, so this matches the exact symbol only. */
	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
327
/*
 * Print the bare symbol name for @address into @s using @fmt.
 * Without CONFIG_KALLSYMS there is nothing to resolve; report success
 * so callers keep printing the rest of the line.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	/* Substitute a marker when the frame is a kretprobe trampoline. */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
343
/*
 * Like seq_print_sym_short(), but prints symbol+offset/size via
 * sprint_symbol().  Reports success when CONFIG_KALLSYMS is off.
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
359
/* Fixed hex width for an instruction pointer at this word size. */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
365
/*
 * Print a userspace address.  When @mm is given and @ip lies in a file
 * mapping, print "path-of-mapping[+offset]"; the raw address is added
 * when TRACE_ITER_SYM_ADDR is set or when no mapping was found.
 * Returns 0 when the trace_seq filled up.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		/* mmap_sem guards the vma (and hence vm_file) lookup. */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
394
/*
 * Print every caller address recorded in a userstack @entry, one
 * " => addr" line each.  With TRACE_ITER_SYM_USEROBJ the owning task's
 * mm is looked up so addresses can be mapped back to file mappings.
 * Returns 0 when the trace_seq filled up.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX terminates the recorded stack. */
		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		/* A zero address has no symbol; print "??" instead. */
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}
441
442 int
443 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
444 {
445         int ret;
446
447         if (!ip)
448                 return trace_seq_printf(s, "0");
449
450         if (sym_flags & TRACE_ITER_SYM_OFFSET)
451                 ret = seq_print_sym_offset(s, "%s", ip);
452         else
453                 ret = seq_print_sym_short(s, "%s", ip);
454
455         if (!ret)
456                 return 0;
457
458         if (sym_flags & TRACE_ITER_SYM_ADDR)
459                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
460         return ret;
461 }
462
/*
 * Print the compact latency header for one entry: "comm-pid cpu",
 * the irq-state / need-resched / hardirq-softirq characters, the
 * lock depth, and the preempt count.  Returns 0 when the trace_seq
 * filled up.
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
			      comm, entry->pid, cpu,
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	/* lock_depth < 0 means no lock depth recorded; print '.'. */
	if (entry->lock_depth < 0)
		ret = trace_seq_putc(s, '.');
	else
		ret = trace_seq_printf(s, "%d", entry->lock_depth);
	if (!ret)
		return 0;

	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_putc(s, '.');
}
496
/* Deltas above this many usecs get flagged with '!' in the output. */
static unsigned long preempt_mark_thresh = 100;

/*
 * Print the latency timestamp column: absolute usecs plus a marker for
 * the time to the next event ('!' long, '+' noticeable, ' ' short).
 */
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}
507
508 int trace_print_context(struct trace_iterator *iter)
509 {
510         struct trace_seq *s = &iter->seq;
511         struct trace_entry *entry = iter->ent;
512         unsigned long long t = ns2usecs(iter->ts);
513         unsigned long usec_rem = do_div(t, USEC_PER_SEC);
514         unsigned long secs = (unsigned long)t;
515         char comm[TASK_COMM_LEN];
516
517         trace_find_cmdline(entry->pid, comm);
518
519         return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
520                                 comm, entry->pid, iter->cpu, secs, usec_rem);
521 }
522
/*
 * Print the latency-format context for the current entry: either the
 * verbose fixed-column form, or the compact lat_print_generic() form
 * followed by the timestamp.  Returns 0 when the trace_seq filled up.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	/* With no following entry, show a zero delta. */
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}
561
/* One-character code per task state, indexed by state bit + 1. */
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	/* Index 0 is "running"; otherwise use the lowest state bit + 1. */
	int bit = state ? __ffs(state) + 1 : 0;

	/* '?' for states beyond the table (excluding its NUL). */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
570
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so the mask is a cheap mod. */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
593
/* List of dynamically-typed events, kept sorted by type number. */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a free dynamic event type number.  On success, *list is set to
 * the node the new event should be inserted before (via list_add_tail)
 * and the type number is returned; returns 0 when every type is taken.
 * Caller holds trace_event_mutex for writing.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}
623
/* Take trace_event_mutex for readers of the event hash/list. */
void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}
628
/* Release the read side taken by trace_event_read_lock(). */
void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}
633
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			/* Fresh types exhausted; try to reuse a freed one. */
			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* Fill any unset callbacks with the do-nothing handler. */
	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
711
/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	/* Drop the event from both the hash and the sorted list. */
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}
721
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Locked wrapper around __unregister_ftrace_event().  Always
 * returns 0.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
735
/*
 * Standard events
 */

/* Fallback print handler: emits nothing, claims the line is handled. */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}
744
/* TRACE_FN */

/* Text form of a function entry: "ip <-parent_ip" plus newline. */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	/* " <-caller" suffix, only when requested and recorded. */
	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
772
773 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
774 {
775         struct ftrace_entry *field;
776
777         trace_assign_type(field, iter->ent);
778
779         if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
780                               field->ip,
781                               field->parent_ip))
782                 return TRACE_TYPE_PARTIAL_LINE;
783
784         return TRACE_TYPE_HANDLED;
785 }
786
/* Hex form of a function entry: ip and parent_ip as hex fields. */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* The _RET macros bail out of this function on a full buffer. */
	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
799
/* Binary form of a function entry: ip and parent_ip as raw fields. */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* The _RET macros bail out of this function on a full buffer. */
	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
812
/* Output callbacks for TRACE_FN entries. */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};
820
/* TRACE_CTX and TRACE_WAKE */

/*
 * Shared text printer for context-switch and wakeup entries; @delim
 * distinguishes them ("==>" for switches, "  +" for wakeups).
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
848
/* Context switch in text mode, marked with "==>". */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}
853
/* Wakeup in text mode, marked with "  +". */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}
859
860 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
861 {
862         struct ctx_switch_entry *field;
863         int T;
864
865         trace_assign_type(field, iter->ent);
866
867         if (!S)
868                 task_state_char(field->prev_state);
869         T = task_state_char(field->next_state);
870         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
871                               field->prev_pid,
872                               field->prev_prio,
873                               S,
874                               field->next_cpu,
875                               field->next_pid,
876                               field->next_prio,
877                               T))
878                 return TRACE_TYPE_PARTIAL_LINE;
879
880         return TRACE_TYPE_HANDLED;
881 }
882
/* Context switch in raw mode; 0 = derive the prev-state character. */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}
887
/* Wakeup in raw mode; '+' marks the record as a wakeup. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}
892
893
894 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
895 {
896         struct ctx_switch_entry *field;
897         struct trace_seq *s = &iter->seq;
898         int T;
899
900         trace_assign_type(field, iter->ent);
901
902         if (!S)
903                 task_state_char(field->prev_state);
904         T = task_state_char(field->next_state);
905
906         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
907         SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
908         SEQ_PUT_HEX_FIELD_RET(s, S);
909         SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
910         SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
911         SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
912         SEQ_PUT_HEX_FIELD_RET(s, T);
913
914         return TRACE_TYPE_HANDLED;
915 }
916
/* Context switch in hex mode; 0 = derive the prev-state character. */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}
921
/* Wakeup in hex mode; '+' marks the record as a wakeup. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}
926
/*
 * Binary form shared by context switches and wakeups: raw pid/prio/
 * state values (no state characters, no next_cpu field).
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* The _RET macros bail out of this function on a full buffer. */
	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}
944
/* Output callbacks for TRACE_CTX (context switch) entries. */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};
952
/* Output callbacks for TRACE_WAKE (wakeup) entries. */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};
960
/* TRACE_SPECIAL */

/* Text (and raw) form of a special entry: "# arg1 arg2 arg3". */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
					     int flags)
{
	struct special_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
977
/* Hex form of a special entry: the three args as hex fields. */
static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* The _RET macros bail out of this function on a full buffer. */
	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
992
/* Binary form of a special entry: the three args as raw fields. */
static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* The _RET macros bail out of this function on a full buffer. */
	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
1007
/* The raw format of a special entry is the same as its trace format. */
static struct trace_event trace_special_event = {
        .type           = TRACE_SPECIAL,
        .trace          = trace_special_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};
1015
1016 /* TRACE_STACK */
1017
1018 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1019                                            int flags)
1020 {
1021         struct stack_entry *field;
1022         struct trace_seq *s = &iter->seq;
1023         int i;
1024
1025         trace_assign_type(field, iter->ent);
1026
1027         if (!trace_seq_puts(s, "<stack trace>\n"))
1028                 goto partial;
1029         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1030                 if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
1031                         break;
1032                 if (!trace_seq_puts(s, " => "))
1033                         goto partial;
1034
1035                 if (!seq_print_ip_sym(s, field->caller[i], flags))
1036                         goto partial;
1037                 if (!trace_seq_puts(s, "\n"))
1038                         goto partial;
1039         }
1040
1041         return TRACE_TYPE_HANDLED;
1042
1043  partial:
1044         return TRACE_TYPE_PARTIAL_LINE;
1045 }
1046
/* Kernel stack traces reuse the TRACE_SPECIAL raw/hex/binary handlers. */
static struct trace_event trace_stack_event = {
        .type           = TRACE_STACK,
        .trace          = trace_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};
1054
1055 /* TRACE_USER_STACK */
1056 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1057                                                 int flags)
1058 {
1059         struct userstack_entry *field;
1060         struct trace_seq *s = &iter->seq;
1061
1062         trace_assign_type(field, iter->ent);
1063
1064         if (!trace_seq_puts(s, "<user stack trace>\n"))
1065                 goto partial;
1066
1067         if (!seq_print_userip_objs(field, s, flags))
1068                 goto partial;
1069
1070         return TRACE_TYPE_HANDLED;
1071
1072  partial:
1073         return TRACE_TYPE_PARTIAL_LINE;
1074 }
1075
/* User-space stack traces also reuse the TRACE_SPECIAL secondary handlers. */
static struct trace_event trace_user_stack_event = {
        .type           = TRACE_USER_STACK,
        .trace          = trace_user_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};
1083
1084 /* TRACE_BPRINT */
1085 static enum print_line_t
1086 trace_bprint_print(struct trace_iterator *iter, int flags)
1087 {
1088         struct trace_entry *entry = iter->ent;
1089         struct trace_seq *s = &iter->seq;
1090         struct bprint_entry *field;
1091
1092         trace_assign_type(field, entry);
1093
1094         if (!seq_print_ip_sym(s, field->ip, flags))
1095                 goto partial;
1096
1097         if (!trace_seq_puts(s, ": "))
1098                 goto partial;
1099
1100         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1101                 goto partial;
1102
1103         return TRACE_TYPE_HANDLED;
1104
1105  partial:
1106         return TRACE_TYPE_PARTIAL_LINE;
1107 }
1108
1109
1110 static enum print_line_t
1111 trace_bprint_raw(struct trace_iterator *iter, int flags)
1112 {
1113         struct bprint_entry *field;
1114         struct trace_seq *s = &iter->seq;
1115
1116         trace_assign_type(field, iter->ent);
1117
1118         if (!trace_seq_printf(s, ": %lx : ", field->ip))
1119                 goto partial;
1120
1121         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1122                 goto partial;
1123
1124         return TRACE_TYPE_HANDLED;
1125
1126  partial:
1127         return TRACE_TYPE_PARTIAL_LINE;
1128 }
1129
1130
/* bprint entries have no hex/binary handlers; those output modes fall back. */
static struct trace_event trace_bprint_event = {
        .type           = TRACE_BPRINT,
        .trace          = trace_bprint_print,
        .raw            = trace_bprint_raw,
};
1136
1137 /* TRACE_PRINT */
1138 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1139                                            int flags)
1140 {
1141         struct print_entry *field;
1142         struct trace_seq *s = &iter->seq;
1143
1144         trace_assign_type(field, iter->ent);
1145
1146         if (!seq_print_ip_sym(s, field->ip, flags))
1147                 goto partial;
1148
1149         if (!trace_seq_printf(s, ": %s", field->buf))
1150                 goto partial;
1151
1152         return TRACE_TYPE_HANDLED;
1153
1154  partial:
1155         return TRACE_TYPE_PARTIAL_LINE;
1156 }
1157
1158 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1159 {
1160         struct print_entry *field;
1161
1162         trace_assign_type(field, iter->ent);
1163
1164         if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1165                 goto partial;
1166
1167         return TRACE_TYPE_HANDLED;
1168
1169  partial:
1170         return TRACE_TYPE_PARTIAL_LINE;
1171 }
1172
/* print entries have no hex/binary handlers; those output modes fall back. */
static struct trace_event trace_print_event = {
        .type           = TRACE_PRINT,
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
};
1178
1179
/* NULL-terminated table of built-in events registered at boot by init_events(). */
static struct trace_event *events[] __initdata = {
        &trace_fn_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_special_event,
        &trace_stack_event,
        &trace_user_stack_event,
        &trace_bprint_event,
        &trace_print_event,
        NULL
};
1191
1192 __init static int init_events(void)
1193 {
1194         struct trace_event *event;
1195         int i, ret;
1196
1197         for (i = 0; events[i]; i++) {
1198                 event = events[i];
1199
1200                 ret = register_ftrace_event(event);
1201                 if (!ret) {
1202                         printk(KERN_WARNING "event %d failed to register\n",
1203                                event->type);
1204                         WARN_ON_ONCE(1);
1205                 }
1206         }
1207
1208         return 0;
1209 }
1210 device_initcall(init_events);