1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/pagemap.h>
24 #include <linux/hardirq.h>
25 #include <linux/linkage.h>
26 #include <linux/uaccess.h>
27 #include <linux/kprobes.h>
28 #include <linux/ftrace.h>
29 #include <linux/module.h>
30 #include <linux/percpu.h>
31 #include <linux/splice.h>
32 #include <linux/kdebug.h>
33 #include <linux/string.h>
34 #include <linux/rwsem.h>
35 #include <linux/slab.h>
36 #include <linux/ctype.h>
37 #include <linux/init.h>
38 #include <linux/poll.h>
39 #include <linux/nmi.h>
40 #include <linux/fs.h>
41 #include <linux/sched/rt.h>
42
43 #include "trace.h"
44 #include "trace_output.h"
45
46 /*
47  * On boot up, the ring buffer is set to the minimum size, so that
48  * we do not waste memory on systems that are not using tracing.
49  */
50 bool ring_buffer_expanded;
51
52 /*
53  * We need to change this state when a selftest is running.
54  * A selftest will look into the ring buffer to count the
55  * entries inserted during the selftest, although concurrent
56  * insertions into the ring buffer, such as trace_printk(), could occur
57  * at the same time, giving false positive or negative results.
58  */
59 static bool __read_mostly tracing_selftest_running;
60
61 /*
62  * If a tracer is running, we do not want to run SELFTEST.
63  */
64 bool __read_mostly tracing_selftest_disabled;
65
66 /* For tracers that don't implement custom flags */
67 static struct tracer_opt dummy_tracer_opt[] = {
68         { }
69 };
70
71 static struct tracer_flags dummy_tracer_flags = {
72         .val = 0,
73         .opts = dummy_tracer_opt
74 };
75
76 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
77 {
78         return 0;
79 }
80
81 /*
82  * To prevent the comm cache from being overwritten when no
83  * tracing is active, only save the comm when a trace event
84  * occurs.
85  */
86 static DEFINE_PER_CPU(bool, trace_cmdline_save);
87
88 /*
89  * Kill all tracing for good (never come back).
90  * It is initialized to 1 but turns to zero if the initialization
91  * of the tracer is successful; that is the only place that sets
92  * it back to zero.
93  */
94 static int tracing_disabled = 1;
95
96 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
97
98 cpumask_var_t __read_mostly     tracing_buffer_mask;
99
100 /*
101  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102  *
103  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104  * is set, then ftrace_dump is called. This will output the contents
105  * of the ftrace buffers to the console.  This is very useful for
106  * capturing traces that lead to crashes and outputting them to a
107  * serial console.
108  *
109  * It is off by default, but you can enable it either by specifying
110  * "ftrace_dump_on_oops" on the kernel command line, or by setting
111  * /proc/sys/kernel/ftrace_dump_on_oops.
112  * Set it to 1 to dump the buffers of all CPUs.
113  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
114  */
115
116 enum ftrace_dump_mode ftrace_dump_on_oops;
117
118 static int tracing_set_tracer(const char *buf);
119
120 #define MAX_TRACER_SIZE         100
121 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
122 static char *default_bootup_tracer;
123
124 static bool allocate_snapshot;
125
126 static int __init set_cmdline_ftrace(char *str)
127 {
128         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
129         default_bootup_tracer = bootup_tracer_buf;
130         /* We are using ftrace early, expand it */
131         ring_buffer_expanded = true;
132         return 1;
133 }
134 __setup("ftrace=", set_cmdline_ftrace);
135
136 static int __init set_ftrace_dump_on_oops(char *str)
137 {
138         if (*str++ != '=' || !*str) {
139                 ftrace_dump_on_oops = DUMP_ALL;
140                 return 1;
141         }
142
143         if (!strcmp("orig_cpu", str)) {
144                 ftrace_dump_on_oops = DUMP_ORIG;
145                 return 1;
146         }
147
148         return 0;
149 }
150 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
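/*
 * Illustrative examples of using the knobs parsed above (no new
 * functionality): boot with "ftrace_dump_on_oops" or
 * "ftrace_dump_on_oops=orig_cpu" on the kernel command line, or flip it
 * at run time with:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */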
151
152 static int __init boot_alloc_snapshot(char *str)
153 {
154         allocate_snapshot = true;
155         /* We also need the main ring buffer expanded */
156         ring_buffer_expanded = true;
157         return 1;
158 }
159 __setup("alloc_snapshot", boot_alloc_snapshot);
160
161
162 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
163 static char *trace_boot_options __initdata;
164
165 static int __init set_trace_boot_options(char *str)
166 {
167         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
168         trace_boot_options = trace_boot_options_buf;
169         return 0;
170 }
171 __setup("trace_options=", set_trace_boot_options);
172
173 unsigned long long ns2usecs(cycle_t nsec)
174 {
175         nsec += 500;
176         do_div(nsec, 1000);
177         return nsec;
178 }
179
180 /*
181  * The global_trace is the descriptor that holds the tracing
182  * buffers for the live tracing. For each CPU, it contains
183  * a linked list of pages that will store trace entries. The
184  * page descriptors of those pages are used to hold the
185  * linked list, by linking the lru item in each page descriptor
186  * to the other pages of that CPU's buffer.
187  *
188  * For each active CPU there is a data field that holds the
189  * pages for the buffer for that CPU. Each CPU has the same number
190  * of pages allocated for its buffer.
191  */
192 static struct trace_array       global_trace;
193
194 LIST_HEAD(ftrace_trace_arrays);
195
196 int filter_current_check_discard(struct ring_buffer *buffer,
197                                  struct ftrace_event_call *call, void *rec,
198                                  struct ring_buffer_event *event)
199 {
200         return filter_check_discard(call, rec, buffer, event);
201 }
202 EXPORT_SYMBOL_GPL(filter_current_check_discard);
203
204 cycle_t ftrace_now(int cpu)
205 {
206         u64 ts;
207
208         /* Early boot up does not have a buffer yet */
209         if (!global_trace.trace_buffer.buffer)
210                 return trace_clock_local();
211
212         ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
213         ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
214
215         return ts;
216 }
217
218 int tracing_is_enabled(void)
219 {
220         return tracing_is_on();
221 }
222
223 /*
224  * trace_buf_size is the size in bytes that is allocated
225  * for a buffer. Note, the number of bytes is always rounded
226  * to page size.
227  *
228  * This number is purposely set to a low value of 16384:
229  * if a dump on oops happens, it is much appreciated not to
230  * have to wait for all that output. In any case, it is
231  * configurable both at boot time and at run time.
232  */
233 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
234
235 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
236
237 /* trace_types holds a linked list of available tracers. */
238 static struct tracer            *trace_types __read_mostly;
239
240 /*
241  * trace_types_lock is used to protect the trace_types list.
242  */
243 static DEFINE_MUTEX(trace_types_lock);
244
245 /*
246  * serialize the access of the ring buffer
247  *
248  * The ring buffer serializes readers, but that is only low level
249  * protection. The validity of the events (returned by ring_buffer_peek()
250  * etc.) is not protected by the ring buffer.
251  *
252  * The content of events may become garbage if we allow other processes
253  * to consume these events concurrently:
254  *   A) the page of the consumed events may become a normal page
255  *      (not a reader page) in the ring buffer, and this page will be
256  *      rewritten by the event producer.
257  *   B) the page of the consumed events may become a page for splice_read,
258  *      and this page will be returned to the system.
259  *
260  * These primitives allow multiple processes to access different CPU
261  * ring buffers concurrently.
262  *
263  * These primitives don't distinguish read-only and read-consume access.
264  * Multiple read-only accesses are also serialized.
265  */
266
267 #ifdef CONFIG_SMP
268 static DECLARE_RWSEM(all_cpu_access_lock);
269 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
270
271 static inline void trace_access_lock(int cpu)
272 {
273         if (cpu == RING_BUFFER_ALL_CPUS) {
274                 /* gain it for accessing the whole ring buffer. */
275                 down_write(&all_cpu_access_lock);
276         } else {
277                 /* gain it for accessing a cpu ring buffer. */
278
279                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
280                 down_read(&all_cpu_access_lock);
281
282                 /* Secondly block other access to this @cpu ring buffer. */
283                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
284         }
285 }
286
287 static inline void trace_access_unlock(int cpu)
288 {
289         if (cpu == RING_BUFFER_ALL_CPUS) {
290                 up_write(&all_cpu_access_lock);
291         } else {
292                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
293                 up_read(&all_cpu_access_lock);
294         }
295 }
296
297 static inline void trace_access_lock_init(void)
298 {
299         int cpu;
300
301         for_each_possible_cpu(cpu)
302                 mutex_init(&per_cpu(cpu_access_lock, cpu));
303 }
304
305 #else
306
307 static DEFINE_MUTEX(access_lock);
308
309 static inline void trace_access_lock(int cpu)
310 {
311         (void)cpu;
312         mutex_lock(&access_lock);
313 }
314
315 static inline void trace_access_unlock(int cpu)
316 {
317         (void)cpu;
318         mutex_unlock(&access_lock);
319 }
320
321 static inline void trace_access_lock_init(void)
322 {
323 }
324
325 #endif
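/*
 * Usage sketch (illustrative only): a reader that consumes events from a
 * single CPU's buffer brackets its accesses with the primitives above:
 *
 *	trace_access_lock(cpu);
 *	... consume or splice from that CPU's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access exclusively for
 * all CPUs.
 */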
326
327 /* trace_flags holds trace_options default values */
328 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
329         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
330         TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
331         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
332
333 /**
334  * tracing_on - enable tracing buffers
335  *
336  * This function enables tracing buffers that may have been
337  * disabled with tracing_off.
338  */
339 void tracing_on(void)
340 {
341         if (global_trace.trace_buffer.buffer)
342                 ring_buffer_record_on(global_trace.trace_buffer.buffer);
343         /*
344          * This flag is only looked at when buffers haven't been
345          * allocated yet. We don't really care about the race
346          * between setting this flag and actually turning
347          * on the buffer.
348          */
349         global_trace.buffer_disabled = 0;
350 }
351 EXPORT_SYMBOL_GPL(tracing_on);
352
353 /**
354  * __trace_puts - write a constant string into the trace buffer.
355  * @ip:    The address of the caller
356  * @str:   The constant string to write
357  * @size:  The size of the string.
358  */
359 int __trace_puts(unsigned long ip, const char *str, int size)
360 {
361         struct ring_buffer_event *event;
362         struct ring_buffer *buffer;
363         struct print_entry *entry;
364         unsigned long irq_flags;
365         int alloc;
366
367         alloc = sizeof(*entry) + size + 2; /* possible \n added */
368
369         local_save_flags(irq_flags);
370         buffer = global_trace.trace_buffer.buffer;
371         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
372                                           irq_flags, preempt_count());
373         if (!event)
374                 return 0;
375
376         entry = ring_buffer_event_data(event);
377         entry->ip = ip;
378
379         memcpy(&entry->buf, str, size);
380
381         /* Add a newline if necessary */
382         if (entry->buf[size - 1] != '\n') {
383                 entry->buf[size] = '\n';
384                 entry->buf[size + 1] = '\0';
385         } else
386                 entry->buf[size] = '\0';
387
388         __buffer_unlock_commit(buffer, event);
389
390         return size;
391 }
392 EXPORT_SYMBOL_GPL(__trace_puts);
393
394 /**
395  * __trace_bputs - write the pointer to a constant string into trace buffer
396  * @ip:    The address of the caller
397  * @str:   The constant string whose address is written into the buffer
398  */
399 int __trace_bputs(unsigned long ip, const char *str)
400 {
401         struct ring_buffer_event *event;
402         struct ring_buffer *buffer;
403         struct bputs_entry *entry;
404         unsigned long irq_flags;
405         int size = sizeof(struct bputs_entry);
406
407         local_save_flags(irq_flags);
408         buffer = global_trace.trace_buffer.buffer;
409         event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
410                                           irq_flags, preempt_count());
411         if (!event)
412                 return 0;
413
414         entry = ring_buffer_event_data(event);
415         entry->ip                       = ip;
416         entry->str                      = str;
417
418         __buffer_unlock_commit(buffer, event);
419
420         return 1;
421 }
422 EXPORT_SYMBOL_GPL(__trace_bputs);
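/*
 * Minimal usage sketch (illustrative): callers normally go through the
 * trace_puts() helper in trace.h, which supplies _THIS_IP_ and roughly
 * picks __trace_bputs() or __trace_puts() depending on whether the string
 * is a compile-time constant:
 *
 *	trace_puts("reached the slow path\n");
 */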
423
424 #ifdef CONFIG_TRACER_SNAPSHOT
425 /**
426  * tracing_snapshot - take a snapshot of the current buffer.
427  *
428  * This causes a swap between the snapshot buffer and the current live
429  * tracing buffer. You can use this to take snapshots of the live
430  * trace when some condition is triggered, but continue to trace.
431  *
432  * Note, make sure to allocate the snapshot with either
433  * tracing_snapshot_alloc(), or by doing it manually with:
434  *   echo 1 > /sys/kernel/debug/tracing/snapshot
435  *
436  * If the snapshot buffer is not allocated, this will stop tracing,
437  * effectively turning the live buffer into a permanent snapshot.
438  */
439 void tracing_snapshot(void)
440 {
441         struct trace_array *tr = &global_trace;
442         struct tracer *tracer = tr->current_trace;
443         unsigned long flags;
444
445         if (in_nmi()) {
446                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
447                 internal_trace_puts("*** snapshot is being ignored        ***\n");
448                 return;
449         }
450
451         if (!tr->allocated_snapshot) {
452                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
453                 internal_trace_puts("*** stopping trace here!   ***\n");
454                 tracing_off();
455                 return;
456         }
457
458         /* Note, snapshot can not be used when the tracer uses it */
459         if (tracer->use_max_tr) {
460                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
461                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
462                 return;
463         }
464
465         local_irq_save(flags);
466         update_max_tr(tr, current, smp_processor_id());
467         local_irq_restore(flags);
468 }
469 EXPORT_SYMBOL_GPL(tracing_snapshot);
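/*
 * Illustrative sketch (not taken from an in-tree user; hit_rare_condition
 * stands in for whatever check the caller cares about): with
 * CONFIG_TRACER_SNAPSHOT=y and the spare buffer allocated, a caller can
 * freeze the interesting part of the trace while tracing continues:
 *
 *	if (unlikely(hit_rare_condition))
 *		tracing_snapshot();
 *
 * The frozen data can then be read from
 * /sys/kernel/debug/tracing/snapshot.
 */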
470
471 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
472                                         struct trace_buffer *size_buf, int cpu_id);
473 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
474
475 static int alloc_snapshot(struct trace_array *tr)
476 {
477         int ret;
478
479         if (!tr->allocated_snapshot) {
480
481                 /* allocate spare buffer */
482                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
483                                    &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
484                 if (ret < 0)
485                         return ret;
486
487                 tr->allocated_snapshot = true;
488         }
489
490         return 0;
491 }
492
493 void free_snapshot(struct trace_array *tr)
494 {
495         /*
496          * We don't free the ring buffer; instead, we resize it, because
497          * the max_tr ring buffer has some state (e.g. ring->clock) that
498          * we want to preserve.
499          */
500         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
501         set_buffer_entries(&tr->max_buffer, 1);
502         tracing_reset_online_cpus(&tr->max_buffer);
503         tr->allocated_snapshot = false;
504 }
505
506 /**
507  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
508  *
509  * This is similar to tracing_snapshot(), but it will allocate the
510  * snapshot buffer if it isn't already allocated. Use this only
511  * where it is safe to sleep, as the allocation may sleep.
512  *
513  * This causes a swap between the snapshot buffer and the current live
514  * tracing buffer. You can use this to take snapshots of the live
515  * trace when some condition is triggered, but continue to trace.
516  */
517 void tracing_snapshot_alloc(void)
518 {
519         struct trace_array *tr = &global_trace;
520         int ret;
521
522         ret = alloc_snapshot(tr);
523         if (WARN_ON(ret < 0))
524                 return;
525
526         tracing_snapshot();
527 }
528 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
529 #else
530 void tracing_snapshot(void)
531 {
532         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
533 }
534 EXPORT_SYMBOL_GPL(tracing_snapshot);
535 void tracing_snapshot_alloc(void)
536 {
537         /* Give warning */
538         tracing_snapshot();
539 }
540 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
541 #endif /* CONFIG_TRACER_SNAPSHOT */
542
543 /**
544  * tracing_off - turn off tracing buffers
545  *
546  * This function stops the tracing buffers from recording data.
547  * It does not disable any overhead the tracers themselves may
548  * be causing. This function simply causes all recording to
549  * the ring buffers to fail.
550  */
551 void tracing_off(void)
552 {
553         if (global_trace.trace_buffer.buffer)
554                 ring_buffer_record_off(global_trace.trace_buffer.buffer);
555         /*
556          * This flag is only looked at when buffers haven't been
557          * allocated yet. We don't really care about the race
558          * between setting this flag and actually turning
559          * on the buffer.
560          */
561         global_trace.buffer_disabled = 1;
562 }
563 EXPORT_SYMBOL_GPL(tracing_off);
564
565 /**
566  * tracing_is_on - show state of ring buffers enabled
567  */
568 int tracing_is_on(void)
569 {
570         if (global_trace.trace_buffer.buffer)
571                 return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
572         return !global_trace.buffer_disabled;
573 }
574 EXPORT_SYMBOL_GPL(tracing_is_on);
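/*
 * Illustrative sketch (something_went_wrong() is a stand-in for any check
 * the caller cares about): tracing_off() and tracing_on() can bracket a
 * problem so the ring buffer keeps only the events leading up to it, and
 * tracing_is_on() reports the current recording state:
 *
 *	if (something_went_wrong())
 *		tracing_off();
 *
 *	if (tracing_is_on())
 *		pr_debug("ring buffers still recording\n");
 */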
575
576 static int __init set_buf_size(char *str)
577 {
578         unsigned long buf_size;
579
580         if (!str)
581                 return 0;
582         buf_size = memparse(str, &str);
583         /* nr_entries can not be zero */
584         if (buf_size == 0)
585                 return 0;
586         trace_buf_size = buf_size;
587         return 1;
588 }
589 __setup("trace_buf_size=", set_buf_size);
590
591 static int __init set_tracing_thresh(char *str)
592 {
593         unsigned long threshold;
594         int ret;
595
596         if (!str)
597                 return 0;
598         ret = kstrtoul(str, 0, &threshold);
599         if (ret < 0)
600                 return 0;
601         tracing_thresh = threshold * 1000;
602         return 1;
603 }
604 __setup("tracing_thresh=", set_tracing_thresh);
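/*
 * Example (illustrative): booting with "tracing_thresh=100" sets a
 * 100 microsecond threshold; it is stored internally in nanoseconds,
 * hence the multiplication by 1000 above.
 */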
605
606 unsigned long nsecs_to_usecs(unsigned long nsecs)
607 {
608         return nsecs / 1000;
609 }
610
611 /* These must match the bit positions in trace_iterator_flags */
612 static const char *trace_options[] = {
613         "print-parent",
614         "sym-offset",
615         "sym-addr",
616         "verbose",
617         "raw",
618         "hex",
619         "bin",
620         "block",
621         "stacktrace",
622         "trace_printk",
623         "ftrace_preempt",
624         "branch",
625         "annotate",
626         "userstacktrace",
627         "sym-userobj",
628         "printk-msg-only",
629         "context-info",
630         "latency-format",
631         "sleep-time",
632         "graph-time",
633         "record-cmd",
634         "overwrite",
635         "disable_on_free",
636         "irq-info",
637         "markers",
638         NULL
639 };
640
641 static struct {
642         u64 (*func)(void);
643         const char *name;
644         int in_ns;              /* is this clock in nanoseconds? */
645 } trace_clocks[] = {
646         { trace_clock_local,    "local",        1 },
647         { trace_clock_global,   "global",       1 },
648         { trace_clock_counter,  "counter",      0 },
649         ARCH_TRACE_CLOCKS
650 };
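/*
 * Example (illustrative): the active clock is chosen at run time by
 * writing one of the names above to the trace_clock file:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */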
651
652 int trace_clock_id;
653
654 /*
655  * trace_parser_get_init - gets the buffer for trace parser
656  */
657 int trace_parser_get_init(struct trace_parser *parser, int size)
658 {
659         memset(parser, 0, sizeof(*parser));
660
661         parser->buffer = kmalloc(size, GFP_KERNEL);
662         if (!parser->buffer)
663                 return 1;
664
665         parser->size = size;
666         return 0;
667 }
668
669 /*
670  * trace_parser_put - frees the buffer for trace parser
671  */
672 void trace_parser_put(struct trace_parser *parser)
673 {
674         kfree(parser->buffer);
675 }
676
677 /*
678  * trace_get_user - reads the user input string separated by space
679  * (matched by isspace(ch))
680  *
681  * For each string found the 'struct trace_parser' is updated,
682  * and the function returns.
683  *
684  * Returns number of bytes read.
685  *
686  * See kernel/trace/trace.h for 'struct trace_parser' details.
687  */
688 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
689         size_t cnt, loff_t *ppos)
690 {
691         char ch;
692         size_t read = 0;
693         ssize_t ret;
694
695         if (!*ppos)
696                 trace_parser_clear(parser);
697
698         ret = get_user(ch, ubuf++);
699         if (ret)
700                 goto out;
701
702         read++;
703         cnt--;
704
705         /*
706          * The parser is not finished with the last write,
707          * continue reading the user input without skipping spaces.
708          */
709         if (!parser->cont) {
710                 /* skip white space */
711                 while (cnt && isspace(ch)) {
712                         ret = get_user(ch, ubuf++);
713                         if (ret)
714                                 goto out;
715                         read++;
716                         cnt--;
717                 }
718
719                 /* only spaces were written */
720                 if (isspace(ch)) {
721                         *ppos += read;
722                         ret = read;
723                         goto out;
724                 }
725
726                 parser->idx = 0;
727         }
728
729         /* read the non-space input */
730         while (cnt && !isspace(ch)) {
731                 if (parser->idx < parser->size - 1)
732                         parser->buffer[parser->idx++] = ch;
733                 else {
734                         ret = -EINVAL;
735                         goto out;
736                 }
737                 ret = get_user(ch, ubuf++);
738                 if (ret)
739                         goto out;
740                 read++;
741                 cnt--;
742         }
743
744         /* Either we finished reading the input or we have to wait for another call. */
745         if (isspace(ch)) {
746                 parser->buffer[parser->idx] = 0;
747                 parser->cont = false;
748         } else {
749                 parser->cont = true;
750                 parser->buffer[parser->idx++] = ch;
751         }
752
753         *ppos += read;
754         ret = read;
755
756 out:
757         return ret;
758 }
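/*
 * Sketch of the write-handler loop that typically drives this parser
 * (illustrative; the set_ftrace_filter write path follows this shape, and
 * process_token() is a hypothetical per-token handler):
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 */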
759
760 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
761 {
762         int len;
763         int ret;
764
765         if (!cnt)
766                 return 0;
767
768         if (s->len <= s->readpos)
769                 return -EBUSY;
770
771         len = s->len - s->readpos;
772         if (cnt > len)
773                 cnt = len;
774         ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
775         if (ret == cnt)
776                 return -EFAULT;
777
778         cnt -= ret;
779
780         s->readpos += cnt;
781         return cnt;
782 }
783
784 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
785 {
786         int len;
787
788         if (s->len <= s->readpos)
789                 return -EBUSY;
790
791         len = s->len - s->readpos;
792         if (cnt > len)
793                 cnt = len;
794         memcpy(buf, s->buffer + s->readpos, cnt);
795
796         s->readpos += cnt;
797         return cnt;
798 }
799
800 /*
801  * ftrace_max_lock is used to protect the swapping of buffers
802  * when taking a max snapshot. The buffers themselves are
803  * protected by per_cpu spinlocks. But the action of the swap
804  * needs its own lock.
805  *
806  * This is defined as an arch_spinlock_t in order to help
807  * with performance when lockdep debugging is enabled.
808  *
809  * It is also used in places outside of update_max_tr(),
810  * so it needs to be defined outside of the
811  * CONFIG_TRACER_MAX_TRACE block.
812  */
813 static arch_spinlock_t ftrace_max_lock =
814         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
815
816 unsigned long __read_mostly     tracing_thresh;
817
818 #ifdef CONFIG_TRACER_MAX_TRACE
819 unsigned long __read_mostly     tracing_max_latency;
820
821 /*
822  * Copy the new maximum trace into the separate maximum-trace
823  * structure. (this way the maximum trace is permanently saved,
824  * for later retrieval via /sys/kernel/debug/tracing/trace)
825  */
826 static void
827 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
828 {
829         struct trace_buffer *trace_buf = &tr->trace_buffer;
830         struct trace_buffer *max_buf = &tr->max_buffer;
831         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
832         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
833
834         max_buf->cpu = cpu;
835         max_buf->time_start = data->preempt_timestamp;
836
837         max_data->saved_latency = tracing_max_latency;
838         max_data->critical_start = data->critical_start;
839         max_data->critical_end = data->critical_end;
840
841         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
842         max_data->pid = tsk->pid;
843         max_data->uid = task_uid(tsk);
844         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
845         max_data->policy = tsk->policy;
846         max_data->rt_priority = tsk->rt_priority;
847
848         /* record this task's comm */
849         tracing_record_cmdline(tsk);
850 }
851
852 /**
853  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
854  * @tr: tracer
855  * @tsk: the task with the latency
856  * @cpu: The cpu that initiated the trace.
857  *
858  * Flip the buffers between the @tr and the max_tr and record information
859  * about which task was the cause of this latency.
860  */
861 void
862 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
863 {
864         struct ring_buffer *buf;
865
866         if (tr->stop_count)
867                 return;
868
869         WARN_ON_ONCE(!irqs_disabled());
870
871         if (!tr->allocated_snapshot) {
872                 /* Only the nop tracer should hit this when disabling */
873                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
874                 return;
875         }
876
877         arch_spin_lock(&ftrace_max_lock);
878
879         buf = tr->trace_buffer.buffer;
880         tr->trace_buffer.buffer = tr->max_buffer.buffer;
881         tr->max_buffer.buffer = buf;
882
883         __update_max_tr(tr, tsk, cpu);
884         arch_spin_unlock(&ftrace_max_lock);
885 }
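/*
 * Illustrative sketch: latency tracers (the wakeup tracer, for example)
 * call this when they observe a new maximum latency, roughly:
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr(tr, current, cpu);
 *	}
 */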
886
887 /**
888  * update_max_tr_single - only copy one trace over, and reset the rest
889  * @tr: tracer
890  * @tsk: task with the latency
891  * @cpu: the cpu of the buffer to copy.
892  *
893  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
894  */
895 void
896 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
897 {
898         int ret;
899
900         if (tr->stop_count)
901                 return;
902
903         WARN_ON_ONCE(!irqs_disabled());
904         if (WARN_ON_ONCE(!tr->allocated_snapshot))
905                 return;
906
907         arch_spin_lock(&ftrace_max_lock);
908
909         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
910
911         if (ret == -EBUSY) {
912                 /*
913                  * We failed to swap the buffer due to a commit taking
914                  * place on this CPU. We fail to record, but we reset
915                  * the max trace buffer (no one writes directly to it)
916                  * and flag that it failed.
917                  */
918                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
919                         "Failed to swap buffers due to commit in progress\n");
920         }
921
922         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
923
924         __update_max_tr(tr, tsk, cpu);
925         arch_spin_unlock(&ftrace_max_lock);
926 }
927 #endif /* CONFIG_TRACER_MAX_TRACE */
928
929 static void default_wait_pipe(struct trace_iterator *iter)
930 {
931         /* Iterators are static, they should be filled or empty */
932         if (trace_buffer_iter(iter, iter->cpu_file))
933                 return;
934
935         ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
936 }
937
938 #ifdef CONFIG_FTRACE_STARTUP_TEST
939 static int run_tracer_selftest(struct tracer *type)
940 {
941         struct trace_array *tr = &global_trace;
942         struct tracer *saved_tracer = tr->current_trace;
943         int ret;
944
945         if (!type->selftest || tracing_selftest_disabled)
946                 return 0;
947
948         /*
949          * Run a selftest on this tracer.
950          * Here we reset the trace buffer, and set the current
951          * tracer to be this tracer. The tracer can then run some
952          * internal tracing to verify that everything is in order.
953          * If we fail, we do not register this tracer.
954          */
955         tracing_reset_online_cpus(&tr->trace_buffer);
956
957         tr->current_trace = type;
958
959 #ifdef CONFIG_TRACER_MAX_TRACE
960         if (type->use_max_tr) {
961                 /* If we expanded the buffers, make sure the max is expanded too */
962                 if (ring_buffer_expanded)
963                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
964                                            RING_BUFFER_ALL_CPUS);
965                 tr->allocated_snapshot = true;
966         }
967 #endif
968
969         /* the test is responsible for initializing and enabling */
970         pr_info("Testing tracer %s: ", type->name);
971         ret = type->selftest(type, tr);
972         /* the test is responsible for resetting too */
973         tr->current_trace = saved_tracer;
974         if (ret) {
975                 printk(KERN_CONT "FAILED!\n");
976                 /* Add the warning after printing 'FAILED' */
977                 WARN_ON(1);
978                 return -1;
979         }
980         /* Only reset on passing, to avoid touching corrupted buffers */
981         tracing_reset_online_cpus(&tr->trace_buffer);
982
983 #ifdef CONFIG_TRACER_MAX_TRACE
984         if (type->use_max_tr) {
985                 tr->allocated_snapshot = false;
986
987                 /* Shrink the max buffer again */
988                 if (ring_buffer_expanded)
989                         ring_buffer_resize(tr->max_buffer.buffer, 1,
990                                            RING_BUFFER_ALL_CPUS);
991         }
992 #endif
993
994         printk(KERN_CONT "PASSED\n");
995         return 0;
996 }
997 #else
998 static inline int run_tracer_selftest(struct tracer *type)
999 {
1000         return 0;
1001 }
1002 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1003
1004 /**
1005  * register_tracer - register a tracer with the ftrace system.
1006  * @type: the plugin for the tracer
1007  *
1008  * Register a new plugin tracer.
1009  */
1010 int register_tracer(struct tracer *type)
1011 {
1012         struct tracer *t;
1013         int ret = 0;
1014
1015         if (!type->name) {
1016                 pr_info("Tracer must have a name\n");
1017                 return -1;
1018         }
1019
1020         if (strlen(type->name) >= MAX_TRACER_SIZE) {
1021                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1022                 return -1;
1023         }
1024
1025         mutex_lock(&trace_types_lock);
1026
1027         tracing_selftest_running = true;
1028
1029         for (t = trace_types; t; t = t->next) {
1030                 if (strcmp(type->name, t->name) == 0) {
1031                         /* already found */
1032                         pr_info("Tracer %s already registered\n",
1033                                 type->name);
1034                         ret = -1;
1035                         goto out;
1036                 }
1037         }
1038
1039         if (!type->set_flag)
1040                 type->set_flag = &dummy_set_flag;
1041         if (!type->flags)
1042                 type->flags = &dummy_tracer_flags;
1043         else
1044                 if (!type->flags->opts)
1045                         type->flags->opts = dummy_tracer_opt;
1046         if (!type->wait_pipe)
1047                 type->wait_pipe = default_wait_pipe;
1048
1049         ret = run_tracer_selftest(type);
1050         if (ret < 0)
1051                 goto out;
1052
1053         type->next = trace_types;
1054         trace_types = type;
1055
1056  out:
1057         tracing_selftest_running = false;
1058         mutex_unlock(&trace_types_lock);
1059
1060         if (ret || !default_bootup_tracer)
1061                 goto out_unlock;
1062
1063         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1064                 goto out_unlock;
1065
1066         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1067         /* Do we want this tracer to start on bootup? */
1068         tracing_set_tracer(type->name);
1069         default_bootup_tracer = NULL;
1070         /* Disable other selftests, since this tracer will break them. */
1071         tracing_selftest_disabled = true;
1072 #ifdef CONFIG_FTRACE_STARTUP_TEST
1073         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1074                type->name);
1075 #endif
1076
1077  out_unlock:
1078         return ret;
1079 }
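/*
 * Illustrative sketch of a minimal tracer registration (a hypothetical
 * "mytrace" plugin; real tracers also fill in init/reset and other
 * callbacks, see e.g. trace_nop.c):
 *
 *	static struct tracer mytrace_tracer __read_mostly = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 *	static __init int init_mytrace(void)
 *	{
 *		return register_tracer(&mytrace_tracer);
 *	}
 *	device_initcall(init_mytrace);
 */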
1080
1081 void tracing_reset(struct trace_buffer *buf, int cpu)
1082 {
1083         struct ring_buffer *buffer = buf->buffer;
1084
1085         if (!buffer)
1086                 return;
1087
1088         ring_buffer_record_disable(buffer);
1089
1090         /* Make sure all commits have finished */
1091         synchronize_sched();
1092         ring_buffer_reset_cpu(buffer, cpu);
1093
1094         ring_buffer_record_enable(buffer);
1095 }
1096
1097 void tracing_reset_online_cpus(struct trace_buffer *buf)
1098 {
1099         struct ring_buffer *buffer = buf->buffer;
1100         int cpu;
1101
1102         if (!buffer)
1103                 return;
1104
1105         ring_buffer_record_disable(buffer);
1106
1107         /* Make sure all commits have finished */
1108         synchronize_sched();
1109
1110         buf->time_start = ftrace_now(buf->cpu);
1111
1112         for_each_online_cpu(cpu)
1113                 ring_buffer_reset_cpu(buffer, cpu);
1114
1115         ring_buffer_record_enable(buffer);
1116 }
1117
1118 void tracing_reset_current(int cpu)
1119 {
1120         tracing_reset(&global_trace.trace_buffer, cpu);
1121 }
1122
1123 void tracing_reset_all_online_cpus(void)
1124 {
1125         struct trace_array *tr;
1126
1127         mutex_lock(&trace_types_lock);
1128         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1129                 tracing_reset_online_cpus(&tr->trace_buffer);
1130 #ifdef CONFIG_TRACER_MAX_TRACE
1131                 tracing_reset_online_cpus(&tr->max_buffer);
1132 #endif
1133         }
1134         mutex_unlock(&trace_types_lock);
1135 }
1136
1137 #define SAVED_CMDLINES 128
1138 #define NO_CMDLINE_MAP UINT_MAX
1139 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1140 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1141 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1142 static int cmdline_idx;
1143 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1144
1145 /* temporarily disable recording */
1146 static atomic_t trace_record_cmdline_disabled __read_mostly;
1147
1148 static void trace_init_cmdlines(void)
1149 {
1150         memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1151         memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
1152         cmdline_idx = 0;
1153 }
1154
1155 int is_tracing_stopped(void)
1156 {
1157         return global_trace.stop_count;
1158 }
1159
1160 /**
1161  * ftrace_off_permanent - disable all ftrace code permanently
1162  *
1163  * This should only be called when a serious anomaly has
1164  * been detected.  This will turn off the function tracing,
1165  * ring buffers, and other tracing utilities. It takes no
1166  * locks and can be called from any context.
1167  */
1168 void ftrace_off_permanent(void)
1169 {
1170         tracing_disabled = 1;
1171         ftrace_stop();
1172         tracing_off_permanent();
1173 }
1174
1175 /**
1176  * tracing_start - quick start of the tracer
1177  *
1178  * If tracing is enabled but was stopped by tracing_stop,
1179  * this will start the tracer back up.
1180  */
1181 void tracing_start(void)
1182 {
1183         struct ring_buffer *buffer;
1184         unsigned long flags;
1185
1186         if (tracing_disabled)
1187                 return;
1188
1189         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1190         if (--global_trace.stop_count) {
1191                 if (global_trace.stop_count < 0) {
1192                         /* Someone screwed up their debugging */
1193                         WARN_ON_ONCE(1);
1194                         global_trace.stop_count = 0;
1195                 }
1196                 goto out;
1197         }
1198
1199         /* Prevent the buffers from switching */
1200         arch_spin_lock(&ftrace_max_lock);
1201
1202         buffer = global_trace.trace_buffer.buffer;
1203         if (buffer)
1204                 ring_buffer_record_enable(buffer);
1205
1206 #ifdef CONFIG_TRACER_MAX_TRACE
1207         buffer = global_trace.max_buffer.buffer;
1208         if (buffer)
1209                 ring_buffer_record_enable(buffer);
1210 #endif
1211
1212         arch_spin_unlock(&ftrace_max_lock);
1213
1214         ftrace_start();
1215  out:
1216         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1217 }
1218
1219 static void tracing_start_tr(struct trace_array *tr)
1220 {
1221         struct ring_buffer *buffer;
1222         unsigned long flags;
1223
1224         if (tracing_disabled)
1225                 return;
1226
1227         /* If global, we need to also start the max tracer */
1228         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1229                 return tracing_start();
1230
1231         raw_spin_lock_irqsave(&tr->start_lock, flags);
1232
1233         if (--tr->stop_count) {
1234                 if (tr->stop_count < 0) {
1235                         /* Someone screwed up their debugging */
1236                         WARN_ON_ONCE(1);
1237                         tr->stop_count = 0;
1238                 }
1239                 goto out;
1240         }
1241
1242         buffer = tr->trace_buffer.buffer;
1243         if (buffer)
1244                 ring_buffer_record_enable(buffer);
1245
1246  out:
1247         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1248 }
1249
1250 /**
1251  * tracing_stop - quick stop of the tracer
1252  *
1253  * Light weight way to stop tracing. Use in conjunction with
1254  * tracing_start.
1255  */
1256 void tracing_stop(void)
1257 {
1258         struct ring_buffer *buffer;
1259         unsigned long flags;
1260
1261         ftrace_stop();
1262         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1263         if (global_trace.stop_count++)
1264                 goto out;
1265
1266         /* Prevent the buffers from switching */
1267         arch_spin_lock(&ftrace_max_lock);
1268
1269         buffer = global_trace.trace_buffer.buffer;
1270         if (buffer)
1271                 ring_buffer_record_disable(buffer);
1272
1273 #ifdef CONFIG_TRACER_MAX_TRACE
1274         buffer = global_trace.max_buffer.buffer;
1275         if (buffer)
1276                 ring_buffer_record_disable(buffer);
1277 #endif
1278
1279         arch_spin_unlock(&ftrace_max_lock);
1280
1281  out:
1282         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1283 }
1284
1285 static void tracing_stop_tr(struct trace_array *tr)
1286 {
1287         struct ring_buffer *buffer;
1288         unsigned long flags;
1289
1290         /* If global, we need to also stop the max tracer */
1291         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1292                 return tracing_stop();
1293
1294         raw_spin_lock_irqsave(&tr->start_lock, flags);
1295         if (tr->stop_count++)
1296                 goto out;
1297
1298         buffer = tr->trace_buffer.buffer;
1299         if (buffer)
1300                 ring_buffer_record_disable(buffer);
1301
1302  out:
1303         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1304 }
1305
1306 void trace_stop_cmdline_recording(void);
1307
1308 static void trace_save_cmdline(struct task_struct *tsk)
1309 {
1310         unsigned pid, idx;
1311
1312         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1313                 return;
1314
1315         /*
1316          * It's not the end of the world if we don't get
1317          * the lock, but we also don't want to spin
1318          * nor do we want to disable interrupts,
1319          * so if we miss here, then better luck next time.
1320          */
1321         if (!arch_spin_trylock(&trace_cmdline_lock))
1322                 return;
1323
1324         idx = map_pid_to_cmdline[tsk->pid];
1325         if (idx == NO_CMDLINE_MAP) {
1326                 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1327
1328                 /*
1329                  * Check whether the cmdline buffer at idx has a pid
1330                  * mapped. We are going to overwrite that entry so we
1331                  * need to clear the map_pid_to_cmdline. Otherwise we
1332                  * would read the new comm for the old pid.
1333                  */
1334                 pid = map_cmdline_to_pid[idx];
1335                 if (pid != NO_CMDLINE_MAP)
1336                         map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1337
1338                 map_cmdline_to_pid[idx] = tsk->pid;
1339                 map_pid_to_cmdline[tsk->pid] = idx;
1340
1341                 cmdline_idx = idx;
1342         }
1343
1344         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1345
1346         arch_spin_unlock(&trace_cmdline_lock);
1347 }
1348
1349 void trace_find_cmdline(int pid, char comm[])
1350 {
1351         unsigned map;
1352
1353         if (!pid) {
1354                 strcpy(comm, "<idle>");
1355                 return;
1356         }
1357
1358         if (WARN_ON_ONCE(pid < 0)) {
1359                 strcpy(comm, "<XXX>");
1360                 return;
1361         }
1362
1363         if (pid > PID_MAX_DEFAULT) {
1364                 strcpy(comm, "<...>");
1365                 return;
1366         }
1367
1368         preempt_disable();
1369         arch_spin_lock(&trace_cmdline_lock);
1370         map = map_pid_to_cmdline[pid];
1371         if (map != NO_CMDLINE_MAP)
1372                 strcpy(comm, saved_cmdlines[map]);
1373         else
1374                 strcpy(comm, "<...>");
1375
1376         arch_spin_unlock(&trace_cmdline_lock);
1377         preempt_enable();
1378 }
1379
1380 void tracing_record_cmdline(struct task_struct *tsk)
1381 {
1382         if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1383                 return;
1384
1385         if (!__this_cpu_read(trace_cmdline_save))
1386                 return;
1387
1388         __this_cpu_write(trace_cmdline_save, false);
1389
1390         trace_save_cmdline(tsk);
1391 }
1392
1393 void
1394 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1395                              int pc)
1396 {
1397         struct task_struct *tsk = current;
1398
1399         entry->preempt_count            = pc & 0xff;
1400         entry->pid                      = (tsk) ? tsk->pid : 0;
1401         entry->flags =
1402 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1403                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1404 #else
1405                 TRACE_FLAG_IRQS_NOSUPPORT |
1406 #endif
1407                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1408                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1409                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1410 }
1411 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1412
1413 struct ring_buffer_event *
1414 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1415                           int type,
1416                           unsigned long len,
1417                           unsigned long flags, int pc)
1418 {
1419         struct ring_buffer_event *event;
1420
1421         event = ring_buffer_lock_reserve(buffer, len);
1422         if (event != NULL) {
1423                 struct trace_entry *ent = ring_buffer_event_data(event);
1424
1425                 tracing_generic_entry_update(ent, flags, pc);
1426                 ent->type = type;
1427         }
1428
1429         return event;
1430 }
1431
1432 void
1433 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1434 {
1435         __this_cpu_write(trace_cmdline_save, true);
1436         ring_buffer_unlock_commit(buffer, event);
1437 }
1438
1439 static inline void
1440 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1441                              struct ring_buffer_event *event,
1442                              unsigned long flags, int pc)
1443 {
1444         __buffer_unlock_commit(buffer, event);
1445
1446         ftrace_trace_stack(buffer, flags, 6, pc);
1447         ftrace_trace_userstack(buffer, flags, pc);
1448 }
1449
1450 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1451                                 struct ring_buffer_event *event,
1452                                 unsigned long flags, int pc)
1453 {
1454         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1455 }
1456 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1457
1458 struct ring_buffer_event *
1459 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1460                           struct ftrace_event_file *ftrace_file,
1461                           int type, unsigned long len,
1462                           unsigned long flags, int pc)
1463 {
1464         *current_rb = ftrace_file->tr->trace_buffer.buffer;
1465         return trace_buffer_lock_reserve(*current_rb,
1466                                          type, len, flags, pc);
1467 }
1468 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1469
1470 struct ring_buffer_event *
1471 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1472                                   int type, unsigned long len,
1473                                   unsigned long flags, int pc)
1474 {
1475         *current_rb = global_trace.trace_buffer.buffer;
1476         return trace_buffer_lock_reserve(*current_rb,
1477                                          type, len, flags, pc);
1478 }
1479 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1480
1481 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1482                                         struct ring_buffer_event *event,
1483                                         unsigned long flags, int pc)
1484 {
1485         __trace_buffer_unlock_commit(buffer, event, flags, pc);
1486 }
1487 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1488
1489 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1490                                      struct ring_buffer_event *event,
1491                                      unsigned long flags, int pc,
1492                                      struct pt_regs *regs)
1493 {
1494         __buffer_unlock_commit(buffer, event);
1495
1496         ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1497         ftrace_trace_userstack(buffer, flags, pc);
1498 }
1499 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1500
1501 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1502                                          struct ring_buffer_event *event)
1503 {
1504         ring_buffer_discard_commit(buffer, event);
1505 }
1506 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1507
1508 void
1509 trace_function(struct trace_array *tr,
1510                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1511                int pc)
1512 {
1513         struct ftrace_event_call *call = &event_function;
1514         struct ring_buffer *buffer = tr->trace_buffer.buffer;
1515         struct ring_buffer_event *event;
1516         struct ftrace_entry *entry;
1517
1518         /* If we are reading the ring buffer, don't trace */
1519         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1520                 return;
1521
1522         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1523                                           flags, pc);
1524         if (!event)
1525                 return;
1526         entry   = ring_buffer_event_data(event);
1527         entry->ip                       = ip;
1528         entry->parent_ip                = parent_ip;
1529
1530         if (!filter_check_discard(call, entry, buffer, event))
1531                 __buffer_unlock_commit(buffer, event);
1532 }
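/*
 * Illustrative sketch: the function tracer's ftrace callback ends up here,
 * roughly (see trace_functions.c for the real caller):
 *
 *	local_save_flags(flags);
 *	trace_function(tr, ip, parent_ip, flags, preempt_count());
 */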
1533
1534 void
1535 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1536        unsigned long ip, unsigned long parent_ip, unsigned long flags,
1537        int pc)
1538 {
1539         if (likely(!atomic_read(&data->disabled)))
1540                 trace_function(tr, ip, parent_ip, flags, pc);
1541 }
1542
1543 #ifdef CONFIG_STACKTRACE
1544
1545 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1546 struct ftrace_stack {
1547         unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1548 };
1549
1550 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1551 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1552
1553 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1554                                  unsigned long flags,
1555                                  int skip, int pc, struct pt_regs *regs)
1556 {
1557         struct ftrace_event_call *call = &event_kernel_stack;
1558         struct ring_buffer_event *event;
1559         struct stack_entry *entry;
1560         struct stack_trace trace;
1561         int use_stack;
1562         int size = FTRACE_STACK_ENTRIES;
1563
1564         trace.nr_entries        = 0;
1565         trace.skip              = skip;
1566
1567         /*
1568          * Since events can happen in NMIs there's no safe way to
1569          * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1570          * or NMI comes in, it will just have to use the default
1571          * FTRACE_STACK_ENTRIES size.
1572          */
1573         preempt_disable_notrace();
1574
1575         use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1576         /*
1577          * We don't need any atomic variables, just a barrier.
1578          * If an interrupt comes in, we don't care, because it would
1579          * have exited and put the counter back to what we want.
1580          * We just need a barrier to keep gcc from moving things
1581          * around.
1582          */
1583         barrier();
1584         if (use_stack == 1) {
1585                 trace.entries           = &__get_cpu_var(ftrace_stack).calls[0];
1586                 trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1587
1588                 if (regs)
1589                         save_stack_trace_regs(regs, &trace);
1590                 else
1591                         save_stack_trace(&trace);
1592
1593                 if (trace.nr_entries > size)
1594                         size = trace.nr_entries;
1595         } else
1596                 /* From now on, use_stack is a boolean */
1597                 use_stack = 0;
1598
1599         size *= sizeof(unsigned long);
1600
1601         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1602                                           sizeof(*entry) + size, flags, pc);
1603         if (!event)
1604                 goto out;
1605         entry = ring_buffer_event_data(event);
1606
1607         memset(&entry->caller, 0, size);
1608
1609         if (use_stack)
1610                 memcpy(&entry->caller, trace.entries,
1611                        trace.nr_entries * sizeof(unsigned long));
1612         else {
1613                 trace.max_entries       = FTRACE_STACK_ENTRIES;
1614                 trace.entries           = entry->caller;
1615                 if (regs)
1616                         save_stack_trace_regs(regs, &trace);
1617                 else
1618                         save_stack_trace(&trace);
1619         }
1620
1621         entry->size = trace.nr_entries;
1622
1623         if (!filter_check_discard(call, entry, buffer, event))
1624                 __buffer_unlock_commit(buffer, event);
1625
1626  out:
1627         /* Again, don't let gcc optimize things here */
1628         barrier();
1629         __this_cpu_dec(ftrace_stack_reserve);
1630         preempt_enable_notrace();
1631
1632 }
1633
1634 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1635                              int skip, int pc, struct pt_regs *regs)
1636 {
1637         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1638                 return;
1639
1640         __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1641 }
1642
1643 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1644                         int skip, int pc)
1645 {
1646         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1647                 return;
1648
1649         __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1650 }
1651
1652 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1653                    int pc)
1654 {
1655         __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1656 }
1657
1658 /**
1659  * trace_dump_stack - record a stack back trace in the trace buffer
1660  */
1661 void trace_dump_stack(void)
1662 {
1663         unsigned long flags;
1664
1665         if (tracing_disabled || tracing_selftest_running)
1666                 return;
1667
1668         local_save_flags(flags);
1669
1670         /* skipping 3 frames seems to get us to the caller of this function */
1671         __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
1672                              preempt_count(), NULL);
1673 }
1674
1675 static DEFINE_PER_CPU(int, user_stack_count);
1676
1677 void
1678 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1679 {
1680         struct ftrace_event_call *call = &event_user_stack;
1681         struct ring_buffer_event *event;
1682         struct userstack_entry *entry;
1683         struct stack_trace trace;
1684
1685         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1686                 return;
1687
1688         /*
1689          * NMIs cannot handle page faults, even with fixups.
1690          * Saving the user stack can (and often does) fault.
1691          */
1692         if (unlikely(in_nmi()))
1693                 return;
1694
1695         /*
1696          * prevent recursion, since the user stack tracing may
1697          * trigger other kernel events.
1698          */
1699         preempt_disable();
1700         if (__this_cpu_read(user_stack_count))
1701                 goto out;
1702
1703         __this_cpu_inc(user_stack_count);
1704
1705         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1706                                           sizeof(*entry), flags, pc);
1707         if (!event)
1708                 goto out_drop_count;
1709         entry   = ring_buffer_event_data(event);
1710
1711         entry->tgid             = current->tgid;
1712         memset(&entry->caller, 0, sizeof(entry->caller));
1713
1714         trace.nr_entries        = 0;
1715         trace.max_entries       = FTRACE_STACK_ENTRIES;
1716         trace.skip              = 0;
1717         trace.entries           = entry->caller;
1718
1719         save_stack_trace_user(&trace);
1720         if (!filter_check_discard(call, entry, buffer, event))
1721                 __buffer_unlock_commit(buffer, event);
1722
1723  out_drop_count:
1724         __this_cpu_dec(user_stack_count);
1725  out:
1726         preempt_enable();
1727 }
1728
1729 #ifdef UNUSED
1730 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1731 {
1732         ftrace_trace_userstack(tr, flags, preempt_count());
1733 }
1734 #endif /* UNUSED */
1735
1736 #endif /* CONFIG_STACKTRACE */
1737
1738 /* created for use with alloc_percpu */
1739 struct trace_buffer_struct {
1740         char buffer[TRACE_BUF_SIZE];
1741 };
1742
1743 static struct trace_buffer_struct *trace_percpu_buffer;
1744 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1745 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1746 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1747
1748 /*
1749  * The buffer used is dependent on the context. There is a per cpu
1750  * buffer for normal context, softirq context, hard irq context and
1751  * for NMI context. This allows for lockless recording.
1752  *
1753  * Note: if the buffers failed to be allocated, then this returns NULL.
1754  */
1755 static char *get_trace_buf(void)
1756 {
1757         struct trace_buffer_struct *percpu_buffer;
1758
1759         /*
1760          * If we have allocated per cpu buffers, then we do not
1761          * need to do any locking.
1762          */
1763         if (in_nmi())
1764                 percpu_buffer = trace_percpu_nmi_buffer;
1765         else if (in_irq())
1766                 percpu_buffer = trace_percpu_irq_buffer;
1767         else if (in_softirq())
1768                 percpu_buffer = trace_percpu_sirq_buffer;
1769         else
1770                 percpu_buffer = trace_percpu_buffer;
1771
1772         if (!percpu_buffer)
1773                 return NULL;
1774
1775         return this_cpu_ptr(&percpu_buffer->buffer[0]);
1776 }
1777
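/*
 * Allocate the per-cpu trace_printk buffers, one set for each context
 * (normal, softirq, irq and NMI). On failure, everything allocated so
 * far is freed again and -ENOMEM is returned.
 */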
1778 static int alloc_percpu_trace_buffer(void)
1779 {
1780         struct trace_buffer_struct *buffers;
1781         struct trace_buffer_struct *sirq_buffers;
1782         struct trace_buffer_struct *irq_buffers;
1783         struct trace_buffer_struct *nmi_buffers;
1784
1785         buffers = alloc_percpu(struct trace_buffer_struct);
1786         if (!buffers)
1787                 goto err_warn;
1788
1789         sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1790         if (!sirq_buffers)
1791                 goto err_sirq;
1792
1793         irq_buffers = alloc_percpu(struct trace_buffer_struct);
1794         if (!irq_buffers)
1795                 goto err_irq;
1796
1797         nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1798         if (!nmi_buffers)
1799                 goto err_nmi;
1800
1801         trace_percpu_buffer = buffers;
1802         trace_percpu_sirq_buffer = sirq_buffers;
1803         trace_percpu_irq_buffer = irq_buffers;
1804         trace_percpu_nmi_buffer = nmi_buffers;
1805
1806         return 0;
1807
1808  err_nmi:
1809         free_percpu(irq_buffers);
1810  err_irq:
1811         free_percpu(sirq_buffers);
1812  err_sirq:
1813         free_percpu(buffers);
1814  err_warn:
1815         WARN(1, "Could not allocate percpu trace_printk buffer");
1816         return -ENOMEM;
1817 }
1818
1819 static int buffers_allocated;
1820
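/*
 * Set up everything trace_printk() needs: allocate the per-cpu buffers
 * once, expand the ring buffers to their set size and, if the global
 * trace buffer already exists (the module case), start cmdline recording.
 */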
1821 void trace_printk_init_buffers(void)
1822 {
1823         if (buffers_allocated)
1824                 return;
1825
1826         if (alloc_percpu_trace_buffer())
1827                 return;
1828
1829         pr_info("ftrace: Allocated trace_printk buffers\n");
1830
1831         /* Expand the buffers to set size */
1832         tracing_update_buffers();
1833
1834         buffers_allocated = 1;
1835
1836         /*
1837          * trace_printk_init_buffers() can be called by modules.
1838          * If that happens, then we need to start cmdline recording
1839          * directly here. If global_trace.trace_buffer.buffer is already
1840          * allocated at this point, then this was called by module code.
1841          */
1842         if (global_trace.trace_buffer.buffer)
1843                 tracing_start_cmdline_record();
1844 }
1845
1846 void trace_printk_start_comm(void)
1847 {
1848         /* Start tracing comms if trace printk is set */
1849         if (!buffers_allocated)
1850                 return;
1851         tracing_start_cmdline_record();
1852 }
1853
1854 static void trace_printk_start_stop_comm(int enabled)
1855 {
1856         if (!buffers_allocated)
1857                 return;
1858
1859         if (enabled)
1860                 tracing_start_cmdline_record();
1861         else
1862                 tracing_stop_cmdline_record();
1863 }
1864
1865 /**
1866  * trace_vbprintk - write binary msg to tracing buffer
1867  * @ip is the caller address; @fmt and @args are encoded with vbin_printf().
1868  */
1869 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1870 {
1871         struct ftrace_event_call *call = &event_bprint;
1872         struct ring_buffer_event *event;
1873         struct ring_buffer *buffer;
1874         struct trace_array *tr = &global_trace;
1875         struct bprint_entry *entry;
1876         unsigned long flags;
1877         char *tbuffer;
1878         int len = 0, size, pc;
1879
1880         if (unlikely(tracing_selftest_running || tracing_disabled))
1881                 return 0;
1882
1883         /* Don't pollute graph traces with trace_vprintk internals */
1884         pause_graph_tracing();
1885
1886         pc = preempt_count();
1887         preempt_disable_notrace();
1888
1889         tbuffer = get_trace_buf();
1890         if (!tbuffer) {
1891                 len = 0;
1892                 goto out;
1893         }
1894
1895         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1896
1897         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1898                 goto out;
1899
1900         local_save_flags(flags);
1901         size = sizeof(*entry) + sizeof(u32) * len;
1902         buffer = tr->trace_buffer.buffer;
1903         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1904                                           flags, pc);
1905         if (!event)
1906                 goto out;
1907         entry = ring_buffer_event_data(event);
1908         entry->ip                       = ip;
1909         entry->fmt                      = fmt;
1910
1911         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1912         if (!filter_check_discard(call, entry, buffer, event)) {
1913                 __buffer_unlock_commit(buffer, event);
1914                 ftrace_trace_stack(buffer, flags, 6, pc);
1915         }
1916
1917 out:
1918         preempt_enable_notrace();
1919         unpause_graph_tracing();
1920
1921         return len;
1922 }
1923 EXPORT_SYMBOL_GPL(trace_vbprintk);
1924
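/*
 * Plain-text counterpart of trace_vbprintk(): the message is formatted
 * with vsnprintf() into a per-cpu buffer and then copied into a
 * TRACE_PRINT event in the given ring buffer.
 */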
1925 static int
1926 __trace_array_vprintk(struct ring_buffer *buffer,
1927                       unsigned long ip, const char *fmt, va_list args)
1928 {
1929         struct ftrace_event_call *call = &event_print;
1930         struct ring_buffer_event *event;
1931         int len = 0, size, pc;
1932         struct print_entry *entry;
1933         unsigned long flags;
1934         char *tbuffer;
1935
1936         if (tracing_disabled || tracing_selftest_running)
1937                 return 0;
1938
1939         /* Don't pollute graph traces with trace_vprintk internals */
1940         pause_graph_tracing();
1941
1942         pc = preempt_count();
1943         preempt_disable_notrace();
1944
1945
1946         tbuffer = get_trace_buf();
1947         if (!tbuffer) {
1948                 len = 0;
1949                 goto out;
1950         }
1951
1952         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1953         if (len > TRACE_BUF_SIZE)
1954                 goto out;
1955
1956         local_save_flags(flags);
1957         size = sizeof(*entry) + len + 1;
1958         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1959                                           flags, pc);
1960         if (!event)
1961                 goto out;
1962         entry = ring_buffer_event_data(event);
1963         entry->ip = ip;
1964
1965         memcpy(&entry->buf, tbuffer, len);
1966         entry->buf[len] = '\0';
1967         if (!filter_check_discard(call, entry, buffer, event)) {
1968                 __buffer_unlock_commit(buffer, event);
1969                 ftrace_trace_stack(buffer, flags, 6, pc);
1970         }
1971  out:
1972         preempt_enable_notrace();
1973         unpause_graph_tracing();
1974
1975         return len;
1976 }
1977
1978 int trace_array_vprintk(struct trace_array *tr,
1979                         unsigned long ip, const char *fmt, va_list args)
1980 {
1981         return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
1982 }
1983
1984 int trace_array_printk(struct trace_array *tr,
1985                        unsigned long ip, const char *fmt, ...)
1986 {
1987         int ret;
1988         va_list ap;
1989
1990         if (!(trace_flags & TRACE_ITER_PRINTK))
1991                 return 0;
1992
1993         va_start(ap, fmt);
1994         ret = trace_array_vprintk(tr, ip, fmt, ap);
1995         va_end(ap);
1996         return ret;
1997 }
1998
1999 int trace_array_printk_buf(struct ring_buffer *buffer,
2000                            unsigned long ip, const char *fmt, ...)
2001 {
2002         int ret;
2003         va_list ap;
2004
2005         if (!(trace_flags & TRACE_ITER_PRINTK))
2006                 return 0;
2007
2008         va_start(ap, fmt);
2009         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2010         va_end(ap);
2011         return ret;
2012 }
2013
2014 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2015 {
2016         return trace_array_vprintk(&global_trace, ip, fmt, args);
2017 }
2018 EXPORT_SYMBOL_GPL(trace_vprintk);
2019
2020 static void trace_iterator_increment(struct trace_iterator *iter)
2021 {
2022         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2023
2024         iter->idx++;
2025         if (buf_iter)
2026                 ring_buffer_read(buf_iter, NULL);
2027 }
2028
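/*
 * Peek at the next entry of @cpu without consuming it. Also updates
 * iter->ent_size with the size of the peeked event (0 if there is none).
 */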
2029 static struct trace_entry *
2030 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2031                 unsigned long *lost_events)
2032 {
2033         struct ring_buffer_event *event;
2034         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2035
2036         if (buf_iter)
2037                 event = ring_buffer_iter_peek(buf_iter, ts);
2038         else
2039                 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2040                                          lost_events);
2041
2042         if (event) {
2043                 iter->ent_size = ring_buffer_event_length(event);
2044                 return ring_buffer_event_data(event);
2045         }
2046         iter->ent_size = 0;
2047         return NULL;
2048 }
2049
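/*
 * Find the oldest entry across the per-cpu buffers by comparing
 * timestamps (or peek only at one CPU for a per_cpu trace file).
 * Optionally reports the entry's cpu, timestamp and the number of
 * events lost before it.
 */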
2050 static struct trace_entry *
2051 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2052                   unsigned long *missing_events, u64 *ent_ts)
2053 {
2054         struct ring_buffer *buffer = iter->trace_buffer->buffer;
2055         struct trace_entry *ent, *next = NULL;
2056         unsigned long lost_events = 0, next_lost = 0;
2057         int cpu_file = iter->cpu_file;
2058         u64 next_ts = 0, ts;
2059         int next_cpu = -1;
2060         int next_size = 0;
2061         int cpu;
2062
2063         /*
2064          * If we are in a per_cpu trace file, don't bother iterating over
2065          * all CPUs; peek at that CPU directly.
2066          */
2067         if (cpu_file > RING_BUFFER_ALL_CPUS) {
2068                 if (ring_buffer_empty_cpu(buffer, cpu_file))
2069                         return NULL;
2070                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2071                 if (ent_cpu)
2072                         *ent_cpu = cpu_file;
2073
2074                 return ent;
2075         }
2076
2077         for_each_tracing_cpu(cpu) {
2078
2079                 if (ring_buffer_empty_cpu(buffer, cpu))
2080                         continue;
2081
2082                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2083
2084                 /*
2085                  * Pick the entry with the smallest timestamp:
2086                  */
2087                 if (ent && (!next || ts < next_ts)) {
2088                         next = ent;
2089                         next_cpu = cpu;
2090                         next_ts = ts;
2091                         next_lost = lost_events;
2092                         next_size = iter->ent_size;
2093                 }
2094         }
2095
2096         iter->ent_size = next_size;
2097
2098         if (ent_cpu)
2099                 *ent_cpu = next_cpu;
2100
2101         if (ent_ts)
2102                 *ent_ts = next_ts;
2103
2104         if (missing_events)
2105                 *missing_events = next_lost;
2106
2107         return next;
2108 }
2109
2110 /* Find the next real entry, without updating the iterator itself */
2111 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2112                                           int *ent_cpu, u64 *ent_ts)
2113 {
2114         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2115 }
2116
2117 /* Find the next real entry, and increment the iterator to the next entry */
2118 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2119 {
2120         iter->ent = __find_next_entry(iter, &iter->cpu,
2121                                       &iter->lost_events, &iter->ts);
2122
2123         if (iter->ent)
2124                 trace_iterator_increment(iter);
2125
2126         return iter->ent ? iter : NULL;
2127 }
2128
2129 static void trace_consume(struct trace_iterator *iter)
2130 {
2131         ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2132                             &iter->lost_events);
2133 }
2134
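/* seq_file ->next(): advance the trace iterator to the entry at *pos */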
2135 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2136 {
2137         struct trace_iterator *iter = m->private;
2138         int i = (int)*pos;
2139         void *ent;
2140
2141         WARN_ON_ONCE(iter->leftover);
2142
2143         (*pos)++;
2144
2145         /* can't go backwards */
2146         if (iter->idx > i)
2147                 return NULL;
2148
2149         if (iter->idx < 0)
2150                 ent = trace_find_next_entry_inc(iter);
2151         else
2152                 ent = iter;
2153
2154         while (ent && iter->idx < i)
2155                 ent = trace_find_next_entry_inc(iter);
2156
2157         iter->pos = *pos;
2158
2159         return ent;
2160 }
2161
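/*
 * Reset the ring buffer iterator for @cpu, skipping entries that were
 * recorded before the buffer's time_start. The skipped count is saved
 * in skipped_entries so the statistics stay correct.
 */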
2162 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2163 {
2164         struct ring_buffer_event *event;
2165         struct ring_buffer_iter *buf_iter;
2166         unsigned long entries = 0;
2167         u64 ts;
2168
2169         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2170
2171         buf_iter = trace_buffer_iter(iter, cpu);
2172         if (!buf_iter)
2173                 return;
2174
2175         ring_buffer_iter_reset(buf_iter);
2176
2177         /*
2178          * With the max latency tracers, a reset may never have taken
2179          * place on a cpu. This is evident when the timestamp is before
2180          * the start of the buffer.
2181          */
2182         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2183                 if (ts >= iter->trace_buffer->time_start)
2184                         break;
2185                 entries++;
2186                 ring_buffer_read(buf_iter, NULL);
2187         }
2188
2189         per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2190 }
2191
2192 /*
2193  * The current tracer is copied to avoid taking a global lock
2194  * all around.
2195  */
2196 static void *s_start(struct seq_file *m, loff_t *pos)
2197 {
2198         struct trace_iterator *iter = m->private;
2199         struct trace_array *tr = iter->tr;
2200         int cpu_file = iter->cpu_file;
2201         void *p = NULL;
2202         loff_t l = 0;
2203         int cpu;
2204
2205         /*
2206          * Copy the tracer to avoid using a global lock all around.
2207          * iter->trace is a copy of current_trace; the name pointer may
2208          * be compared instead of using strcmp(), as iter->trace->name
2209          * will point to the same string as current_trace->name.
2210          */
2211         mutex_lock(&trace_types_lock);
2212         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2213                 *iter->trace = *tr->current_trace;
2214         mutex_unlock(&trace_types_lock);
2215
2216 #ifdef CONFIG_TRACER_MAX_TRACE
2217         if (iter->snapshot && iter->trace->use_max_tr)
2218                 return ERR_PTR(-EBUSY);
2219 #endif
2220
2221         if (!iter->snapshot)
2222                 atomic_inc(&trace_record_cmdline_disabled);
2223
2224         if (*pos != iter->pos) {
2225                 iter->ent = NULL;
2226                 iter->cpu = 0;
2227                 iter->idx = -1;
2228
2229                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2230                         for_each_tracing_cpu(cpu)
2231                                 tracing_iter_reset(iter, cpu);
2232                 } else
2233                         tracing_iter_reset(iter, cpu_file);
2234
2235                 iter->leftover = 0;
2236                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2237                         ;
2238
2239         } else {
2240                 /*
2241                  * If we overflowed the seq_file before, then we want
2242                  * to just reuse the trace_seq buffer again.
2243                  */
2244                 if (iter->leftover)
2245                         p = iter;
2246                 else {
2247                         l = *pos - 1;
2248                         p = s_next(m, p, &l);
2249                 }
2250         }
2251
2252         trace_event_read_lock();
2253         trace_access_lock(cpu_file);
2254         return p;
2255 }
2256
2257 static void s_stop(struct seq_file *m, void *p)
2258 {
2259         struct trace_iterator *iter = m->private;
2260
2261 #ifdef CONFIG_TRACER_MAX_TRACE
2262         if (iter->snapshot && iter->trace->use_max_tr)
2263                 return;
2264 #endif
2265
2266         if (!iter->snapshot)
2267                 atomic_dec(&trace_record_cmdline_disabled);
2268
2269         trace_access_unlock(iter->cpu_file);
2270         trace_event_read_unlock();
2271 }
2272
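/*
 * Sum up the events over all tracing CPUs: *entries is what is still
 * in the buffers, *total additionally includes the per-cpu overruns
 * (except for buffers with skipped entries, where both are the same).
 */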
2273 static void
2274 get_total_entries(struct trace_buffer *buf,
2275                   unsigned long *total, unsigned long *entries)
2276 {
2277         unsigned long count;
2278         int cpu;
2279
2280         *total = 0;
2281         *entries = 0;
2282
2283         for_each_tracing_cpu(cpu) {
2284                 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2285                 /*
2286                  * If this buffer has skipped entries, then we hold all
2287                  * entries for the trace and we need to ignore the
2288                  * ones before the time stamp.
2289                  */
2290                 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2291                         count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2292                         /* total is the same as the entries */
2293                         *total += count;
2294                 } else
2295                         *total += count +
2296                                 ring_buffer_overrun_cpu(buf->buffer, cpu);
2297                 *entries += count;
2298         }
2299 }
2300
2301 static void print_lat_help_header(struct seq_file *m)
2302 {
2303         seq_puts(m, "#                  _------=> CPU#            \n");
2304         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2305         seq_puts(m, "#                | / _----=> need-resched    \n");
2306         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2307         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2308         seq_puts(m, "#                |||| /     delay             \n");
2309         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2310         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2311 }
2312
2313 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2314 {
2315         unsigned long total;
2316         unsigned long entries;
2317
2318         get_total_entries(buf, &total, &entries);
2319         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2320                    entries, total, num_online_cpus());
2321         seq_puts(m, "#\n");
2322 }
2323
2324 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2325 {
2326         print_event_info(buf, m);
2327         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2328         seq_puts(m, "#              | |       |          |         |\n");
2329 }
2330
2331 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2332 {
2333         print_event_info(buf, m);
2334         seq_puts(m, "#                              _-----=> irqs-off\n");
2335         seq_puts(m, "#                             / _----=> need-resched\n");
2336         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2337         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2338         seq_puts(m, "#                            ||| /     delay\n");
2339         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2340         seq_puts(m, "#              | |       |   ||||       |         |\n");
2341 }
2342
2343 void
2344 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2345 {
2346         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2347         struct trace_buffer *buf = iter->trace_buffer;
2348         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2349         struct tracer *type = iter->trace;
2350         unsigned long entries;
2351         unsigned long total;
2352         const char *name = "preemption";
2353
2354         name = type->name;
2355
2356         get_total_entries(buf, &total, &entries);
2357
2358         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2359                    name, UTS_RELEASE);
2360         seq_puts(m, "# -----------------------------------"
2361                  "---------------------------------\n");
2362         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2363                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2364                    nsecs_to_usecs(data->saved_latency),
2365                    entries,
2366                    total,
2367                    buf->cpu,
2368 #if defined(CONFIG_PREEMPT_NONE)
2369                    "server",
2370 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2371                    "desktop",
2372 #elif defined(CONFIG_PREEMPT)
2373                    "preempt",
2374 #else
2375                    "unknown",
2376 #endif
2377                    /* These are reserved for later use */
2378                    0, 0, 0, 0);
2379 #ifdef CONFIG_SMP
2380         seq_printf(m, " #P:%d)\n", num_online_cpus());
2381 #else
2382         seq_puts(m, ")\n");
2383 #endif
2384         seq_puts(m, "#    -----------------\n");
2385         seq_printf(m, "#    | task: %.16s-%d "
2386                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2387                    data->comm, data->pid,
2388                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2389                    data->policy, data->rt_priority);
2390         seq_puts(m, "#    -----------------\n");
2391
2392         if (data->critical_start) {
2393                 seq_puts(m, "#  => started at: ");
2394                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2395                 trace_print_seq(m, &iter->seq);
2396                 seq_puts(m, "\n#  => ended at:   ");
2397                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2398                 trace_print_seq(m, &iter->seq);
2399                 seq_puts(m, "\n#\n");
2400         }
2401
2402         seq_puts(m, "#\n");
2403 }
2404
2405 static void test_cpu_buff_start(struct trace_iterator *iter)
2406 {
2407         struct trace_seq *s = &iter->seq;
2408
2409         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2410                 return;
2411
2412         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2413                 return;
2414
2415         if (cpumask_test_cpu(iter->cpu, iter->started))
2416                 return;
2417
2418         if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2419                 return;
2420
2421         cpumask_set_cpu(iter->cpu, iter->started);
2422
2423         /* Don't print the 'buffer started' message for the first entry of the trace */
2424         if (iter->idx > 1)
2425                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2426                                 iter->cpu);
2427 }
2428
2429 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2430 {
2431         struct trace_seq *s = &iter->seq;
2432         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2433         struct trace_entry *entry;
2434         struct trace_event *event;
2435
2436         entry = iter->ent;
2437
2438         test_cpu_buff_start(iter);
2439
2440         event = ftrace_find_event(entry->type);
2441
2442         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2443                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2444                         if (!trace_print_lat_context(iter))
2445                                 goto partial;
2446                 } else {
2447                         if (!trace_print_context(iter))
2448                                 goto partial;
2449                 }
2450         }
2451
2452         if (event)
2453                 return event->funcs->trace(iter, sym_flags, event);
2454
2455         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2456                 goto partial;
2457
2458         return TRACE_TYPE_HANDLED;
2459 partial:
2460         return TRACE_TYPE_PARTIAL_LINE;
2461 }
2462
2463 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2464 {
2465         struct trace_seq *s = &iter->seq;
2466         struct trace_entry *entry;
2467         struct trace_event *event;
2468
2469         entry = iter->ent;
2470
2471         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2472                 if (!trace_seq_printf(s, "%d %d %llu ",
2473                                       entry->pid, iter->cpu, iter->ts))
2474                         goto partial;
2475         }
2476
2477         event = ftrace_find_event(entry->type);
2478         if (event)
2479                 return event->funcs->raw(iter, 0, event);
2480
2481         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2482                 goto partial;
2483
2484         return TRACE_TYPE_HANDLED;
2485 partial:
2486         return TRACE_TYPE_PARTIAL_LINE;
2487 }
2488
2489 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2490 {
2491         struct trace_seq *s = &iter->seq;
2492         unsigned char newline = '\n';
2493         struct trace_entry *entry;
2494         struct trace_event *event;
2495
2496         entry = iter->ent;
2497
2498         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2499                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2500                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2501                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2502         }
2503
2504         event = ftrace_find_event(entry->type);
2505         if (event) {
2506                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2507                 if (ret != TRACE_TYPE_HANDLED)
2508                         return ret;
2509         }
2510
2511         SEQ_PUT_FIELD_RET(s, newline);
2512
2513         return TRACE_TYPE_HANDLED;
2514 }
2515
2516 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2517 {
2518         struct trace_seq *s = &iter->seq;
2519         struct trace_entry *entry;
2520         struct trace_event *event;
2521
2522         entry = iter->ent;
2523
2524         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2525                 SEQ_PUT_FIELD_RET(s, entry->pid);
2526                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2527                 SEQ_PUT_FIELD_RET(s, iter->ts);
2528         }
2529
2530         event = ftrace_find_event(entry->type);
2531         return event ? event->funcs->binary(iter, 0, event) :
2532                 TRACE_TYPE_HANDLED;
2533 }
2534
2535 int trace_empty(struct trace_iterator *iter)
2536 {
2537         struct ring_buffer_iter *buf_iter;
2538         int cpu;
2539
2540         /* If we are looking at one CPU buffer, only check that one */
2541         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2542                 cpu = iter->cpu_file;
2543                 buf_iter = trace_buffer_iter(iter, cpu);
2544                 if (buf_iter) {
2545                         if (!ring_buffer_iter_empty(buf_iter))
2546                                 return 0;
2547                 } else {
2548                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2549                                 return 0;
2550                 }
2551                 return 1;
2552         }
2553
2554         for_each_tracing_cpu(cpu) {
2555                 buf_iter = trace_buffer_iter(iter, cpu);
2556                 if (buf_iter) {
2557                         if (!ring_buffer_iter_empty(buf_iter))
2558                                 return 0;
2559                 } else {
2560                         if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2561                                 return 0;
2562                 }
2563         }
2564
2565         return 1;
2566 }
2567
2568 /* Called with trace_event_read_lock() held. */
2569 enum print_line_t print_trace_line(struct trace_iterator *iter)
2570 {
2571         enum print_line_t ret;
2572
2573         if (iter->lost_events &&
2574             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2575                                  iter->cpu, iter->lost_events))
2576                 return TRACE_TYPE_PARTIAL_LINE;
2577
2578         if (iter->trace && iter->trace->print_line) {
2579                 ret = iter->trace->print_line(iter);
2580                 if (ret != TRACE_TYPE_UNHANDLED)
2581                         return ret;
2582         }
2583
2584         if (iter->ent->type == TRACE_BPUTS &&
2585                         trace_flags & TRACE_ITER_PRINTK &&
2586                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2587                 return trace_print_bputs_msg_only(iter);
2588
2589         if (iter->ent->type == TRACE_BPRINT &&
2590                         trace_flags & TRACE_ITER_PRINTK &&
2591                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2592                 return trace_print_bprintk_msg_only(iter);
2593
2594         if (iter->ent->type == TRACE_PRINT &&
2595                         trace_flags & TRACE_ITER_PRINTK &&
2596                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2597                 return trace_print_printk_msg_only(iter);
2598
2599         if (trace_flags & TRACE_ITER_BIN)
2600                 return print_bin_fmt(iter);
2601
2602         if (trace_flags & TRACE_ITER_HEX)
2603                 return print_hex_fmt(iter);
2604
2605         if (trace_flags & TRACE_ITER_RAW)
2606                 return print_raw_fmt(iter);
2607
2608         return print_trace_fmt(iter);
2609 }
2610
2611 void trace_latency_header(struct seq_file *m)
2612 {
2613         struct trace_iterator *iter = m->private;
2614
2615         /* print nothing if the buffers are empty */
2616         if (trace_empty(iter))
2617                 return;
2618
2619         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2620                 print_trace_header(m, iter);
2621
2622         if (!(trace_flags & TRACE_ITER_VERBOSE))
2623                 print_lat_help_header(m);
2624 }
2625
2626 void trace_default_header(struct seq_file *m)
2627 {
2628         struct trace_iterator *iter = m->private;
2629
2630         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2631                 return;
2632
2633         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2634                 /* print nothing if the buffers are empty */
2635                 if (trace_empty(iter))
2636                         return;
2637                 print_trace_header(m, iter);
2638                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2639                         print_lat_help_header(m);
2640         } else {
2641                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2642                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2643                                 print_func_help_header_irq(iter->trace_buffer, m);
2644                         else
2645                                 print_func_help_header(iter->trace_buffer, m);
2646                 }
2647         }
2648 }
2649
2650 static void test_ftrace_alive(struct seq_file *m)
2651 {
2652         if (!ftrace_is_dead())
2653                 return;
2654         seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2655         seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2656 }
2657
2658 #ifdef CONFIG_TRACER_MAX_TRACE
2659 static void show_snapshot_main_help(struct seq_file *m)
2660 {
2661         seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2662         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2663         seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
2664         seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2665         seq_printf(m, "#                      (Doesn't have to be '2', works with any number that\n");
2666         seq_printf(m, "#                       is not a '0' or '1')\n");
2667 }
2668
2669 static void show_snapshot_percpu_help(struct seq_file *m)
2670 {
2671         seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2672 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2673         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2674         seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2675 #else
2676         seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2677         seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2678 #endif
2679         seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2680         seq_printf(m, "#                      (Doesn't have to be '2', works with any number that\n");
2681         seq_printf(m, "#                       is not a '0' or '1')\n");
2682 }
2683
2684 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2685 {
2686         if (iter->tr->allocated_snapshot)
2687                 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2688         else
2689                 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2690
2691         seq_printf(m, "# Snapshot commands:\n");
2692         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2693                 show_snapshot_main_help(m);
2694         else
2695                 show_snapshot_percpu_help(m);
2696 }
2697 #else
2698 /* Should never be called */
2699 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2700 #endif
2701
2702 static int s_show(struct seq_file *m, void *v)
2703 {
2704         struct trace_iterator *iter = v;
2705         int ret;
2706
2707         if (iter->ent == NULL) {
2708                 if (iter->tr) {
2709                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2710                         seq_puts(m, "#\n");
2711                         test_ftrace_alive(m);
2712                 }
2713                 if (iter->snapshot && trace_empty(iter))
2714                         print_snapshot_help(m, iter);
2715                 else if (iter->trace && iter->trace->print_header)
2716                         iter->trace->print_header(m);
2717                 else
2718                         trace_default_header(m);
2719
2720         } else if (iter->leftover) {
2721                 /*
2722                  * If we filled the seq_file buffer earlier, we
2723                  * want to just show it now.
2724                  */
2725                 ret = trace_print_seq(m, &iter->seq);
2726
2727                 /* ret should this time be zero, but you never know */
2728                 iter->leftover = ret;
2729
2730         } else {
2731                 print_trace_line(iter);
2732                 ret = trace_print_seq(m, &iter->seq);
2733                 /*
2734                  * If we overflow the seq_file buffer, then it will
2735                  * ask us for this data again at start up.
2736                  * Use that instead.
2737                  *  ret is 0 if seq_file write succeeded.
2738                  *        -1 otherwise.
2739                  */
2740                 iter->leftover = ret;
2741         }
2742
2743         return 0;
2744 }
2745
2746 static const struct seq_operations tracer_seq_ops = {
2747         .start          = s_start,
2748         .next           = s_next,
2749         .stop           = s_stop,
2750         .show           = s_show,
2751 };
2752
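/*
 * Set up a trace_iterator for reading the trace file: copy the current
 * tracer, create a ring buffer iterator for every tracing CPU (or just
 * one for a per-cpu file) and, unless the snapshot is being opened,
 * stop tracing while the buffer is read.
 */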
2753 static struct trace_iterator *
2754 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2755 {
2756         struct trace_cpu *tc = inode->i_private;
2757         struct trace_array *tr = tc->tr;
2758         struct trace_iterator *iter;
2759         int cpu;
2760
2761         if (tracing_disabled)
2762                 return ERR_PTR(-ENODEV);
2763
2764         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2765         if (!iter)
2766                 return ERR_PTR(-ENOMEM);
2767
2768         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2769                                     GFP_KERNEL);
2770         if (!iter->buffer_iter)
2771                 goto release;
2772
2773         /*
2774          * We make a copy of the current tracer to avoid concurrent
2775          * changes on it while we are reading.
2776          */
2777         mutex_lock(&trace_types_lock);
2778         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2779         if (!iter->trace)
2780                 goto fail;
2781
2782         *iter->trace = *tr->current_trace;
2783
2784         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2785                 goto fail;
2786
2787         iter->tr = tr;
2788
2789 #ifdef CONFIG_TRACER_MAX_TRACE
2790         /* Currently only the top directory has a snapshot */
2791         if (tr->current_trace->print_max || snapshot)
2792                 iter->trace_buffer = &tr->max_buffer;
2793         else
2794 #endif
2795                 iter->trace_buffer = &tr->trace_buffer;
2796         iter->snapshot = snapshot;
2797         iter->pos = -1;
2798         mutex_init(&iter->mutex);
2799         iter->cpu_file = tc->cpu;
2800
2801         /* Notify the tracer early; before we stop tracing. */
2802         if (iter->trace && iter->trace->open)
2803                 iter->trace->open(iter);
2804
2805         /* Annotate start of buffers if we had overruns */
2806         if (ring_buffer_overruns(iter->trace_buffer->buffer))
2807                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2808
2809         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2810         if (trace_clocks[trace_clock_id].in_ns)
2811                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2812
2813         /* stop the trace while dumping if we are not opening "snapshot" */
2814         if (!iter->snapshot)
2815                 tracing_stop_tr(tr);
2816
2817         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2818                 for_each_tracing_cpu(cpu) {
2819                         iter->buffer_iter[cpu] =
2820                                 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2821                 }
2822                 ring_buffer_read_prepare_sync();
2823                 for_each_tracing_cpu(cpu) {
2824                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2825                         tracing_iter_reset(iter, cpu);
2826                 }
2827         } else {
2828                 cpu = iter->cpu_file;
2829                 iter->buffer_iter[cpu] =
2830                         ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
2831                 ring_buffer_read_prepare_sync();
2832                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2833                 tracing_iter_reset(iter, cpu);
2834         }
2835
2836         tr->ref++;
2837
2838         mutex_unlock(&trace_types_lock);
2839
2840         return iter;
2841
2842  fail:
2843         mutex_unlock(&trace_types_lock);
2844         kfree(iter->trace);
2845         kfree(iter->buffer_iter);
2846 release:
2847         seq_release_private(inode, file);
2848         return ERR_PTR(-ENOMEM);
2849 }
2850
2851 int tracing_open_generic(struct inode *inode, struct file *filp)
2852 {
2853         if (tracing_disabled)
2854                 return -ENODEV;
2855
2856         filp->private_data = inode->i_private;
2857         return 0;
2858 }
2859
2860 static int tracing_release(struct inode *inode, struct file *file)
2861 {
2862         struct seq_file *m = file->private_data;
2863         struct trace_iterator *iter;
2864         struct trace_array *tr;
2865         int cpu;
2866
2867         if (!(file->f_mode & FMODE_READ))
2868                 return 0;
2869
2870         iter = m->private;
2871         tr = iter->tr;
2872
2873         mutex_lock(&trace_types_lock);
2874
2875         WARN_ON(!tr->ref);
2876         tr->ref--;
2877
2878         for_each_tracing_cpu(cpu) {
2879                 if (iter->buffer_iter[cpu])
2880                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2881         }
2882
2883         if (iter->trace && iter->trace->close)
2884                 iter->trace->close(iter);
2885
2886         if (!iter->snapshot)
2887                 /* reenable tracing if it was previously enabled */
2888                 tracing_start_tr(tr);
2889         mutex_unlock(&trace_types_lock);
2890
2891         mutex_destroy(&iter->mutex);
2892         free_cpumask_var(iter->started);
2893         kfree(iter->trace);
2894         kfree(iter->buffer_iter);
2895         seq_release_private(inode, file);
2896         return 0;
2897 }
2898
2899 static int tracing_open(struct inode *inode, struct file *file)
2900 {
2901         struct trace_iterator *iter;
2902         int ret = 0;
2903
2904         /* If this file was open for write, then erase contents */
2905         if ((file->f_mode & FMODE_WRITE) &&
2906             (file->f_flags & O_TRUNC)) {
2907                 struct trace_cpu *tc = inode->i_private;
2908                 struct trace_array *tr = tc->tr;
2909
2910                 if (tc->cpu == RING_BUFFER_ALL_CPUS)
2911                         tracing_reset_online_cpus(&tr->trace_buffer);
2912                 else
2913                         tracing_reset(&tr->trace_buffer, tc->cpu);
2914         }
2915
2916         if (file->f_mode & FMODE_READ) {
2917                 iter = __tracing_open(inode, file, false);
2918                 if (IS_ERR(iter))
2919                         ret = PTR_ERR(iter);
2920                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2921                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2922         }
2923         return ret;
2924 }
2925
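/*
 * seq_file iterator over the registered tracers (trace_types), printing
 * each tracer name separated by a space, with a newline after the last.
 */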
2926 static void *
2927 t_next(struct seq_file *m, void *v, loff_t *pos)
2928 {
2929         struct tracer *t = v;
2930
2931         (*pos)++;
2932
2933         if (t)
2934                 t = t->next;
2935
2936         return t;
2937 }
2938
2939 static void *t_start(struct seq_file *m, loff_t *pos)
2940 {
2941         struct tracer *t;
2942         loff_t l = 0;
2943
2944         mutex_lock(&trace_types_lock);
2945         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2946                 ;
2947
2948         return t;
2949 }
2950
2951 static void t_stop(struct seq_file *m, void *p)
2952 {
2953         mutex_unlock(&trace_types_lock);
2954 }
2955
2956 static int t_show(struct seq_file *m, void *v)
2957 {
2958         struct tracer *t = v;
2959
2960         if (!t)
2961                 return 0;
2962
2963         seq_printf(m, "%s", t->name);
2964         if (t->next)
2965                 seq_putc(m, ' ');
2966         else
2967                 seq_putc(m, '\n');
2968
2969         return 0;
2970 }
2971
2972 static const struct seq_operations show_traces_seq_ops = {
2973         .start          = t_start,
2974         .next           = t_next,
2975         .stop           = t_stop,
2976         .show           = t_show,
2977 };
2978
2979 static int show_traces_open(struct inode *inode, struct file *file)
2980 {
2981         if (tracing_disabled)
2982                 return -ENODEV;
2983
2984         return seq_open(file, &show_traces_seq_ops);
2985 }
2986
2987 static ssize_t
2988 tracing_write_stub(struct file *filp, const char __user *ubuf,
2989                    size_t count, loff_t *ppos)
2990 {
2991         return count;
2992 }
2993
2994 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2995 {
2996         if (file->f_mode & FMODE_READ)
2997                 return seq_lseek(file, offset, origin);
2998         else
2999                 return 0;
3000 }
3001
3002 static const struct file_operations tracing_fops = {
3003         .open           = tracing_open,
3004         .read           = seq_read,
3005         .write          = tracing_write_stub,
3006         .llseek         = tracing_seek,
3007         .release        = tracing_release,
3008 };
3009
3010 static const struct file_operations show_traces_fops = {
3011         .open           = show_traces_open,
3012         .read           = seq_read,
3013         .release        = seq_release,
3014         .llseek         = seq_lseek,
3015 };
3016
3017 /*
3018  * Only trace on a CPU if the bitmask is set:
3019  */
3020 static cpumask_var_t tracing_cpumask;
3021
3022 /*
3023  * The tracer itself will not take this lock, but still we want
3024  * to provide a consistent cpumask to user-space:
3025  */
3026 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3027
3028 /*
3029  * Temporary storage for the character representation of the
3030  * CPU bitmask (and one more byte for the newline):
3031  */
3032 static char mask_str[NR_CPUS + 1];
3033
3034 static ssize_t
3035 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3036                      size_t count, loff_t *ppos)
3037 {
3038         int len;
3039
3040         mutex_lock(&tracing_cpumask_update_lock);
3041
3042         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
3043         if (count - len < 2) {
3044                 count = -EINVAL;
3045                 goto out_err;
3046         }
3047         len += sprintf(mask_str + len, "\n");
3048         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3049
3050 out_err:
3051         mutex_unlock(&tracing_cpumask_update_lock);
3052
3053         return count;
3054 }
3055
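/*
 * Writing to the tracing cpumask file: parse the new mask and, for each
 * CPU whose bit flips, enable or disable recording on that CPU's ring
 * buffer and adjust its disabled counter accordingly.
 */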
3056 static ssize_t
3057 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3058                       size_t count, loff_t *ppos)
3059 {
3060         struct trace_array *tr = filp->private_data;
3061         cpumask_var_t tracing_cpumask_new;
3062         int err, cpu;
3063
3064         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3065                 return -ENOMEM;
3066
3067         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3068         if (err)
3069                 goto err_unlock;
3070
3071         mutex_lock(&tracing_cpumask_update_lock);
3072
3073         local_irq_disable();
3074         arch_spin_lock(&ftrace_max_lock);
3075         for_each_tracing_cpu(cpu) {
3076                 /*
3077                  * Increase/decrease the disabled counter if we are
3078                  * about to flip a bit in the cpumask:
3079                  */
3080                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
3081                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3082                         atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3083                         ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3084                 }
3085                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
3086                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3087                         atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3088                         ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3089                 }
3090         }
3091         arch_spin_unlock(&ftrace_max_lock);
3092         local_irq_enable();
3093
3094         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
3095
3096         mutex_unlock(&tracing_cpumask_update_lock);
3097         free_cpumask_var(tracing_cpumask_new);
3098
3099         return count;
3100
3101 err_unlock:
3102         free_cpumask_var(tracing_cpumask_new);
3103
3104         return err;
3105 }
3106
3107 static const struct file_operations tracing_cpumask_fops = {
3108         .open           = tracing_open_generic,
3109         .read           = tracing_cpumask_read,
3110         .write          = tracing_cpumask_write,
3111         .llseek         = generic_file_llseek,
3112 };
3113
3114 static int tracing_trace_options_show(struct seq_file *m, void *v)
3115 {
3116         struct tracer_opt *trace_opts;
3117         struct trace_array *tr = m->private;
3118         u32 tracer_flags;
3119         int i;
3120
3121         mutex_lock(&trace_types_lock);
3122         tracer_flags = tr->current_trace->flags->val;
3123         trace_opts = tr->current_trace->flags->opts;
3124
3125         for (i = 0; trace_options[i]; i++) {
3126                 if (trace_flags & (1 << i))
3127                         seq_printf(m, "%s\n", trace_options[i]);
3128                 else
3129                         seq_printf(m, "no%s\n", trace_options[i]);
3130         }
3131
3132         for (i = 0; trace_opts[i].name; i++) {
3133                 if (tracer_flags & trace_opts[i].bit)
3134                         seq_printf(m, "%s\n", trace_opts[i].name);
3135                 else
3136                         seq_printf(m, "no%s\n", trace_opts[i].name);
3137         }
3138         mutex_unlock(&trace_types_lock);
3139
3140         return 0;
3141 }
3142
3143 static int __set_tracer_option(struct tracer *trace,
3144                                struct tracer_flags *tracer_flags,
3145                                struct tracer_opt *opts, int neg)
3146 {
3147         int ret;
3148
3149         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
3150         if (ret)
3151                 return ret;
3152
3153         if (neg)
3154                 tracer_flags->val &= ~opts->bit;
3155         else
3156                 tracer_flags->val |= opts->bit;
3157         return 0;
3158 }
3159
3160 /* Try to assign a tracer specific option */
3161 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3162 {
3163         struct tracer_flags *tracer_flags = trace->flags;
3164         struct tracer_opt *opts = NULL;
3165         int i;
3166
3167         for (i = 0; tracer_flags->opts[i].name; i++) {
3168                 opts = &tracer_flags->opts[i];
3169
3170                 if (strcmp(cmp, opts->name) == 0)
3171                         return __set_tracer_option(trace, trace->flags,
3172                                                    opts, neg);
3173         }
3174
3175         return -EINVAL;
3176 }
3177
3178 /* Some tracers require overwrite to stay enabled */
3179 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3180 {
3181         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3182                 return -1;
3183
3184         return 0;
3185 }
3186
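/*
 * Set or clear one bit in trace_flags. The current tracer may veto the
 * change via its flag_changed() callback, and some flags carry side
 * effects (cmdline recording, ring buffer overwrite mode, trace_printk
 * comm recording) that are applied here as well.
 */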
3187 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3188 {
3189         /* do nothing if flag is already set */
3190         if (!!(trace_flags & mask) == !!enabled)
3191                 return 0;
3192
3193         /* Give the tracer a chance to approve the change */
3194         if (tr->current_trace->flag_changed)
3195                 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
3196                         return -EINVAL;
3197
3198         if (enabled)
3199                 trace_flags |= mask;
3200         else
3201                 trace_flags &= ~mask;
3202
3203         if (mask == TRACE_ITER_RECORD_CMD)
3204                 trace_event_enable_cmd_record(enabled);
3205
3206         if (mask == TRACE_ITER_OVERWRITE) {
3207                 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3208 #ifdef CONFIG_TRACER_MAX_TRACE
3209                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3210 #endif
3211         }
3212
3213         if (mask == TRACE_ITER_PRINTK)
3214                 trace_printk_start_stop_comm(enabled);
3215
3216         return 0;
3217 }
3218
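/*
 * Apply one option string written to trace_options, e.g. "print-parent"
 * or "noprint-parent". The core trace options are tried first, then the
 * current tracer's private options.
 */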
3219 static int trace_set_options(struct trace_array *tr, char *option)
3220 {
3221         char *cmp;
3222         int neg = 0;
3223         int ret = -ENODEV;
3224         int i;
3225
3226         cmp = strstrip(option);
3227
3228         if (strncmp(cmp, "no", 2) == 0) {
3229                 neg = 1;
3230                 cmp += 2;
3231         }
3232
3233         mutex_lock(&trace_types_lock);
3234
3235         for (i = 0; trace_options[i]; i++) {
3236                 if (strcmp(cmp, trace_options[i]) == 0) {
3237                         ret = set_tracer_flag(tr, 1 << i, !neg);
3238                         break;
3239                 }
3240         }
3241
3242         /* If no option could be set, test the specific tracer options */
3243         if (!trace_options[i])
3244                 ret = set_tracer_option(tr->current_trace, cmp, neg);
3245
3246         mutex_unlock(&trace_types_lock);
3247
3248         return ret;
3249 }
3250
3251 static ssize_t
3252 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3253                         size_t cnt, loff_t *ppos)
3254 {
3255         struct seq_file *m = filp->private_data;
3256         struct trace_array *tr = m->private;
3257         char buf[64];
3258         int ret;
3259
3260         if (cnt >= sizeof(buf))
3261                 return -EINVAL;
3262
3263         if (copy_from_user(&buf, ubuf, cnt))
3264                 return -EFAULT;
3265
3266         buf[cnt] = 0;
3267
3268         ret = trace_set_options(tr, buf);
3269         if (ret < 0)
3270                 return ret;
3271
3272         *ppos += cnt;
3273
3274         return cnt;
3275 }
3276
3277 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3278 {
3279         if (tracing_disabled)
3280                 return -ENODEV;
3281
3282         return single_open(file, tracing_trace_options_show, inode->i_private);
3283 }
3284
3285 static const struct file_operations tracing_iter_fops = {
3286         .open           = tracing_trace_options_open,
3287         .read           = seq_read,
3288         .llseek         = seq_lseek,
3289         .release        = single_release,
3290         .write          = tracing_trace_options_write,
3291 };
3292
3293 static const char readme_msg[] =
3294         "tracing mini-HOWTO:\n\n"
3295         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
3296         "# cat /sys/kernel/debug/tracing/available_tracers\n"
3297         "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
3298         "# cat /sys/kernel/debug/tracing/current_tracer\n"
3299         "nop\n"
3300         "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
3301         "# cat /sys/kernel/debug/tracing/current_tracer\n"
3302         "wakeup\n"
3303         "# cat /sys/kernel/debug/tracing/trace_options\n"
3304         "noprint-parent nosym-offset nosym-addr noverbose\n"
3305         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
3306         "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
3307         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
3308         "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
3309 ;
3310
3311 static ssize_t
3312 tracing_readme_read(struct file *filp, char __user *ubuf,
3313                        size_t cnt, loff_t *ppos)
3314 {
3315         return simple_read_from_buffer(ubuf, cnt, ppos,
3316                                         readme_msg, strlen(readme_msg));
3317 }
3318
3319 static const struct file_operations tracing_readme_fops = {
3320         .open           = tracing_open_generic,
3321         .read           = tracing_readme_read,
3322         .llseek         = generic_file_llseek,
3323 };
3324
3325 static ssize_t
3326 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3327                                 size_t cnt, loff_t *ppos)
3328 {
3329         char *buf_comm;
3330         char *file_buf;
3331         char *buf;
3332         int len = 0;
3333         int pid;
3334         int i;
3335
3336         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3337         if (!file_buf)
3338                 return -ENOMEM;
3339
3340         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3341         if (!buf_comm) {
3342                 kfree(file_buf);
3343                 return -ENOMEM;
3344         }
3345
3346         buf = file_buf;
3347
3348         for (i = 0; i < SAVED_CMDLINES; i++) {
3349                 int r;
3350
3351                 pid = map_cmdline_to_pid[i];
3352                 if (pid == -1 || pid == NO_CMDLINE_MAP)
3353                         continue;
3354
3355                 trace_find_cmdline(pid, buf_comm);
3356                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3357                 buf += r;
3358                 len += r;
3359         }
3360
3361         len = simple_read_from_buffer(ubuf, cnt, ppos,
3362                                       file_buf, len);
3363
3364         kfree(file_buf);
3365         kfree(buf_comm);
3366
3367         return len;
3368 }
3369
3370 static const struct file_operations tracing_saved_cmdlines_fops = {
3371         .open           = tracing_open_generic,
3372         .read           = tracing_saved_cmdlines_read,
3373         .llseek         = generic_file_llseek,
3374 };
3375
3376 static ssize_t
3377 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3378                        size_t cnt, loff_t *ppos)
3379 {
3380         struct trace_array *tr = filp->private_data;
3381         char buf[MAX_TRACER_SIZE+2];
3382         int r;
3383
3384         mutex_lock(&trace_types_lock);
3385         r = sprintf(buf, "%s\n", tr->current_trace->name);
3386         mutex_unlock(&trace_types_lock);
3387
3388         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3389 }
3390
3391 int tracer_init(struct tracer *t, struct trace_array *tr)
3392 {
3393         tracing_reset_online_cpus(&tr->trace_buffer);
3394         return t->init(tr);
3395 }
3396
3397 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3398 {
3399         int cpu;
3400
3401         for_each_tracing_cpu(cpu)
3402                 per_cpu_ptr(buf->data, cpu)->entries = val;
3403 }
3404
3405 #ifdef CONFIG_TRACER_MAX_TRACE
3406 /* resize @trace_buf's buffer to the size of @size_buf's entries */
3407 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3408                                         struct trace_buffer *size_buf, int cpu_id)
3409 {
3410         int cpu, ret = 0;
3411
3412         if (cpu_id == RING_BUFFER_ALL_CPUS) {
3413                 for_each_tracing_cpu(cpu) {
3414                         ret = ring_buffer_resize(trace_buf->buffer,
3415                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3416                         if (ret < 0)
3417                                 break;
3418                         per_cpu_ptr(trace_buf->data, cpu)->entries =
3419                                 per_cpu_ptr(size_buf->data, cpu)->entries;
3420                 }
3421         } else {
3422                 ret = ring_buffer_resize(trace_buf->buffer,
3423                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3424                 if (ret == 0)
3425                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3426                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
3427         }
3428
3429         return ret;
3430 }
3431 #endif /* CONFIG_TRACER_MAX_TRACE */
3432
3433 static int __tracing_resize_ring_buffer(struct trace_array *tr,
3434                                         unsigned long size, int cpu)
3435 {
3436         int ret;
3437
3438         /*
3439          * If kernel or user changes the size of the ring buffer
3440          * we use the size that was given, and we can forget about
3441          * expanding it later.
3442          */
3443         ring_buffer_expanded = true;
3444
3445         /* May be called before buffers are initialized */
3446         if (!tr->trace_buffer.buffer)
3447                 return 0;
3448
3449         ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3450         if (ret < 0)
3451                 return ret;
3452
3453 #ifdef CONFIG_TRACER_MAX_TRACE
3454         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3455             !tr->current_trace->use_max_tr)
3456                 goto out;
3457
3458         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3459         if (ret < 0) {
3460                 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3461                                                      &tr->trace_buffer, cpu);
3462                 if (r < 0) {
3463                         /*
3464                          * We are left with a max buffer of a
3465                          * different size than the main buffer.
3466                          * The max buffer is our "snapshot" buffer.
3467                          * When a tracer needs a snapshot (one of the
3468                          * latency tracers), it swaps the max buffer
3469                          * with the saved snapshot. We succeeded in
3470                          * updating the size of the main buffer, but failed
3471                          * to update the size of the max buffer. The attempt
3472                          * to reset the main buffer to its original size
3473                          * failed as well. This is very unlikely to
3474                          * happen, but if it does, warn and kill all
3475                          * tracing.
3476                          */
3477                         WARN_ON(1);
3478                         tracing_disabled = 1;
3479                 }
3480                 return ret;
3481         }
3482
3483         if (cpu == RING_BUFFER_ALL_CPUS)
3484                 set_buffer_entries(&tr->max_buffer, size);
3485         else
3486                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3487
3488  out:
3489 #endif /* CONFIG_TRACER_MAX_TRACE */
3490
3491         if (cpu == RING_BUFFER_ALL_CPUS)
3492                 set_buffer_entries(&tr->trace_buffer, size);
3493         else
3494                 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3495
3496         return ret;
3497 }
3498
3499 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3500                                           unsigned long size, int cpu_id)
3501 {
3502         int ret = size;
3503
3504         mutex_lock(&trace_types_lock);
3505
3506         if (cpu_id != RING_BUFFER_ALL_CPUS) {
3507                 /* make sure this cpu is enabled in the mask */
3508                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3509                         ret = -EINVAL;
3510                         goto out;
3511                 }
3512         }
3513
3514         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3515         if (ret < 0)
3516                 ret = -ENOMEM;
3517
3518 out:
3519         mutex_unlock(&trace_types_lock);
3520
3521         return ret;
3522 }
3523
3524
3525 /**
3526  * tracing_update_buffers - used by tracing facility to expand ring buffers
3527  *
3528  * To save memory on systems that have tracing configured in but never
3529  * use it, the ring buffers are initially set to a minimum size. Once
3530  * a user starts to use the tracing facility, they need to grow to
3531  * their default size.
3532  *
3533  * This function is to be called when a tracer is about to be used.
3534  */
3535 int tracing_update_buffers(void)
3536 {
3537         int ret = 0;
3538
3539         mutex_lock(&trace_types_lock);
3540         if (!ring_buffer_expanded)
3541                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3542                                                 RING_BUFFER_ALL_CPUS);
3543         mutex_unlock(&trace_types_lock);
3544
3545         return ret;
3546 }
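/*
 * Sketch of a typical caller (illustrative, not from the original
 * file): code that is about to start tracing would do
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * so that the ring buffers are expanded from their boot-time minimum
 * before any entries are recorded.
 */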
3547
3548 struct trace_option_dentry;
3549
3550 static struct trace_option_dentry *
3551 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3552
3553 static void
3554 destroy_trace_option_files(struct trace_option_dentry *topts);
3555
3556 static int tracing_set_tracer(const char *buf)
3557 {
3558         static struct trace_option_dentry *topts;
3559         struct trace_array *tr = &global_trace;
3560         struct tracer *t;
3561 #ifdef CONFIG_TRACER_MAX_TRACE
3562         bool had_max_tr;
3563 #endif
3564         int ret = 0;
3565
3566         mutex_lock(&trace_types_lock);
3567
3568         if (!ring_buffer_expanded) {
3569                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3570                                                 RING_BUFFER_ALL_CPUS);
3571                 if (ret < 0)
3572                         goto out;
3573                 ret = 0;
3574         }
3575
3576         for (t = trace_types; t; t = t->next) {
3577                 if (strcmp(t->name, buf) == 0)
3578                         break;
3579         }
3580         if (!t) {
3581                 ret = -EINVAL;
3582                 goto out;
3583         }
3584         if (t == tr->current_trace)
3585                 goto out;
3586
3587         trace_branch_disable();
3588
3589         tr->current_trace->enabled = false;
3590
3591         if (tr->current_trace->reset)
3592                 tr->current_trace->reset(tr);
3593
3594         /* Current trace needs to be nop_trace before synchronize_sched */
3595         tr->current_trace = &nop_trace;
3596
3597 #ifdef CONFIG_TRACER_MAX_TRACE
3598         had_max_tr = tr->allocated_snapshot;
3599
3600         if (had_max_tr && !t->use_max_tr) {
3601                 /*
3602                  * We need to make sure that update_max_tr sees that
3603                  * current_trace changed to nop_trace, to keep it from
3604                  * swapping the buffers after we resize them.
3605                  * update_max_tr is called with interrupts disabled,
3606                  * so a synchronize_sched() is sufficient.
3607                  */
3608                 synchronize_sched();
3609                 free_snapshot(tr);
3610         }
3611 #endif
3612         destroy_trace_option_files(topts);
3613
3614         topts = create_trace_option_files(tr, t);
3615
3616 #ifdef CONFIG_TRACER_MAX_TRACE
3617         if (t->use_max_tr && !had_max_tr) {
3618                 ret = alloc_snapshot(tr);
3619                 if (ret < 0)
3620                         goto out;
3621         }
3622 #endif
3623
3624         if (t->init) {
3625                 ret = tracer_init(t, tr);
3626                 if (ret)
3627                         goto out;
3628         }
3629
3630         tr->current_trace = t;
3631         tr->current_trace->enabled = true;
3632         trace_branch_enable(tr);
3633  out:
3634         mutex_unlock(&trace_types_lock);
3635
3636         return ret;
3637 }
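/*
 * Illustrative usage (not part of the original file): this path is
 * normally reached by writing a tracer name to the current_tracer file,
 * for example
 *
 *	# echo function > /sys/kernel/debug/tracing/current_tracer
 *
 * which goes through tracing_set_trace_write() below.
 */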
3638
3639 static ssize_t
3640 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3641                         size_t cnt, loff_t *ppos)
3642 {
3643         char buf[MAX_TRACER_SIZE+1];
3644         int i;
3645         size_t ret;
3646         int err;
3647
3648         ret = cnt;
3649
3650         if (cnt > MAX_TRACER_SIZE)
3651                 cnt = MAX_TRACER_SIZE;
3652
3653         if (copy_from_user(&buf, ubuf, cnt))
3654                 return -EFAULT;
3655
3656         buf[cnt] = 0;
3657
3658         /* strip ending whitespace. */
3659         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3660                 buf[i] = 0;
3661
3662         err = tracing_set_tracer(buf);
3663         if (err)
3664                 return err;
3665
3666         *ppos += ret;
3667
3668         return ret;
3669 }
3670
3671 static ssize_t
3672 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3673                      size_t cnt, loff_t *ppos)
3674 {
3675         unsigned long *ptr = filp->private_data;
3676         char buf[64];
3677         int r;
3678
3679         r = snprintf(buf, sizeof(buf), "%ld\n",
3680                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3681         if (r > sizeof(buf))
3682                 r = sizeof(buf);
3683         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3684 }
3685
3686 static ssize_t
3687 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3688                       size_t cnt, loff_t *ppos)
3689 {
3690         unsigned long *ptr = filp->private_data;
3691         unsigned long val;
3692         int ret;
3693
3694         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3695         if (ret)
3696                 return ret;
3697
3698         *ptr = val * 1000;
3699
3700         return cnt;
3701 }
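/*
 * Note (illustrative, not from the original file): the value written to
 * this file is taken in microseconds and stored in nanoseconds (hence
 * the multiply by 1000 above), while reads convert back with
 * nsecs_to_usecs(). For example,
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *
 * effectively resets the recorded maximum latency.
 */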
3702
3703 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3704 {
3705         struct trace_cpu *tc = inode->i_private;
3706         struct trace_array *tr = tc->tr;
3707         struct trace_iterator *iter;
3708         int ret = 0;
3709
3710         if (tracing_disabled)
3711                 return -ENODEV;
3712
3713         mutex_lock(&trace_types_lock);
3714
3715         /* create a buffer to store the information to pass to userspace */
3716         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3717         if (!iter) {
3718                 ret = -ENOMEM;
3719                 goto out;
3720         }
3721
3722         /*
3723          * We make a copy of the current tracer to avoid concurrent
3724          * changes on it while we are reading.
3725          */
3726         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3727         if (!iter->trace) {
3728                 ret = -ENOMEM;
3729                 goto fail;
3730         }
3731         *iter->trace = *tr->current_trace;
3732
3733         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3734                 ret = -ENOMEM;
3735                 goto fail;
3736         }
3737
3738         /* trace pipe does not show start of buffer */
3739         cpumask_setall(iter->started);
3740
3741         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3742                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3743
3744         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3745         if (trace_clocks[trace_clock_id].in_ns)
3746                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3747
3748         iter->cpu_file = tc->cpu;
3749         iter->tr = tc->tr;
3750         iter->trace_buffer = &tc->tr->trace_buffer;
3751         mutex_init(&iter->mutex);
3752         filp->private_data = iter;
3753
3754         if (iter->trace->pipe_open)
3755                 iter->trace->pipe_open(iter);
3756
3757         nonseekable_open(inode, filp);
3758 out:
3759         mutex_unlock(&trace_types_lock);
3760         return ret;
3761
3762 fail:
3763         kfree(iter->trace);
3764         kfree(iter);
3765         mutex_unlock(&trace_types_lock);
3766         return ret;
3767 }
3768
3769 static int tracing_release_pipe(struct inode *inode, struct file *file)
3770 {
3771         struct trace_iterator *iter = file->private_data;
3772
3773         mutex_lock(&trace_types_lock);
3774
3775         if (iter->trace->pipe_close)
3776                 iter->trace->pipe_close(iter);
3777
3778         mutex_unlock(&trace_types_lock);
3779
3780         free_cpumask_var(iter->started);
3781         mutex_destroy(&iter->mutex);
3782         kfree(iter->trace);
3783         kfree(iter);
3784
3785         return 0;
3786 }
3787
3788 static unsigned int
3789 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
3790 {
3791         /* Iterators are static; they should be filled or empty */
3792         if (trace_buffer_iter(iter, iter->cpu_file))
3793                 return POLLIN | POLLRDNORM;
3794
3795         if (trace_flags & TRACE_ITER_BLOCK)
3796                 /*
3797                  * Always select as readable when in blocking mode
3798                  */
3799                 return POLLIN | POLLRDNORM;
3800         else
3801                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
3802                                              filp, poll_table);
3803 }
3804
3805 static unsigned int
3806 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3807 {
3808         struct trace_iterator *iter = filp->private_data;
3809
3810         return trace_poll(iter, filp, poll_table);
3811 }
3812
3813 /*
3814  * This is a makeshift waitqueue.
3815  * A tracer might use this callback in some rare cases:
3816  *
3817  *  1) the current tracer might hold the runqueue lock when it wakes up
3818  *     a reader, hence a deadlock (sched, function, and function graph tracers)
3819  *  2) the function tracers trace all functions, and we don't want
3820  *     the overhead of calling wake_up and friends
3821  *     (or of tracing them)
3822  *
3823  *     Anyway, this really is a very primitive wakeup.
3824  */
3825 void poll_wait_pipe(struct trace_iterator *iter)
3826 {
3827         set_current_state(TASK_INTERRUPTIBLE);
3828         /* sleep for 100 msecs, and try again. */
3829         schedule_timeout(HZ / 10);
3830 }
3831
3832 /* Must be called with trace_types_lock mutex held. */
3833 static int tracing_wait_pipe(struct file *filp)
3834 {
3835         struct trace_iterator *iter = filp->private_data;
3836
3837         while (trace_empty(iter)) {
3838
3839                 if ((filp->f_flags & O_NONBLOCK)) {
3840                         return -EAGAIN;
3841                 }
3842
3843                 mutex_unlock(&iter->mutex);
3844
3845                 iter->trace->wait_pipe(iter);
3846
3847                 mutex_lock(&iter->mutex);
3848
3849                 if (signal_pending(current))
3850                         return -EINTR;
3851
3852                 /*
3853                  * We block until we read something and tracing is disabled.
3854                  * We also keep blocking while tracing is disabled if we have
3855                  * never read anything. This allows a user to cat this file, and
3856                  * then enable tracing. But after we have read something,
3857                  * we give an EOF when tracing is disabled again.
3858                  *
3859                  * iter->pos will be 0 if we haven't read anything.
3860                  */
3861                 if (!tracing_is_enabled() && iter->pos)
3862                         break;
3863         }
3864
3865         return 1;
3866 }
3867
3868 /*
3869  * Consumer reader.
3870  */
3871 static ssize_t
3872 tracing_read_pipe(struct file *filp, char __user *ubuf,
3873                   size_t cnt, loff_t *ppos)
3874 {
3875         struct trace_iterator *iter = filp->private_data;
3876         struct trace_array *tr = iter->tr;
3877         ssize_t sret;
3878
3879         /* return any leftover data */
3880         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3881         if (sret != -EBUSY)
3882                 return sret;
3883
3884         trace_seq_init(&iter->seq);
3885
3886         /* copy the tracer to avoid using a global lock all around */
3887         mutex_lock(&trace_types_lock);
3888         if (unlikely(iter->trace->name != tr->current_trace->name))
3889                 *iter->trace = *tr->current_trace;
3890         mutex_unlock(&trace_types_lock);
3891
3892         /*
3893          * Avoid more than one consumer on a single file descriptor.
3894          * This is just a matter of trace coherency; the ring buffer itself
3895          * is protected.
3896          */
3897         mutex_lock(&iter->mutex);
3898         if (iter->trace->read) {
3899                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3900                 if (sret)
3901                         goto out;
3902         }
3903
3904 waitagain:
3905         sret = tracing_wait_pipe(filp);
3906         if (sret <= 0)
3907                 goto out;
3908
3909         /* stop when tracing is finished */
3910         if (trace_empty(iter)) {
3911                 sret = 0;
3912                 goto out;
3913         }
3914
3915         if (cnt >= PAGE_SIZE)
3916                 cnt = PAGE_SIZE - 1;
3917
3918         /* reset all but tr, trace, and overruns */
3919         memset(&iter->seq, 0,
3920                sizeof(struct trace_iterator) -
3921                offsetof(struct trace_iterator, seq));
3922         iter->pos = -1;
3923
3924         trace_event_read_lock();
3925         trace_access_lock(iter->cpu_file);
3926         while (trace_find_next_entry_inc(iter) != NULL) {
3927                 enum print_line_t ret;
3928                 int len = iter->seq.len;
3929
3930                 ret = print_trace_line(iter);
3931                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3932                         /* don't print partial lines */
3933                         iter->seq.len = len;
3934                         break;
3935                 }
3936                 if (ret != TRACE_TYPE_NO_CONSUME)
3937                         trace_consume(iter);
3938
3939                 if (iter->seq.len >= cnt)
3940                         break;
3941
3942                 /*
3943                  * Setting the full flag means we reached the trace_seq buffer
3944                  * size and should have left via the partial-output condition
3945                  * above: one of the trace_seq_* functions is not used properly.
3946                  */
3947                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3948                           iter->ent->type);
3949         }
3950         trace_access_unlock(iter->cpu_file);
3951         trace_event_read_unlock();
3952
3953         /* Now copy what we have to the user */
3954         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3955         if (iter->seq.readpos >= iter->seq.len)
3956                 trace_seq_init(&iter->seq);
3957
3958         /*
3959          * If there was nothing to send to user, in spite of consuming trace
3960          * entries, go back to wait for more entries.
3961          */
3962         if (sret == -EBUSY)
3963                 goto waitagain;
3964
3965 out:
3966         mutex_unlock(&iter->mutex);
3967
3968         return sret;
3969 }
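/*
 * Illustrative usage (not part of the original file): this is the
 * consuming read path behind the trace_pipe file, e.g.
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * blocks until entries are available and removes them from the ring
 * buffer as they are printed (note the trace_consume() call above),
 * unlike reads of the plain trace file.
 */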
3970
3971 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3972                                      struct pipe_buffer *buf)
3973 {
3974         __free_page(buf->page);
3975 }
3976
3977 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3978                                      unsigned int idx)
3979 {
3980         __free_page(spd->pages[idx]);
3981 }
3982
3983 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3984         .can_merge              = 0,
3985         .map                    = generic_pipe_buf_map,
3986         .unmap                  = generic_pipe_buf_unmap,
3987         .confirm                = generic_pipe_buf_confirm,
3988         .release                = tracing_pipe_buf_release,
3989         .steal                  = generic_pipe_buf_steal,
3990         .get                    = generic_pipe_buf_get,
3991 };
3992
3993 static size_t
3994 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3995 {
3996         size_t count;
3997         int ret;
3998
3999         /* Seq buffer is page-sized, exactly what we need. */
4000         for (;;) {
4001                 count = iter->seq.len;
4002                 ret = print_trace_line(iter);
4003                 count = iter->seq.len - count;
4004                 if (rem < count) {
4005                         rem = 0;
4006                         iter->seq.len -= count;
4007                         break;
4008                 }
4009                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4010                         iter->seq.len -= count;
4011                         break;
4012                 }
4013
4014                 if (ret != TRACE_TYPE_NO_CONSUME)
4015                         trace_consume(iter);
4016                 rem -= count;
4017                 if (!trace_find_next_entry_inc(iter)) {
4018                         rem = 0;
4019                         iter->ent = NULL;
4020                         break;
4021                 }
4022         }
4023
4024         return rem;
4025 }
4026
4027 static ssize_t tracing_splice_read_pipe(struct file *filp,
4028                                         loff_t *ppos,
4029                                         struct pipe_inode_info *pipe,
4030                                         size_t len,
4031                                         unsigned int flags)
4032 {
4033         struct page *pages_def[PIPE_DEF_BUFFERS];
4034         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4035         struct trace_iterator *iter = filp->private_data;
4036         struct splice_pipe_desc spd = {
4037                 .pages          = pages_def,
4038                 .partial        = partial_def,
4039                 .nr_pages       = 0, /* This gets updated below. */
4040                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4041                 .flags          = flags,
4042                 .ops            = &tracing_pipe_buf_ops,
4043                 .spd_release    = tracing_spd_release_pipe,
4044         };
4045         struct trace_array *tr = iter->tr;
4046         ssize_t ret;
4047         size_t rem;
4048         unsigned int i;
4049
4050         if (splice_grow_spd(pipe, &spd))
4051                 return -ENOMEM;
4052
4053         /* copy the tracer to avoid using a global lock all around */
4054         mutex_lock(&trace_types_lock);
4055         if (unlikely(iter->trace->name != tr->current_trace->name))
4056                 *iter->trace = *tr->current_trace;
4057         mutex_unlock(&trace_types_lock);
4058
4059         mutex_lock(&iter->mutex);
4060
4061         if (iter->trace->splice_read) {
4062                 ret = iter->trace->splice_read(iter, filp,
4063                                                ppos, pipe, len, flags);
4064                 if (ret)
4065                         goto out_err;
4066         }
4067
4068         ret = tracing_wait_pipe(filp);
4069         if (ret <= 0)
4070                 goto out_err;
4071
4072         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4073                 ret = -EFAULT;
4074                 goto out_err;
4075         }
4076
4077         trace_event_read_lock();
4078         trace_access_lock(iter->cpu_file);
4079
4080         /* Fill as many pages as possible. */
4081         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4082                 spd.pages[i] = alloc_page(GFP_KERNEL);
4083                 if (!spd.pages[i])
4084                         break;
4085
4086                 rem = tracing_fill_pipe_page(rem, iter);
4087
4088                 /* Copy the data into the page, so we can start over. */
4089                 ret = trace_seq_to_buffer(&iter->seq,
4090                                           page_address(spd.pages[i]),
4091                                           iter->seq.len);
4092                 if (ret < 0) {
4093                         __free_page(spd.pages[i]);
4094                         break;
4095                 }
4096                 spd.partial[i].offset = 0;
4097                 spd.partial[i].len = iter->seq.len;
4098
4099                 trace_seq_init(&iter->seq);
4100         }
4101
4102         trace_access_unlock(iter->cpu_file);
4103         trace_event_read_unlock();
4104         mutex_unlock(&iter->mutex);
4105
4106         spd.nr_pages = i;
4107
4108         ret = splice_to_pipe(pipe, &spd);
4109 out:
4110         splice_shrink_spd(&spd);
4111         return ret;
4112
4113 out_err:
4114         mutex_unlock(&iter->mutex);
4115         goto out;
4116 }
4117
4118 static ssize_t
4119 tracing_entries_read(struct file *filp, char __user *ubuf,
4120                      size_t cnt, loff_t *ppos)
4121 {
4122         struct trace_cpu *tc = filp->private_data;
4123         struct trace_array *tr = tc->tr;
4124         char buf[64];
4125         int r = 0;
4126         ssize_t ret;
4127
4128         mutex_lock(&trace_types_lock);
4129
4130         if (tc->cpu == RING_BUFFER_ALL_CPUS) {
4131                 int cpu, buf_size_same;
4132                 unsigned long size;
4133
4134                 size = 0;
4135                 buf_size_same = 1;
4136                 /* check if all cpu sizes are the same */
4137                 for_each_tracing_cpu(cpu) {
4138                         /* fill in the size from first enabled cpu */
4139                         if (size == 0)
4140                                 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4141                         if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4142                                 buf_size_same = 0;
4143                                 break;
4144                         }
4145                 }
4146
4147                 if (buf_size_same) {
4148                         if (!ring_buffer_expanded)
4149                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
4150                                             size >> 10,
4151                                             trace_buf_size >> 10);
4152                         else
4153                                 r = sprintf(buf, "%lu\n", size >> 10);
4154                 } else
4155                         r = sprintf(buf, "X\n");
4156         } else
4157                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
4158
4159         mutex_unlock(&trace_types_lock);
4160
4161         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4162         return ret;
4163 }
4164
4165 static ssize_t
4166 tracing_entries_write(struct file *filp, const char __user *ubuf,
4167                       size_t cnt, loff_t *ppos)
4168 {
4169         struct trace_cpu *tc = filp->private_data;
4170         unsigned long val;
4171         int ret;
4172
4173         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4174         if (ret)
4175                 return ret;
4176
4177         /* must have at least 1 entry */
4178         if (!val)
4179                 return -EINVAL;
4180
4181         /* value is in KB */
4182         val <<= 10;
4183
4184         ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
4185         if (ret < 0)
4186                 return ret;
4187
4188         *ppos += cnt;
4189
4190         return cnt;
4191 }
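/*
 * Illustrative example (not part of the original file): the written
 * value is interpreted in kilobytes (see the "val <<= 10" above), so
 *
 *	# echo 1024 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * asks for roughly 1 MB of entries per CPU when written at the top
 * level, since that resizes every per-cpu ring buffer.
 */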
4192
4193 static ssize_t
4194 tracing_total_entries_read(struct file *filp, char __user *ubuf,
4195                                 size_t cnt, loff_t *ppos)
4196 {
4197         struct trace_array *tr = filp->private_data;
4198         char buf[64];
4199         int r, cpu;
4200         unsigned long size = 0, expanded_size = 0;
4201
4202         mutex_lock(&trace_types_lock);
4203         for_each_tracing_cpu(cpu) {
4204                 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4205                 if (!ring_buffer_expanded)
4206                         expanded_size += trace_buf_size >> 10;
4207         }
4208         if (ring_buffer_expanded)
4209                 r = sprintf(buf, "%lu\n", size);
4210         else
4211                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4212         mutex_unlock(&trace_types_lock);
4213
4214         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4215 }
4216
4217 static ssize_t
4218 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4219                           size_t cnt, loff_t *ppos)
4220 {
4221         /*
4222          * There is no need to read what the user has written; this function
4223          * exists just so that using "echo" on this file does not return an error.
4224          */
4225
4226         *ppos += cnt;
4227
4228         return cnt;
4229 }
4230
4231 static int
4232 tracing_free_buffer_release(struct inode *inode, struct file *filp)
4233 {
4234         struct trace_array *tr = inode->i_private;
4235
4236         /* disable tracing ? */
4237         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4238                 tracing_off();
4239         /* resize the ring buffer to 0 */
4240         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4241
4242         return 0;
4243 }
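/*
 * Illustrative note (not part of the original file): writing anything to
 * the free_buffer file and closing it, e.g.
 *
 *	# echo > /sys/kernel/debug/tracing/free_buffer
 *
 * shrinks the ring buffer to zero entries; if TRACE_ITER_STOP_ON_FREE is
 * set (checked above), tracing is turned off first.
 */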
4244
4245 static ssize_t
4246 tracing_mark_write(struct file *filp, const char __user *ubuf,
4247                                         size_t cnt, loff_t *fpos)
4248 {
4249         unsigned long addr = (unsigned long)ubuf;
4250         struct ring_buffer_event *event;
4251         struct ring_buffer *buffer;
4252         struct print_entry *entry;
4253         unsigned long irq_flags;
4254         struct page *pages[2];
4255         void *map_page[2];
4256         int nr_pages = 1;
4257         ssize_t written;
4258         int offset;
4259         int size;
4260         int len;
4261         int ret;
4262         int i;
4263
4264         if (tracing_disabled)
4265                 return -EINVAL;
4266
4267         if (!(trace_flags & TRACE_ITER_MARKERS))
4268                 return -EINVAL;
4269
4270         if (cnt > TRACE_BUF_SIZE)
4271                 cnt = TRACE_BUF_SIZE;
4272
4273         /*
4274          * Userspace is injecting traces into the kernel trace buffer.
4275          * We want to be as non-intrusive as possible.
4276          * To do so, we do not want to allocate any special buffers
4277          * or take any locks, but instead write the userspace data
4278          * straight into the ring buffer.
4279          *
4280          * First we need to pin the userspace buffer into memory.
4281          * It is most likely already resident, since userspace just
4282          * referenced it, but there is no guarantee. By using get_user_pages_fast()
4283          * and kmap_atomic/kunmap_atomic() we can get access to the
4284          * pages directly. We then write the data directly into the
4285          * ring buffer.
4286          */
4287         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4288
4289         /* check if we cross pages */
4290         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4291                 nr_pages = 2;
4292
4293         offset = addr & (PAGE_SIZE - 1);
4294         addr &= PAGE_MASK;
4295
4296         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4297         if (ret < nr_pages) {
4298                 while (--ret >= 0)
4299                         put_page(pages[ret]);
4300                 written = -EFAULT;
4301                 goto out;
4302         }
4303
4304         for (i = 0; i < nr_pages; i++)
4305                 map_page[i] = kmap_atomic(pages[i]);
4306
4307         local_save_flags(irq_flags);
4308         size = sizeof(*entry) + cnt + 2; /* possible \n added */
4309         buffer = global_trace.trace_buffer.buffer;
4310         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4311                                           irq_flags, preempt_count());
4312         if (!event) {
4313                 /* Ring buffer disabled, return as if not open for write */
4314                 written = -EBADF;
4315                 goto out_unlock;
4316         }
4317
4318         entry = ring_buffer_event_data(event);
4319         entry->ip = _THIS_IP_;
4320
4321         if (nr_pages == 2) {
4322                 len = PAGE_SIZE - offset;
4323                 memcpy(&entry->buf, map_page[0] + offset, len);
4324                 memcpy(&entry->buf[len], map_page[1], cnt - len);
4325         } else
4326                 memcpy(&entry->buf, map_page[0] + offset, cnt);
4327
4328         if (entry->buf[cnt - 1] != '\n') {
4329                 entry->buf[cnt] = '\n';
4330                 entry->buf[cnt + 1] = '\0';
4331         } else
4332                 entry->buf[cnt] = '\0';
4333
4334         __buffer_unlock_commit(buffer, event);
4335
4336         written = cnt;
4337
4338         *fpos += written;
4339
4340  out_unlock:
4341         for (i = 0; i < nr_pages; i++) {
4342                 kunmap_atomic(map_page[i]);
4343                 put_page(pages[i]);
4344         }
4345  out:
4346         return written;
4347 }
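/*
 * Illustrative usage (not part of the original file): userspace can
 * inject a line into the trace with something like
 *
 *	# echo "hello from userspace" > /sys/kernel/debug/tracing/trace_marker
 *
 * which lands here and is written straight into the ring buffer as a
 * print entry.
 */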
4348
4349 static int tracing_clock_show(struct seq_file *m, void *v)
4350 {
4351         struct trace_array *tr = m->private;
4352         int i;
4353
4354         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4355                 seq_printf(m,
4356                         "%s%s%s%s", i ? " " : "",
4357                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4358                         i == tr->clock_id ? "]" : "");
4359         seq_putc(m, '\n');
4360
4361         return 0;
4362 }
4363
4364 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4365                                    size_t cnt, loff_t *fpos)
4366 {
4367         struct seq_file *m = filp->private_data;
4368         struct trace_array *tr = m->private;
4369         char buf[64];
4370         const char *clockstr;
4371         int i;
4372
4373         if (cnt >= sizeof(buf))
4374                 return -EINVAL;
4375
4376         if (copy_from_user(&buf, ubuf, cnt))
4377                 return -EFAULT;
4378
4379         buf[cnt] = 0;
4380
4381         clockstr = strstrip(buf);
4382
4383         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4384                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4385                         break;
4386         }
4387         if (i == ARRAY_SIZE(trace_clocks))
4388                 return -EINVAL;
4389
4390         mutex_lock(&trace_types_lock);
4391
4392         tr->clock_id = i;
4393
4394         ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4395
4396         /*
4397          * New clock may not be consistent with the previous clock.
4398          * Reset the buffer so that it doesn't have incomparable timestamps.
4399          */
4400         tracing_reset_online_cpus(&global_trace.trace_buffer);
4401
4402 #ifdef CONFIG_TRACER_MAX_TRACE
4403         if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4404                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4405         tracing_reset_online_cpus(&global_trace.max_buffer);
4406 #endif
4407
4408         mutex_unlock(&trace_types_lock);
4409
4410         *fpos += cnt;
4411
4412         return cnt;
4413 }
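/*
 * Illustrative usage (not part of the original file): the clock used to
 * timestamp events can be switched at run time, e.g.
 *
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * The currently selected clock is shown in brackets when the file is
 * read (see tracing_clock_show() above), and the ring buffer is reset
 * on a clock change, so existing entries are lost.
 */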
4414
4415 static int tracing_clock_open(struct inode *inode, struct file *file)
4416 {
4417         if (tracing_disabled)
4418                 return -ENODEV;
4419
4420         return single_open(file, tracing_clock_show, inode->i_private);
4421 }
4422
4423 struct ftrace_buffer_info {
4424         struct trace_iterator   iter;
4425         void                    *spare;
4426         unsigned int            read;
4427 };
4428
4429 #ifdef CONFIG_TRACER_SNAPSHOT
4430 static int tracing_snapshot_open(struct inode *inode, struct file *file)
4431 {
4432         struct trace_cpu *tc = inode->i_private;
4433         struct trace_iterator *iter;
4434         struct seq_file *m;
4435         int ret = 0;
4436
4437         if (file->f_mode & FMODE_READ) {
4438                 iter = __tracing_open(inode, file, true);
4439                 if (IS_ERR(iter))
4440                         ret = PTR_ERR(iter);
4441         } else {
4442                 /* Writes still need the seq_file to hold the private data */
4443                 m = kzalloc(sizeof(*m), GFP_KERNEL);
4444                 if (!m)
4445                         return -ENOMEM;
4446                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4447                 if (!iter) {
4448                         kfree(m);
4449                         return -ENOMEM;
4450                 }
4451                 iter->tr = tc->tr;
4452                 iter->trace_buffer = &tc->tr->max_buffer;
4453                 iter->cpu_file = tc->cpu;
4454                 m->private = iter;
4455                 file->private_data = m;
4456         }
4457
4458         return ret;
4459 }
4460
4461 static ssize_t
4462 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4463                        loff_t *ppos)
4464 {
4465         struct seq_file *m = filp->private_data;
4466         struct trace_iterator *iter = m->private;
4467         struct trace_array *tr = iter->tr;
4468         unsigned long val;
4469         int ret;
4470
4471         ret = tracing_update_buffers();
4472         if (ret < 0)
4473                 return ret;
4474
4475         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4476         if (ret)
4477                 return ret;
4478
4479         mutex_lock(&trace_types_lock);
4480
4481         if (tr->current_trace->use_max_tr) {
4482                 ret = -EBUSY;
4483                 goto out;
4484         }
4485
4486         switch (val) {
4487         case 0:
4488                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4489                         ret = -EINVAL;
4490                         break;
4491                 }
4492                 if (tr->allocated_snapshot)
4493                         free_snapshot(tr);
4494                 break;
4495         case 1:
4496 /* Only allow per-cpu swap if the ring buffer supports it */
4497 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4498                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4499                         ret = -EINVAL;
4500                         break;
4501                 }
4502 #endif
4503                 if (!tr->allocated_snapshot) {
4504                         ret = alloc_snapshot(tr);
4505                         if (ret < 0)
4506                                 break;
4507                 }
4508                 local_irq_disable();
4509                 /* Now, we're going to swap */
4510                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4511                         update_max_tr(tr, current, smp_processor_id());
4512                 else
4513                         update_max_tr_single(tr, current, iter->cpu_file);
4514                 local_irq_enable();
4515                 break;
4516         default:
4517                 if (tr->allocated_snapshot) {
4518                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4519                                 tracing_reset_online_cpus(&tr->max_buffer);
4520                         else
4521                                 tracing_reset(&tr->max_buffer, iter->cpu_file);
4522                 }
4523                 break;
4524         }
4525
4526         if (ret >= 0) {
4527                 *ppos += cnt;
4528                 ret = cnt;
4529         }
4530 out:
4531         mutex_unlock(&trace_types_lock);
4532         return ret;
4533 }
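/*
 * Illustrative usage (not part of the original file), matching the
 * switch above:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/snapshot	(take a snapshot)
 *	# echo 0 > /sys/kernel/debug/tracing/snapshot	(free the snapshot buffer)
 *	# echo 2 > /sys/kernel/debug/tracing/snapshot	(clear the snapshot contents)
 *
 * The snapshot itself is read back from the same file.
 */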
4534
4535 static int tracing_snapshot_release(struct inode *inode, struct file *file)
4536 {
4537         struct seq_file *m = file->private_data;
4538
4539         if (file->f_mode & FMODE_READ)
4540                 return tracing_release(inode, file);
4541
4542         /* If write only, the seq_file is just a stub */
4543         if (m)
4544                 kfree(m->private);
4545         kfree(m);
4546
4547         return 0;
4548 }
4549
4550 static int tracing_buffers_open(struct inode *inode, struct file *filp);
4551 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4552                                     size_t count, loff_t *ppos);
4553 static int tracing_buffers_release(struct inode *inode, struct file *file);
4554 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4555                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4556
4557 static int snapshot_raw_open(struct inode *inode, struct file *filp)
4558 {
4559         struct ftrace_buffer_info *info;
4560         int ret;
4561
4562         ret = tracing_buffers_open(inode, filp);
4563         if (ret < 0)
4564                 return ret;
4565
4566         info = filp->private_data;
4567
4568         if (info->iter.trace->use_max_tr) {
4569                 tracing_buffers_release(inode, filp);
4570                 return -EBUSY;
4571         }
4572
4573         info->iter.snapshot = true;
4574         info->iter.trace_buffer = &info->iter.tr->max_buffer;
4575
4576         return ret;
4577 }
4578
4579 #endif /* CONFIG_TRACER_SNAPSHOT */
4580
4581
4582 static const struct file_operations tracing_max_lat_fops = {
4583         .open           = tracing_open_generic,
4584         .read           = tracing_max_lat_read,
4585         .write          = tracing_max_lat_write,
4586         .llseek         = generic_file_llseek,
4587 };
4588
4589 static const struct file_operations set_tracer_fops = {
4590         .open           = tracing_open_generic,
4591         .read           = tracing_set_trace_read,
4592         .write          = tracing_set_trace_write,
4593         .llseek         = generic_file_llseek,
4594 };
4595
4596 static const struct file_operations tracing_pipe_fops = {
4597         .open           = tracing_open_pipe,
4598         .poll           = tracing_poll_pipe,
4599         .read           = tracing_read_pipe,
4600         .splice_read    = tracing_splice_read_pipe,
4601         .release        = tracing_release_pipe,
4602         .llseek         = no_llseek,
4603 };
4604
4605 static const struct file_operations tracing_entries_fops = {
4606         .open           = tracing_open_generic,
4607         .read           = tracing_entries_read,
4608         .write          = tracing_entries_write,
4609         .llseek         = generic_file_llseek,
4610 };
4611
4612 static const struct file_operations tracing_total_entries_fops = {
4613         .open           = tracing_open_generic,
4614         .read           = tracing_total_entries_read,
4615         .llseek         = generic_file_llseek,
4616 };
4617
4618 static const struct file_operations tracing_free_buffer_fops = {
4619         .write          = tracing_free_buffer_write,
4620         .release        = tracing_free_buffer_release,
4621 };
4622
4623 static const struct file_operations tracing_mark_fops = {
4624         .open           = tracing_open_generic,
4625         .write          = tracing_mark_write,
4626         .llseek         = generic_file_llseek,
4627 };
4628
4629 static const struct file_operations trace_clock_fops = {
4630         .open           = tracing_clock_open,
4631         .read           = seq_read,
4632         .llseek         = seq_lseek,
4633         .release        = single_release,
4634         .write          = tracing_clock_write,
4635 };
4636
4637 #ifdef CONFIG_TRACER_SNAPSHOT
4638 static const struct file_operations snapshot_fops = {
4639         .open           = tracing_snapshot_open,
4640         .read           = seq_read,
4641         .write          = tracing_snapshot_write,
4642         .llseek         = tracing_seek,
4643         .release        = tracing_snapshot_release,
4644 };
4645
4646 static const struct file_operations snapshot_raw_fops = {
4647         .open           = snapshot_raw_open,
4648         .read           = tracing_buffers_read,
4649         .release        = tracing_buffers_release,
4650         .splice_read    = tracing_buffers_splice_read,
4651         .llseek         = no_llseek,
4652 };
4653
4654 #endif /* CONFIG_TRACER_SNAPSHOT */
4655
4656 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4657 {
4658         struct trace_cpu *tc = inode->i_private;
4659         struct trace_array *tr = tc->tr;
4660         struct ftrace_buffer_info *info;
4661
4662         if (tracing_disabled)
4663                 return -ENODEV;
4664
4665         info = kzalloc(sizeof(*info), GFP_KERNEL);
4666         if (!info)
4667                 return -ENOMEM;
4668
4669         mutex_lock(&trace_types_lock);
4670
4671         tr->ref++;
4672
4673         info->iter.tr           = tr;
4674         info->iter.cpu_file     = tc->cpu;
4675         info->iter.trace        = tr->current_trace;
4676         info->iter.trace_buffer = &tr->trace_buffer;
4677         info->spare             = NULL;
4678         /* Force reading ring buffer for first read */
4679         info->read              = (unsigned int)-1;
4680
4681         filp->private_data = info;
4682
4683         mutex_unlock(&trace_types_lock);
4684
4685         return nonseekable_open(inode, filp);
4686 }
4687
4688 static unsigned int
4689 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
4690 {
4691         struct ftrace_buffer_info *info = filp->private_data;
4692         struct trace_iterator *iter = &info->iter;
4693
4694         return trace_poll(iter, filp, poll_table);
4695 }
4696
4697 static ssize_t
4698 tracing_buffers_read(struct file *filp, char __user *ubuf,
4699                      size_t count, loff_t *ppos)
4700 {
4701         struct ftrace_buffer_info *info = filp->private_data;
4702         struct trace_iterator *iter = &info->iter;
4703         ssize_t ret;
4704         ssize_t size;
4705
4706         if (!count)
4707                 return 0;
4708
4709         mutex_lock(&trace_types_lock);
4710
4711 #ifdef CONFIG_TRACER_MAX_TRACE
4712         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4713                 size = -EBUSY;
4714                 goto out_unlock;
4715         }
4716 #endif
4717
4718         if (!info->spare)
4719                 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
4720                                                           iter->cpu_file);
4721         size = -ENOMEM;
4722         if (!info->spare)
4723                 goto out_unlock;
4724
4725         /* Do we have previous read data to read? */
4726         if (info->read < PAGE_SIZE)
4727                 goto read;
4728
4729  again:
4730         trace_access_lock(iter->cpu_file);
4731         ret = ring_buffer_read_page(iter->trace_buffer->buffer,
4732                                     &info->spare,
4733                                     count,
4734                                     iter->cpu_file, 0);
4735         trace_access_unlock(iter->cpu_file);
4736
4737         if (ret < 0) {
4738                 if (trace_empty(iter)) {
4739                         if ((filp->f_flags & O_NONBLOCK)) {
4740                                 size = -EAGAIN;
4741                                 goto out_unlock;
4742                         }
4743                         mutex_unlock(&trace_types_lock);
4744                         iter->trace->wait_pipe(iter);
4745                         mutex_lock(&trace_types_lock);
4746                         if (signal_pending(current)) {
4747                                 size = -EINTR;
4748                                 goto out_unlock;
4749                         }
4750                         goto again;
4751                 }
4752                 size = 0;
4753                 goto out_unlock;
4754         }
4755
4756         info->read = 0;
4757  read:
4758         size = PAGE_SIZE - info->read;
4759         if (size > count)
4760                 size = count;
4761
4762         ret = copy_to_user(ubuf, info->spare + info->read, size);
4763         if (ret == size) {
4764                 size = -EFAULT;
4765                 goto out_unlock;
4766         }
4767         size -= ret;
4768
4769         *ppos += size;
4770         info->read += size;
4771
4772  out_unlock:
4773         mutex_unlock(&trace_types_lock);
4774
4775         return size;
4776 }
4777
4778 static int tracing_buffers_release(struct inode *inode, struct file *file)
4779 {
4780         struct ftrace_buffer_info *info = file->private_data;
4781         struct trace_iterator *iter = &info->iter;
4782
4783         mutex_lock(&trace_types_lock);
4784
4785         WARN_ON(!iter->tr->ref);
4786         iter->tr->ref--;
4787
4788         if (info->spare)
4789                 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
4790         kfree(info);
4791
4792         mutex_unlock(&trace_types_lock);
4793
4794         return 0;
4795 }
4796
4797 struct buffer_ref {
4798         struct ring_buffer      *buffer;
4799         void                    *page;
4800         int                     ref;
4801 };
4802
4803 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4804                                     struct pipe_buffer *buf)
4805 {
4806         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4807
4808         if (--ref->ref)
4809                 return;
4810
4811         ring_buffer_free_read_page(ref->buffer, ref->page);
4812         kfree(ref);
4813         buf->private = 0;
4814 }
4815
4816 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4817                                 struct pipe_buffer *buf)
4818 {
4819         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4820
4821         ref->ref++;
4822 }
4823
4824 /* Pipe buffer operations for a buffer. */
4825 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4826         .can_merge              = 0,
4827         .map                    = generic_pipe_buf_map,
4828         .unmap                  = generic_pipe_buf_unmap,
4829         .confirm                = generic_pipe_buf_confirm,
4830         .release                = buffer_pipe_buf_release,
4831         .steal                  = generic_pipe_buf_steal,
4832         .get                    = buffer_pipe_buf_get,
4833 };
4834
4835 /*
4836  * Callback from splice_to_pipe(); used to release pages at the end
4837  * of the spd in case we errored out while filling the pipe.
4838  */
4839 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4840 {
4841         struct buffer_ref *ref =
4842                 (struct buffer_ref *)spd->partial[i].private;
4843
4844         if (--ref->ref)
4845                 return;
4846
4847         ring_buffer_free_read_page(ref->buffer, ref->page);
4848         kfree(ref);
4849         spd->partial[i].private = 0;
4850 }
4851
4852 static ssize_t
4853 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4854                             struct pipe_inode_info *pipe, size_t len,
4855                             unsigned int flags)
4856 {
4857         struct ftrace_buffer_info *info = file->private_data;
4858         struct trace_iterator *iter = &info->iter;
4859         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4860         struct page *pages_def[PIPE_DEF_BUFFERS];
4861         struct splice_pipe_desc spd = {
4862                 .pages          = pages_def,
4863                 .partial        = partial_def,
4864                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4865                 .flags          = flags,
4866                 .ops            = &buffer_pipe_buf_ops,
4867                 .spd_release    = buffer_spd_release,
4868         };
4869         struct buffer_ref *ref;
4870         int entries, size, i;
4871         ssize_t ret;
4872
4873         mutex_lock(&trace_types_lock);
4874
4875 #ifdef CONFIG_TRACER_MAX_TRACE
4876         if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
4877                 ret = -EBUSY;
4878                 goto out;
4879         }
4880 #endif
4881
4882         if (splice_grow_spd(pipe, &spd)) {
4883                 ret = -ENOMEM;
4884                 goto out;
4885         }
4886
4887         if (*ppos & (PAGE_SIZE - 1)) {
4888                 ret = -EINVAL;
4889                 goto out;
4890         }
4891
4892         if (len & (PAGE_SIZE - 1)) {
4893                 if (len < PAGE_SIZE) {
4894                         ret = -EINVAL;
4895                         goto out;
4896                 }
4897                 len &= PAGE_MASK;
4898         }
4899
4900  again:
4901         trace_access_lock(iter->cpu_file);
4902         entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
4903
4904         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4905                 struct page *page;
4906                 int r;
4907
4908                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4909                 if (!ref)
4910                         break;
4911
4912                 ref->ref = 1;
4913                 ref->buffer = iter->trace_buffer->buffer;
4914                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
4915                 if (!ref->page) {
4916                         kfree(ref);
4917                         break;
4918                 }
4919
4920                 r = ring_buffer_read_page(ref->buffer, &ref->page,
4921                                           len, iter->cpu_file, 1);
4922                 if (r < 0) {
4923                         ring_buffer_free_read_page(ref->buffer, ref->page);
4924                         kfree(ref);
4925                         break;
4926                 }
4927
4928                 /*
4929                  * Zero out any leftover data; this page is going
4930                  * to user land.
4931                  */
4932                 size = ring_buffer_page_len(ref->page);
4933                 if (size < PAGE_SIZE)
4934                         memset(ref->page + size, 0, PAGE_SIZE - size);
4935
4936                 page = virt_to_page(ref->page);
4937
4938                 spd.pages[i] = page;
4939                 spd.partial[i].len = PAGE_SIZE;
4940                 spd.partial[i].offset = 0;
4941                 spd.partial[i].private = (unsigned long)ref;
4942                 spd.nr_pages++;
4943                 *ppos += PAGE_SIZE;
4944
4945                 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
4946         }
4947
4948         trace_access_unlock(iter->cpu_file);
4949         spd.nr_pages = i;
4950
4951         /* did we read anything? */
4952         if (!spd.nr_pages) {
4953                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
4954                         ret = -EAGAIN;
4955                         goto out;
4956                 }
4957                 mutex_unlock(&trace_types_lock);
4958                 iter->trace->wait_pipe(iter);
4959                 mutex_lock(&trace_types_lock);
4960                 if (signal_pending(current)) {
4961                         ret = -EINTR;
4962                         goto out;
4963                 }
4964                 goto again;
4965         }
4966
4967         ret = splice_to_pipe(pipe, &spd);
4968         splice_shrink_spd(&spd);
4969 out:
4970         mutex_unlock(&trace_types_lock);
4971
4972         return ret;
4973 }
4974
4975 static const struct file_operations tracing_buffers_fops = {
4976         .open           = tracing_buffers_open,
4977         .read           = tracing_buffers_read,
4978         .poll           = tracing_buffers_poll,
4979         .release        = tracing_buffers_release,
4980         .splice_read    = tracing_buffers_splice_read,
4981         .llseek         = no_llseek,
4982 };
4983
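/*
 * Read handler for the per-cpu "stats" file: report the entry, overrun,
 * byte, dropped and read-event counters for one CPU's ring buffer along
 * with the oldest and current timestamps.
 */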
4984 static ssize_t
4985 tracing_stats_read(struct file *filp, char __user *ubuf,
4986                    size_t count, loff_t *ppos)
4987 {
4988         struct trace_cpu *tc = filp->private_data;
4989         struct trace_array *tr = tc->tr;
4990         struct trace_buffer *trace_buf = &tr->trace_buffer;
4991         struct trace_seq *s;
4992         unsigned long cnt;
4993         unsigned long long t;
4994         unsigned long usec_rem;
4995         int cpu = tc->cpu;
4996
4997         s = kmalloc(sizeof(*s), GFP_KERNEL);
4998         if (!s)
4999                 return -ENOMEM;
5000
5001         trace_seq_init(s);
5002
5003         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5004         trace_seq_printf(s, "entries: %ld\n", cnt);
5005
5006         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5007         trace_seq_printf(s, "overrun: %ld\n", cnt);
5008
5009         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5010         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5011
5012         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5013         trace_seq_printf(s, "bytes: %ld\n", cnt);
5014
5015         if (trace_clocks[trace_clock_id].in_ns) {
5016                 /* local or global for trace_clock */
5017                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5018                 usec_rem = do_div(t, USEC_PER_SEC);
5019                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5020                                                                 t, usec_rem);
5021
5022                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5023                 usec_rem = do_div(t, USEC_PER_SEC);
5024                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5025         } else {
5026                 /* counter or tsc mode for trace_clock */
5027                 trace_seq_printf(s, "oldest event ts: %llu\n",
5028                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5029
5030                 trace_seq_printf(s, "now ts: %llu\n",
5031                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5032         }
5033
5034         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5035         trace_seq_printf(s, "dropped events: %ld\n", cnt);
5036
5037         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5038         trace_seq_printf(s, "read events: %ld\n", cnt);
5039
5040         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5041
5042         kfree(s);
5043
5044         return count;
5045 }
5046
5047 static const struct file_operations tracing_stats_fops = {
5048         .open           = tracing_open_generic,
5049         .read           = tracing_stats_read,
5050         .llseek         = generic_file_llseek,
5051 };
5052
5053 #ifdef CONFIG_DYNAMIC_FTRACE
5054
5055 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5056 {
5057         return 0;
5058 }
5059
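/*
 * Read handler for "dyn_ftrace_total_info": print the counter passed in
 * through private_data (ftrace_update_tot_cnt) followed by any
 * architecture specific dynamic ftrace information.
 */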
5060 static ssize_t
5061 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5062                   size_t cnt, loff_t *ppos)
5063 {
5064         static char ftrace_dyn_info_buffer[1024];
5065         static DEFINE_MUTEX(dyn_info_mutex);
5066         unsigned long *p = filp->private_data;
5067         char *buf = ftrace_dyn_info_buffer;
5068         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5069         int r;
5070
5071         mutex_lock(&dyn_info_mutex);
5072         r = sprintf(buf, "%ld ", *p);
5073
5074         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5075         buf[r++] = '\n';
5076
5077         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5078
5079         mutex_unlock(&dyn_info_mutex);
5080
5081         return r;
5082 }
5083
5084 static const struct file_operations tracing_dyn_info_fops = {
5085         .open           = tracing_open_generic,
5086         .read           = tracing_read_dyn_info,
5087         .llseek         = generic_file_llseek,
5088 };
5089 #endif /* CONFIG_DYNAMIC_FTRACE */
5090
5091 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
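/* Function probe callback: take a snapshot each time the probed function is hit. */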
5092 static void
5093 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5094 {
5095         tracing_snapshot();
5096 }
5097
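/*
 * Function probe callback with a count: keep taking snapshots until the
 * count stored in the probe data drops to zero (-1 means unlimited).
 */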
5098 static void
5099 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5100 {
5101         unsigned long *count = (unsigned long *)data;
5102
5103         if (!*count)
5104                 return;
5105
5106         if (*count != -1)
5107                 (*count)--;
5108
5109         tracing_snapshot();
5110 }
5111
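/* Show a registered snapshot probe when listing the filter (set_ftrace_filter). */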
5112 static int
5113 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5114                       struct ftrace_probe_ops *ops, void *data)
5115 {
5116         long count = (long)data;
5117
5118         seq_printf(m, "%ps:", (void *)ip);
5119
5120         seq_printf(m, "snapshot");
5121
5122         if (count == -1)
5123                 seq_printf(m, ":unlimited\n");
5124         else
5125                 seq_printf(m, ":count=%ld\n", count);
5126
5127         return 0;
5128 }
5129
5130 static struct ftrace_probe_ops snapshot_probe_ops = {
5131         .func                   = ftrace_snapshot,
5132         .print                  = ftrace_snapshot_print,
5133 };
5134
5135 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5136         .func                   = ftrace_count_snapshot,
5137         .print                  = ftrace_snapshot_print,
5138 };
5139
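/*
 * Parse and register the "snapshot" function command. Roughly, the
 * expected usage through set_ftrace_filter looks like:
 *
 *   echo 'some_function:snapshot'    > set_ftrace_filter
 *   echo 'some_function:snapshot:3'  > set_ftrace_filter  (at most 3 snapshots)
 *   echo '!some_function:snapshot'   > set_ftrace_filter  (remove the probe)
 *
 * where "some_function" stands for any traceable function. An optional
 * ":count" limits how many snapshots are taken; a leading '!'
 * unregisters the probe. The snapshot buffer is allocated when the
 * probe registers successfully.
 */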
5140 static int
5141 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5142                                char *glob, char *cmd, char *param, int enable)
5143 {
5144         struct ftrace_probe_ops *ops;
5145         void *count = (void *)-1;
5146         char *number;
5147         int ret;
5148
5149         /* hash funcs only work with set_ftrace_filter */
5150         if (!enable)
5151                 return -EINVAL;
5152
5153         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5154
5155         if (glob[0] == '!') {
5156                 unregister_ftrace_function_probe_func(glob+1, ops);
5157                 return 0;
5158         }
5159
5160         if (!param)
5161                 goto out_reg;
5162
5163         number = strsep(&param, ":");
5164
5165         if (!strlen(number))
5166                 goto out_reg;
5167
5168         /*
5169          * We use the callback data field (which is a pointer)
5170          * as our counter.
5171          */
5172         ret = kstrtoul(number, 0, (unsigned long *)&count);
5173         if (ret)
5174                 return ret;
5175
5176  out_reg:
5177         ret = register_ftrace_function_probe(glob, ops, count);
5178
5179         if (ret >= 0)
5180                 alloc_snapshot(&global_trace);
5181
5182         return ret < 0 ? ret : 0;
5183 }
5184
5185 static struct ftrace_func_command ftrace_snapshot_cmd = {
5186         .name                   = "snapshot",
5187         .func                   = ftrace_trace_snapshot_callback,
5188 };
5189
5190 static int register_snapshot_cmd(void)
5191 {
5192         return register_ftrace_command(&ftrace_snapshot_cmd);
5193 }
5194 #else
5195 static inline int register_snapshot_cmd(void) { return 0; }
5196 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5197
5198 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5199 {
5200         static int once;
5201
5202         if (tr->dir)
5203                 return tr->dir;
5204
5205         if (!debugfs_initialized())
5206                 return NULL;
5207
5208         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5209                 tr->dir = debugfs_create_dir("tracing", NULL);
5210
5211         if (!tr->dir && !once) {
5212                 once = 1;
5213                 pr_warning("Could not create debugfs directory 'tracing'\n");
5214                 return NULL;
5215         }
5216
5217         return tr->dir;
5218 }
5219
5220 struct dentry *tracing_init_dentry(void)
5221 {
5222         return tracing_init_dentry_tr(&global_trace);
5223 }
5224
5225 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5226 {
5227         struct dentry *d_tracer;
5228
5229         if (tr->percpu_dir)
5230                 return tr->percpu_dir;
5231
5232         d_tracer = tracing_init_dentry_tr(tr);
5233         if (!d_tracer)
5234                 return NULL;
5235
5236         tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5237
5238         WARN_ONCE(!tr->percpu_dir,
5239                   "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5240
5241         return tr->percpu_dir;
5242 }
5243
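/*
 * Create the per_cpu/cpuN debugfs directory for one CPU and populate it
 * with the per-cpu trace, trace_pipe, trace_pipe_raw, stats,
 * buffer_size_kb and (when configured) snapshot files.
 */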
5244 static void
5245 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5246 {
5247         struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
5248         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5249         struct dentry *d_cpu;
5250         char cpu_dir[30]; /* 30 characters should be more than enough */
5251
5252         if (!d_percpu)
5253                 return;
5254
5255         snprintf(cpu_dir, 30, "cpu%ld", cpu);
5256         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5257         if (!d_cpu) {
5258                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5259                 return;
5260         }
5261
5262         /* per cpu trace_pipe */
5263         trace_create_file("trace_pipe", 0444, d_cpu,
5264                         (void *)&data->trace_cpu, &tracing_pipe_fops);
5265
5266         /* per cpu trace */
5267         trace_create_file("trace", 0644, d_cpu,
5268                         (void *)&data->trace_cpu, &tracing_fops);
5269
5270         trace_create_file("trace_pipe_raw", 0444, d_cpu,
5271                         (void *)&data->trace_cpu, &tracing_buffers_fops);
5272
5273         trace_create_file("stats", 0444, d_cpu,
5274                         (void *)&data->trace_cpu, &tracing_stats_fops);
5275
5276         trace_create_file("buffer_size_kb", 0444, d_cpu,
5277                         (void *)&data->trace_cpu, &tracing_entries_fops);
5278
5279 #ifdef CONFIG_TRACER_SNAPSHOT
5280         trace_create_file("snapshot", 0644, d_cpu,
5281                           (void *)&data->trace_cpu, &snapshot_fops);
5282
5283         trace_create_file("snapshot_raw", 0444, d_cpu,
5284                         (void *)&data->trace_cpu, &snapshot_raw_fops);
5285 #endif
5286 }
5287
5288 #ifdef CONFIG_FTRACE_SELFTEST
5289 /* Let selftest have access to static functions in this file */
5290 #include "trace_selftest.c"
5291 #endif
5292
5293 struct trace_option_dentry {
5294         struct tracer_opt               *opt;
5295         struct tracer_flags             *flags;
5296         struct trace_array              *tr;
5297         struct dentry                   *entry;
5298 };
5299
5300 static ssize_t
5301 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5302                         loff_t *ppos)
5303 {
5304         struct trace_option_dentry *topt = filp->private_data;
5305         char *buf;
5306
5307         if (topt->flags->val & topt->opt->bit)
5308                 buf = "1\n";
5309         else
5310                 buf = "0\n";
5311
5312         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5313 }
5314
5315 static ssize_t
5316 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5317                          loff_t *ppos)
5318 {
5319         struct trace_option_dentry *topt = filp->private_data;
5320         unsigned long val;
5321         int ret;
5322
5323         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5324         if (ret)
5325                 return ret;
5326
5327         if (val != 0 && val != 1)
5328                 return -EINVAL;
5329
5330         if (!!(topt->flags->val & topt->opt->bit) != val) {
5331                 mutex_lock(&trace_types_lock);
5332                 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
5333                                           topt->opt, !val);
5334                 mutex_unlock(&trace_types_lock);
5335                 if (ret)
5336                         return ret;
5337         }
5338
5339         *ppos += cnt;
5340
5341         return cnt;
5342 }
5343
5344
5345 static const struct file_operations trace_options_fops = {
5346         .open = tracing_open_generic,
5347         .read = trace_options_read,
5348         .write = trace_options_write,
5349         .llseek = generic_file_llseek,
5350 };
5351
5352 static ssize_t
5353 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5354                         loff_t *ppos)
5355 {
5356         long index = (long)filp->private_data;
5357         char *buf;
5358
5359         if (trace_flags & (1 << index))
5360                 buf = "1\n";
5361         else
5362                 buf = "0\n";
5363
5364         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5365 }
5366
5367 static ssize_t
5368 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5369                          loff_t *ppos)
5370 {
5371         struct trace_array *tr = &global_trace;
5372         long index = (long)filp->private_data;
5373         unsigned long val;
5374         int ret;
5375
5376         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5377         if (ret)
5378                 return ret;
5379
5380         if (val != 0 && val != 1)
5381                 return -EINVAL;
5382
5383         mutex_lock(&trace_types_lock);
5384         ret = set_tracer_flag(tr, 1 << index, val);
5385         mutex_unlock(&trace_types_lock);
5386
5387         if (ret < 0)
5388                 return ret;
5389
5390         *ppos += cnt;
5391
5392         return cnt;
5393 }
5394
5395 static const struct file_operations trace_options_core_fops = {
5396         .open = tracing_open_generic,
5397         .read = trace_options_core_read,
5398         .write = trace_options_core_write,
5399         .llseek = generic_file_llseek,
5400 };
5401
5402 struct dentry *trace_create_file(const char *name,
5403                                  umode_t mode,
5404                                  struct dentry *parent,
5405                                  void *data,
5406                                  const struct file_operations *fops)
5407 {
5408         struct dentry *ret;
5409
5410         ret = debugfs_create_file(name, mode, parent, data, fops);
5411         if (!ret)
5412                 pr_warning("Could not create debugfs '%s' entry\n", name);
5413
5414         return ret;
5415 }
5416
5417
5418 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5419 {
5420         struct dentry *d_tracer;
5421
5422         if (tr->options)
5423                 return tr->options;
5424
5425         d_tracer = tracing_init_dentry_tr(tr);
5426         if (!d_tracer)
5427                 return NULL;
5428
5429         tr->options = debugfs_create_dir("options", d_tracer);
5430         if (!tr->options) {
5431                 pr_warning("Could not create debugfs directory 'options'\n");
5432                 return NULL;
5433         }
5434
5435         return tr->options;
5436 }
5437
5438 static void
5439 create_trace_option_file(struct trace_array *tr,
5440                          struct trace_option_dentry *topt,
5441                          struct tracer_flags *flags,
5442                          struct tracer_opt *opt)
5443 {
5444         struct dentry *t_options;
5445
5446         t_options = trace_options_init_dentry(tr);
5447         if (!t_options)
5448                 return;
5449
5450         topt->flags = flags;
5451         topt->opt = opt;
5452         topt->tr = tr;
5453
5454         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5455                                     &trace_options_fops);
5456
5457 }
5458
5459 static struct trace_option_dentry *
5460 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5461 {
5462         struct trace_option_dentry *topts;
5463         struct tracer_flags *flags;
5464         struct tracer_opt *opts;
5465         int cnt;
5466
5467         if (!tracer)
5468                 return NULL;
5469
5470         flags = tracer->flags;
5471
5472         if (!flags || !flags->opts)
5473                 return NULL;
5474
5475         opts = flags->opts;
5476
5477         for (cnt = 0; opts[cnt].name; cnt++)
5478                 ;
5479
5480         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5481         if (!topts)
5482                 return NULL;
5483
5484         for (cnt = 0; opts[cnt].name; cnt++)
5485                 create_trace_option_file(tr, &topts[cnt], flags,
5486                                          &opts[cnt]);
5487
5488         return topts;
5489 }
5490
5491 static void
5492 destroy_trace_option_files(struct trace_option_dentry *topts)
5493 {
5494         int cnt;
5495
5496         if (!topts)
5497                 return;
5498
5499         for (cnt = 0; topts[cnt].opt; cnt++) {
5500                 if (topts[cnt].entry)
5501                         debugfs_remove(topts[cnt].entry);
5502         }
5503
5504         kfree(topts);
5505 }
5506
5507 static struct dentry *
5508 create_trace_option_core_file(struct trace_array *tr,
5509                               const char *option, long index)
5510 {
5511         struct dentry *t_options;
5512
5513         t_options = trace_options_init_dentry(tr);
5514         if (!t_options)
5515                 return NULL;
5516
5517         return trace_create_file(option, 0644, t_options, (void *)index,
5518                                     &trace_options_core_fops);
5519 }
5520
5521 static __init void create_trace_options_dir(struct trace_array *tr)
5522 {
5523         struct dentry *t_options;
5524         int i;
5525
5526         t_options = trace_options_init_dentry(tr);
5527         if (!t_options)
5528                 return;
5529
5530         for (i = 0; trace_options[i]; i++)
5531                 create_trace_option_core_file(tr, trace_options[i], i);
5532 }
5533
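/*
 * The "tracing_on" file: reading reports whether the ring buffer is
 * currently recording; writing 0 or 1 (e.g. "echo 0 > tracing_on")
 * turns recording off or on and calls the current tracer's stop/start
 * callbacks.
 */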
5534 static ssize_t
5535 rb_simple_read(struct file *filp, char __user *ubuf,
5536                size_t cnt, loff_t *ppos)
5537 {
5538         struct trace_array *tr = filp->private_data;
5539         struct ring_buffer *buffer = tr->trace_buffer.buffer;
5540         char buf[64];
5541         int r;
5542
5543         if (buffer)
5544                 r = ring_buffer_record_is_on(buffer);
5545         else
5546                 r = 0;
5547
5548         r = sprintf(buf, "%d\n", r);
5549
5550         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5551 }
5552
5553 static ssize_t
5554 rb_simple_write(struct file *filp, const char __user *ubuf,
5555                 size_t cnt, loff_t *ppos)
5556 {
5557         struct trace_array *tr = filp->private_data;
5558         struct ring_buffer *buffer = tr->trace_buffer.buffer;
5559         unsigned long val;
5560         int ret;
5561
5562         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5563         if (ret)
5564                 return ret;
5565
5566         if (buffer) {
5567                 mutex_lock(&trace_types_lock);
5568                 if (val) {
5569                         ring_buffer_record_on(buffer);
5570                         if (tr->current_trace->start)
5571                                 tr->current_trace->start(tr);
5572                 } else {
5573                         ring_buffer_record_off(buffer);
5574                         if (tr->current_trace->stop)
5575                                 tr->current_trace->stop(tr);
5576                 }
5577                 mutex_unlock(&trace_types_lock);
5578         }
5579
5580         (*ppos)++;
5581
5582         return cnt;
5583 }
5584
5585 static const struct file_operations rb_simple_fops = {
5586         .open           = tracing_open_generic,
5587         .read           = rb_simple_read,
5588         .write          = rb_simple_write,
5589         .llseek         = default_llseek,
5590 };
5591
5592 struct dentry *trace_instance_dir;
5593
5594 static void
5595 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5596
5597 static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5598 {
5599         int cpu;
5600
5601         for_each_tracing_cpu(cpu) {
5602                 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5603                 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5604                 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5605         }
5606 }
5607
5608 static int
5609 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
5610 {
5611         enum ring_buffer_flags rb_flags;
5612
5613         rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5614
5615         buf->buffer = ring_buffer_alloc(size, rb_flags);
5616         if (!buf->buffer)
5617                 return -ENOMEM;
5618
5619         buf->data = alloc_percpu(struct trace_array_cpu);
5620         if (!buf->data) {
5621                 ring_buffer_free(buf->buffer);
5622                 return -ENOMEM;
5623         }
5624
5625         init_trace_buffers(tr, buf);
5626
5627         /* Allocate the first page for all buffers */
5628         set_buffer_entries(&tr->trace_buffer,
5629                            ring_buffer_size(tr->trace_buffer.buffer, 0));
5630
5631         return 0;
5632 }
5633
5634 static int allocate_trace_buffers(struct trace_array *tr, int size)
5635 {
5636         int ret;
5637
5638         ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5639         if (ret)
5640                 return ret;
5641
5642 #ifdef CONFIG_TRACER_MAX_TRACE
5643         ret = allocate_trace_buffer(tr, &tr->max_buffer,
5644                                     allocate_snapshot ? size : 1);
5645         if (WARN_ON(ret)) {
5646                 ring_buffer_free(tr->trace_buffer.buffer);
5647                 free_percpu(tr->trace_buffer.data);
5648                 return -ENOMEM;
5649         }
5650         tr->allocated_snapshot = allocate_snapshot;
5651
5652         /*
5653          * Only the top level trace array gets its snapshot allocated
5654          * from the kernel command line.
5655          */
5656         allocate_snapshot = false;
5657 #endif
5658         return 0;
5659 }
5660
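/*
 * Create a new trace_array instance: allocate its ring buffers, create
 * its debugfs directory under instances/ and register its event files.
 * Called from instance_mkdir() when a directory is made in instances/.
 */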
5661 static int new_instance_create(const char *name)
5662 {
5663         struct trace_array *tr;
5664         int ret;
5665
5666         mutex_lock(&trace_types_lock);
5667
5668         ret = -EEXIST;
5669         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5670                 if (tr->name && strcmp(tr->name, name) == 0)
5671                         goto out_unlock;
5672         }
5673
5674         ret = -ENOMEM;
5675         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
5676         if (!tr)
5677                 goto out_unlock;
5678
5679         tr->name = kstrdup(name, GFP_KERNEL);
5680         if (!tr->name)
5681                 goto out_free_tr;
5682
5683         raw_spin_lock_init(&tr->start_lock);
5684
5685         tr->current_trace = &nop_trace;
5686
5687         INIT_LIST_HEAD(&tr->systems);
5688         INIT_LIST_HEAD(&tr->events);
5689
5690         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
5691                 goto out_free_tr;
5692
5693         /* Holder for file callbacks */
5694         tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5695         tr->trace_cpu.tr = tr;
5696
5697         tr->dir = debugfs_create_dir(name, trace_instance_dir);
5698         if (!tr->dir)
5699                 goto out_free_tr;
5700
5701         ret = event_trace_add_tracer(tr->dir, tr);
5702         if (ret)
5703                 goto out_free_tr;
5704
5705         init_tracer_debugfs(tr, tr->dir);
5706
5707         list_add(&tr->list, &ftrace_trace_arrays);
5708
5709         mutex_unlock(&trace_types_lock);
5710
5711         return 0;
5712
5713  out_free_tr:
5714         if (tr->trace_buffer.buffer)
5715                 ring_buffer_free(tr->trace_buffer.buffer);
5716         kfree(tr->name);
5717         kfree(tr);
5718
5719  out_unlock:
5720         mutex_unlock(&trace_types_lock);
5721
5722         return ret;
5723
5724 }
5725
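/*
 * Tear down a named trace_array instance: fails with -EBUSY while it
 * still has references, otherwise removes its debugfs directory and
 * frees its ring buffers. Called from instance_rmdir().
 */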
5726 static int instance_delete(const char *name)
5727 {
5728         struct trace_array *tr;
5729         int found = 0;
5730         int ret;
5731
5732         mutex_lock(&trace_types_lock);
5733
5734         ret = -ENODEV;
5735         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5736                 if (tr->name && strcmp(tr->name, name) == 0) {
5737                         found = 1;
5738                         break;
5739                 }
5740         }
5741         if (!found)
5742                 goto out_unlock;
5743
5744         ret = -EBUSY;
5745         if (tr->ref)
5746                 goto out_unlock;
5747
5748         list_del(&tr->list);
5749
5750         event_trace_del_tracer(tr);
5751         debugfs_remove_recursive(tr->dir);
5752         free_percpu(tr->trace_buffer.data);
5753         ring_buffer_free(tr->trace_buffer.buffer);
5754
5755         kfree(tr->name);
5756         kfree(tr);
5757
5758         ret = 0;
5759
5760  out_unlock:
5761         mutex_unlock(&trace_types_lock);
5762
5763         return ret;
5764 }
5765
5766 static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
5767 {
5768         struct dentry *parent;
5769         int ret;
5770
5771         /* Paranoid: Make sure the parent is the "instances" directory */
5772         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
5773         if (WARN_ON_ONCE(parent != trace_instance_dir))
5774                 return -ENOENT;
5775
5776         /*
5777          * The inode mutex is locked, but debugfs_create_dir() will also
5778          * take the mutex. As the instances directory can not be destroyed
5779          * or changed in any other way, it is safe to unlock it, and
5780          * let the dentry try. If two users try to make the same dir at
5781          * the same time, then the new_instance_create() will determine the
5782          * winner.
5783          */
5784         mutex_unlock(&inode->i_mutex);
5785
5786         ret = new_instance_create(dentry->d_iname);
5787
5788         mutex_lock(&inode->i_mutex);
5789
5790         return ret;
5791 }
5792
5793 static int instance_rmdir(struct inode *inode, struct dentry *dentry)
5794 {
5795         struct dentry *parent;
5796         int ret;
5797
5798         /* Paranoid: Make sure the parent is the "instances" directory */
5799         parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
5800         if (WARN_ON_ONCE(parent != trace_instance_dir))
5801                 return -ENOENT;
5802
5803         /* The caller did a dget() on dentry */
5804         mutex_unlock(&dentry->d_inode->i_mutex);
5805
5806         /*
5807          * The inode mutex is locked, but removing the instance directory
5808          * will also take the mutex. As the instances directory can not be
5809          * destroyed or changed in any other way, it is safe to unlock it,
5810          * and let the dentry try. If two users try to remove the same dir
5811          * at the same time, then instance_delete() will determine the
5812          * winner.
5813          */
5814         mutex_unlock(&inode->i_mutex);
5815
5816         ret = instance_delete(dentry->d_iname);
5817
5818         mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
5819         mutex_lock(&dentry->d_inode->i_mutex);
5820
5821         return ret;
5822 }
5823
5824 static const struct inode_operations instance_dir_inode_operations = {
5825         .lookup         = simple_lookup,
5826         .mkdir          = instance_mkdir,
5827         .rmdir          = instance_rmdir,
5828 };
5829
5830 static __init void create_trace_instances(struct dentry *d_tracer)
5831 {
5832         trace_instance_dir = debugfs_create_dir("instances", d_tracer);
5833         if (WARN_ON(!trace_instance_dir))
5834                 return;
5835
5836         /* Hijack the dir inode operations, to allow mkdir */
5837         trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
5838 }
5839
5840 static void
5841 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
5842 {
5843         int cpu;
5844
5845         trace_create_file("trace_options", 0644, d_tracer,
5846                           tr, &tracing_iter_fops);
5847
5848         trace_create_file("trace", 0644, d_tracer,
5849                         (void *)&tr->trace_cpu, &tracing_fops);
5850
5851         trace_create_file("trace_pipe", 0444, d_tracer,
5852                         (void *)&tr->trace_cpu, &tracing_pipe_fops);
5853
5854         trace_create_file("buffer_size_kb", 0644, d_tracer,
5855                         (void *)&tr->trace_cpu, &tracing_entries_fops);
5856
5857         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
5858                           tr, &tracing_total_entries_fops);
5859
5860         trace_create_file("free_buffer", 0644, d_tracer,
5861                           tr, &tracing_free_buffer_fops);
5862
5863         trace_create_file("trace_marker", 0220, d_tracer,
5864                           tr, &tracing_mark_fops);
5865
5866         trace_create_file("trace_clock", 0644, d_tracer, tr,
5867                           &trace_clock_fops);
5868
5869         trace_create_file("tracing_on", 0644, d_tracer,
5870                             tr, &rb_simple_fops);
5871
5872 #ifdef CONFIG_TRACER_SNAPSHOT
5873         trace_create_file("snapshot", 0644, d_tracer,
5874                           (void *)&tr->trace_cpu, &snapshot_fops);
5875 #endif
5876
5877         for_each_tracing_cpu(cpu)
5878                 tracing_init_debugfs_percpu(tr, cpu);
5879
5880 }
5881
5882 static __init int tracer_init_debugfs(void)
5883 {
5884         struct dentry *d_tracer;
5885
5886         trace_access_lock_init();
5887
5888         d_tracer = tracing_init_dentry();
5889
5890         init_tracer_debugfs(&global_trace, d_tracer);
5891
5892         trace_create_file("tracing_cpumask", 0644, d_tracer,
5893                         &global_trace, &tracing_cpumask_fops);
5894
5895         trace_create_file("available_tracers", 0444, d_tracer,
5896                         &global_trace, &show_traces_fops);
5897
5898         trace_create_file("current_tracer", 0644, d_tracer,
5899                         &global_trace, &set_tracer_fops);
5900
5901 #ifdef CONFIG_TRACER_MAX_TRACE
5902         trace_create_file("tracing_max_latency", 0644, d_tracer,
5903                         &tracing_max_latency, &tracing_max_lat_fops);
5904 #endif
5905
5906         trace_create_file("tracing_thresh", 0644, d_tracer,
5907                         &tracing_thresh, &tracing_max_lat_fops);
5908
5909         trace_create_file("README", 0444, d_tracer,
5910                         NULL, &tracing_readme_fops);
5911
5912         trace_create_file("saved_cmdlines", 0444, d_tracer,
5913                         NULL, &tracing_saved_cmdlines_fops);
5914
5915 #ifdef CONFIG_DYNAMIC_FTRACE
5916         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
5917                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
5918 #endif
5919
5920         create_trace_instances(d_tracer);
5921
5922         create_trace_options_dir(&global_trace);
5923
5924         return 0;
5925 }
5926
5927 static int trace_panic_handler(struct notifier_block *this,
5928                                unsigned long event, void *unused)
5929 {
5930         if (ftrace_dump_on_oops)
5931                 ftrace_dump(ftrace_dump_on_oops);
5932         return NOTIFY_OK;
5933 }
5934
5935 static struct notifier_block trace_panic_notifier = {
5936         .notifier_call  = trace_panic_handler,
5937         .next           = NULL,
5938         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
5939 };
5940
5941 static int trace_die_handler(struct notifier_block *self,
5942                              unsigned long val,
5943                              void *data)
5944 {
5945         switch (val) {
5946         case DIE_OOPS:
5947                 if (ftrace_dump_on_oops)
5948                         ftrace_dump(ftrace_dump_on_oops);
5949                 break;
5950         default:
5951                 break;
5952         }
5953         return NOTIFY_OK;
5954 }
5955
5956 static struct notifier_block trace_die_notifier = {
5957         .notifier_call = trace_die_handler,
5958         .priority = 200
5959 };
5960
5961 /*
5962  * printk is set to max of 1024, we really don't need it that big.
5963  * Nothing should be printing 1000 characters anyway.
5964  */
5965 #define TRACE_MAX_PRINT         1000
5966
5967 /*
5968  * Define here KERN_TRACE so that we have one place to modify
5969  * it if we decide to change what log level the ftrace dump
5970  * should be at.
5971  */
5972 #define KERN_TRACE              KERN_EMERG
5973
5974 void
5975 trace_printk_seq(struct trace_seq *s)
5976 {
5977         /* Probably should print a warning here. */
5978         if (s->len >= TRACE_MAX_PRINT)
5979                 s->len = TRACE_MAX_PRINT;
5980
5981         /* should be zero ended, but we are paranoid. */
5982         s->buffer[s->len] = 0;
5983
5984         printk(KERN_TRACE "%s", s->buffer);
5985
5986         trace_seq_init(s);
5987 }
5988
5989 void trace_init_global_iter(struct trace_iterator *iter)
5990 {
5991         iter->tr = &global_trace;
5992         iter->trace = iter->tr->current_trace;
5993         iter->cpu_file = RING_BUFFER_ALL_CPUS;
5994         iter->trace_buffer = &global_trace.trace_buffer;
5995 }
5996
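/*
 * Dump the ftrace ring buffer to the console. Runs with interrupts off
 * under a raw spinlock and only ever dumps once; when disable_tracing
 * is set, ftrace is killed and tracing stays off afterwards, otherwise
 * tracing is re-enabled at the end.
 */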
5997 static void
5998 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
5999 {
6000         static arch_spinlock_t ftrace_dump_lock =
6001                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6002         /* use static because iter can be a bit big for the stack */
6003         static struct trace_iterator iter;
6004         unsigned int old_userobj;
6005         static int dump_ran;
6006         unsigned long flags;
6007         int cnt = 0, cpu;
6008
6009         /* only one dump */
6010         local_irq_save(flags);
6011         arch_spin_lock(&ftrace_dump_lock);
6012         if (dump_ran)
6013                 goto out;
6014
6015         dump_ran = 1;
6016
6017         tracing_off();
6018
6019         /* Did function tracer already get disabled? */
6020         if (ftrace_is_dead()) {
6021                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6022                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6023         }
6024
6025         if (disable_tracing)
6026                 ftrace_kill();
6027
6028         /* Simulate the iterator */
6029         trace_init_global_iter(&iter);
6030
6031         for_each_tracing_cpu(cpu) {
6032                 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6033         }
6034
6035         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6036
6037         /* don't look at user memory in panic mode */
6038         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6039
6040         switch (oops_dump_mode) {
6041         case DUMP_ALL:
6042                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6043                 break;
6044         case DUMP_ORIG:
6045                 iter.cpu_file = raw_smp_processor_id();
6046                 break;
6047         case DUMP_NONE:
6048                 goto out_enable;
6049         default:
6050                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6051                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
6052         }
6053
6054         printk(KERN_TRACE "Dumping ftrace buffer:\n");
6055
6056         /*
6057          * We need to stop all tracing on all CPUS to read
6058          * the next buffer. This is a bit expensive, but is
6059          * not done often. We print whatever we can read,
6060          * and then release the locks again.
6061          */
6062
6063         while (!trace_empty(&iter)) {
6064
6065                 if (!cnt)
6066                         printk(KERN_TRACE "---------------------------------\n");
6067
6068                 cnt++;
6069
6070                 /* reset all but tr, trace, and overruns */
6071                 memset(&iter.seq, 0,
6072                        sizeof(struct trace_iterator) -
6073                        offsetof(struct trace_iterator, seq));
6074                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6075                 iter.pos = -1;
6076
6077                 if (trace_find_next_entry_inc(&iter) != NULL) {
6078                         int ret;
6079
6080                         ret = print_trace_line(&iter);
6081                         if (ret != TRACE_TYPE_NO_CONSUME)
6082                                 trace_consume(&iter);
6083                 }
6084                 touch_nmi_watchdog();
6085
6086                 trace_printk_seq(&iter.seq);
6087         }
6088
6089         if (!cnt)
6090                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
6091         else
6092                 printk(KERN_TRACE "---------------------------------\n");
6093
6094  out_enable:
6095         /* Re-enable tracing if requested */
6096         if (!disable_tracing) {
6097                 trace_flags |= old_userobj;
6098
6099                 for_each_tracing_cpu(cpu) {
6100                         atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6101                 }
6102                 tracing_on();
6103         }
6104
6105  out:
6106         arch_spin_unlock(&ftrace_dump_lock);
6107         local_irq_restore(flags);
6108 }
6109
6110 /* By default: disable tracing after the dump */
6111 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6112 {
6113         __ftrace_dump(true, oops_dump_mode);
6114 }
6115 EXPORT_SYMBOL_GPL(ftrace_dump);
6116
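/*
 * Early init: allocate the tracing cpumasks and the global trace
 * buffers, register the nop tracer and the panic/die notifiers, and
 * apply any trace options passed on the kernel command line.
 */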
6117 __init static int tracer_alloc_buffers(void)
6118 {
6119         int ring_buf_size;
6120         int ret = -ENOMEM;
6121
6122
6123         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6124                 goto out;
6125
6126         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
6127                 goto out_free_buffer_mask;
6128
6129         /* Only allocate trace_printk buffers if a trace_printk exists */
6130         if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6131                 /* Must be called before global_trace.buffer is allocated */
6132                 trace_printk_init_buffers();
6133
6134         /* Unless already expanded, keep the ring buffer at its minimum size to save memory */
6135         if (ring_buffer_expanded)
6136                 ring_buf_size = trace_buf_size;
6137         else
6138                 ring_buf_size = 1;
6139
6140         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6141         cpumask_copy(tracing_cpumask, cpu_all_mask);
6142
6143         raw_spin_lock_init(&global_trace.start_lock);
6144
6145         /* TODO: make the number of buffers hot pluggable with CPUS */
6146         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6147                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6148                 WARN_ON(1);
6149                 goto out_free_cpumask;
6150         }
6151
6152         if (global_trace.buffer_disabled)
6153                 tracing_off();
6154
6155         trace_init_cmdlines();
6156
6157         register_tracer(&nop_trace);
6158
6159         global_trace.current_trace = &nop_trace;
6160
6161         /* All seems OK, enable tracing */
6162         tracing_disabled = 0;
6163
6164         atomic_notifier_chain_register(&panic_notifier_list,
6165                                        &trace_panic_notifier);
6166
6167         register_die_notifier(&trace_die_notifier);
6168
6169         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6170
6171         /* Holder for file callbacks */
6172         global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
6173         global_trace.trace_cpu.tr = &global_trace;
6174
6175         INIT_LIST_HEAD(&global_trace.systems);
6176         INIT_LIST_HEAD(&global_trace.events);
6177         list_add(&global_trace.list, &ftrace_trace_arrays);
6178
6179         while (trace_boot_options) {
6180                 char *option;
6181
6182                 option = strsep(&trace_boot_options, ",");
6183                 trace_set_options(&global_trace, option);
6184         }
6185
6186         register_snapshot_cmd();
6187
6188         return 0;
6189
6190 out_free_cpumask:
6191         free_percpu(global_trace.trace_buffer.data);
6192 #ifdef CONFIG_TRACER_MAX_TRACE
6193         free_percpu(global_trace.max_buffer.data);
6194 #endif
6195         free_cpumask_var(tracing_cpumask);
6196 out_free_buffer_mask:
6197         free_cpumask_var(tracing_buffer_mask);
6198 out:
6199         return ret;
6200 }
6201
6202 __init static int clear_boot_tracer(void)
6203 {
6204         /*
6205          * The default bootup tracer name lives in an init section that
6206          * is freed after boot. This function is called at late_initcall
6207          * time; if the boot tracer was never registered, clear the
6208          * pointer so a later registration does not access memory that
6209          * is about to be freed.
6210          */
6211         if (!default_bootup_tracer)
6212                 return 0;
6213
6214         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6215                default_bootup_tracer);
6216         default_bootup_tracer = NULL;
6217
6218         return 0;
6219 }
6220
6221 early_initcall(tracer_alloc_buffers);
6222 fs_initcall(tracer_init_debugfs);
6223 late_initcall(clear_boot_tracer);