1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/init.h>
17 #include <linux/hash.h>
18 #include <linux/list.h>
19 #include <linux/cpu.h>
20 #include <linux/fs.h>
21
22 #include "trace.h"
23
24 /*
25  * The ring buffer header is special. We must keep it up to date manually.
26  */
27 int ring_buffer_print_entry_header(struct trace_seq *s)
28 {
29         int ret;
30
31         ret = trace_seq_printf(s, "# compressed entry header\n");
32         ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
33         ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
34         ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
35         ret = trace_seq_printf(s, "\n");
36         ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
37                                RINGBUF_TYPE_PADDING);
38         ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39                                RINGBUF_TYPE_TIME_EXTEND);
40         ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
41                                RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
42
43         return ret;
44 }
45
46 /*
47  * The ring buffer is made up of a list of pages. A separate list of pages is
48  * allocated for each CPU. A writer may only write to a buffer that is
49  * associated with the CPU it is currently executing on.  A reader may read
50  * from any per cpu buffer.
51  *
52  * The reader is special. For each per cpu buffer, the reader has its own
53  * reader page. When a reader has read the entire reader page, this reader
54  * page is swapped with another page in the ring buffer.
55  *
56  * Now, as long as the writer is off the reader page, the reader can do
57  * whatever it wants with that page. The writer will never write to that page
58  * again (as long as it is out of the ring buffer).
59  *
60  * Here's some silly ASCII art.
61  *
62  *   +------+
63  *   |reader|          RING BUFFER
64  *   |page  |
65  *   +------+        +---+   +---+   +---+
66  *                   |   |-->|   |-->|   |
67  *                   +---+   +---+   +---+
68  *                     ^               |
69  *                     |               |
70  *                     +---------------+
71  *
72  *
73  *   +------+
74  *   |reader|          RING BUFFER
75  *   |page  |------------------v
76  *   +------+        +---+   +---+   +---+
77  *                   |   |-->|   |-->|   |
78  *                   +---+   +---+   +---+
79  *                     ^               |
80  *                     |               |
81  *                     +---------------+
82  *
83  *
84  *   +------+
85  *   |reader|          RING BUFFER
86  *   |page  |------------------v
87  *   +------+        +---+   +---+   +---+
88  *      ^            |   |-->|   |-->|   |
89  *      |            +---+   +---+   +---+
90  *      |                              |
91  *      |                              |
92  *      +------------------------------+
93  *
94  *
95  *   +------+
96  *   |buffer|          RING BUFFER
97  *   |page  |------------------v
98  *   +------+        +---+   +---+   +---+
99  *      ^            |   |   |   |-->|   |
100  *      |   New      +---+   +---+   +---+
101  *      |  Reader------^               |
102  *      |   page                       |
103  *      +------------------------------+
104  *
105  *
106  * After we make this swap, the reader can hand this page off to the splice
107  * code and be done with it. It can even allocate a new page if it needs to
108  * and swap that into the ring buffer.
109  *
110  * We will be using cmpxchg soon to make all this lockless.
111  *
112  */
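
/*
 * Example (illustrative only): one way a reader can consume events is
 * through ring_buffer_consume(), defined later in this file.  The names
 * "process" and "cpu" below are placeholders:
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 *
 * The splice path instead swaps whole pages out, as described above.
 */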
113
114 /*
115  * A fast way to enable or disable all ring buffers is to
116  * call tracing_on or tracing_off. Turning off the ring buffers
117  * prevents all ring buffers from being recorded to.
118  * Turning this switch on makes it OK to write to the
119  * ring buffer, provided the ring buffer itself is enabled.
120  *
121  * There are three layers that must be on in order to write
122  * to the ring buffer.
123  *
124  * 1) This global flag must be set.
125  * 2) The ring buffer must be enabled for recording.
126  * 3) The per cpu buffer must be enabled for recording.
127  *
128  * In case of an anomaly, this global flag has a bit that, when set,
129  * will permanently disable all ring buffers.
130  */
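
/*
 * Example (illustrative only): each of the three layers above has its own
 * switch.  tracing_on()/tracing_off() flip the global flag, while
 * ring_buffer_record_enable()/ring_buffer_record_disable() and the
 * *_cpu() variants (defined later in this file) control layers 2 and 3:
 *
 *	tracing_off();					layer 1: global flag
 *	ring_buffer_record_disable(buffer);		layer 2: whole buffer
 *	ring_buffer_record_disable_cpu(buffer, cpu);	layer 3: one cpu
 */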
131
132 /*
133  * Global flag to disable all recording to ring buffers
134  *  This has two bits: ON, DISABLED
135  *
136  *  ON   DISABLED
137  * ---- ----------
138  *   0      0        : ring buffers are off
139  *   1      0        : ring buffers are on
140  *   X      1        : ring buffers are permanently disabled
141  */
142
143 enum {
144         RB_BUFFERS_ON_BIT       = 0,
145         RB_BUFFERS_DISABLED_BIT = 1,
146 };
147
148 enum {
149         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
150         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
151 };
152
153 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
154
155 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
156
157 /**
158  * tracing_on - enable all tracing buffers
159  *
160  * This function enables all tracing buffers that may have been
161  * disabled with tracing_off.
162  */
163 void tracing_on(void)
164 {
165         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
166 }
167 EXPORT_SYMBOL_GPL(tracing_on);
168
169 /**
170  * tracing_off - turn off all tracing buffers
171  *
172  * This function stops all tracing buffers from recording data.
173  * It does not disable any overhead the tracers themselves may
174  * be causing. This function simply causes all recording to
175  * the ring buffers to fail.
176  */
177 void tracing_off(void)
178 {
179         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
180 }
181 EXPORT_SYMBOL_GPL(tracing_off);
182
183 /**
184  * tracing_off_permanent - permanently disable ring buffers
185  *
186  * This function, once called, will disable all ring buffers
187  * permanently.
188  */
189 void tracing_off_permanent(void)
190 {
191         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
192 }
193
194 /**
195  * tracing_is_on - show whether the ring buffers are enabled
196  */
197 int tracing_is_on(void)
198 {
199         return ring_buffer_flags == RB_BUFFERS_ON;
200 }
201 EXPORT_SYMBOL_GPL(tracing_is_on);
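
/*
 * Example (illustrative only): a debug hook can freeze the trace the
 * moment a problem is detected, keeping the events that led up to it
 * ("something_went_wrong" is just a placeholder condition):
 *
 *	if (something_went_wrong)
 *		tracing_off();
 *
 * Recording can later be re-enabled with tracing_on(), unless
 * tracing_off_permanent() was used.
 */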
202
205 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206 #define RB_ALIGNMENT            4U
207 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
209
210 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
211 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
212
213 enum {
214         RB_LEN_TIME_EXTEND = 8,
215         RB_LEN_TIME_STAMP = 16,
216 };
217
218 static inline int rb_null_event(struct ring_buffer_event *event)
219 {
220         return event->type_len == RINGBUF_TYPE_PADDING
221                         && event->time_delta == 0;
222 }
223
224 static inline int rb_discarded_event(struct ring_buffer_event *event)
225 {
226         return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
227 }
228
229 static void rb_event_set_padding(struct ring_buffer_event *event)
230 {
231         event->type_len = RINGBUF_TYPE_PADDING;
232         event->time_delta = 0;
233 }
234
235 static unsigned
236 rb_event_data_length(struct ring_buffer_event *event)
237 {
238         unsigned length;
239
240         if (event->type_len)
241                 length = event->type_len * RB_ALIGNMENT;
242         else
243                 length = event->array[0];
244         return length + RB_EVNT_HDR_SIZE;
245 }
246
247 /* inline for ring buffer fast paths */
248 static unsigned
249 rb_event_length(struct ring_buffer_event *event)
250 {
251         switch (event->type_len) {
252         case RINGBUF_TYPE_PADDING:
253                 if (rb_null_event(event))
254                         /* undefined */
255                         return -1;
256                 return  event->array[0] + RB_EVNT_HDR_SIZE;
257
258         case RINGBUF_TYPE_TIME_EXTEND:
259                 return RB_LEN_TIME_EXTEND;
260
261         case RINGBUF_TYPE_TIME_STAMP:
262                 return RB_LEN_TIME_STAMP;
263
264         case RINGBUF_TYPE_DATA:
265                 return rb_event_data_length(event);
266         default:
267                 BUG();
268         }
269         /* not hit */
270         return 0;
271 }
272
273 /**
274  * ring_buffer_event_length - return the length of the event
275  * @event: the event to get the length of
276  */
277 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
278 {
279         unsigned length = rb_event_length(event);
280         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
281                 return length;
282         length -= RB_EVNT_HDR_SIZE;
283         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
284                 length -= sizeof(event->array[0]);
285         return length;
286 }
287 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
288
289 /* inline for ring buffer fast paths */
290 static void *
291 rb_event_data(struct ring_buffer_event *event)
292 {
293         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
294         /* If length is in len field, then array[0] has the data */
295         if (event->type_len)
296                 return (void *)&event->array[0];
297         /* Otherwise length is in array[0] and array[1] has the data */
298         return (void *)&event->array[1];
299 }
300
301 /**
302  * ring_buffer_event_data - return the data of the event
303  * @event: the event to get the data from
304  */
305 void *ring_buffer_event_data(struct ring_buffer_event *event)
306 {
307         return rb_event_data(event);
308 }
309 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
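
/*
 * Example (illustrative only): given an event obtained from one of the
 * read interfaces, the payload is accessed like this ("struct my_entry"
 * is a made-up record type):
 *
 *	struct my_entry *entry = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 *
 * For data events, ring_buffer_event_length() reports only the data size;
 * the event header is not included.
 */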
310
311 #define for_each_buffer_cpu(buffer, cpu)                \
312         for_each_cpu(cpu, buffer->cpumask)
313
314 #define TS_SHIFT        27
315 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
316 #define TS_DELTA_TEST   (~TS_MASK)
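
/*
 * Example: with a nanosecond clock, deltas below 2^27 ns (roughly 134 ms)
 * fit in the 27-bit time_delta field.  A larger delta fails
 * test_time_stamp() and is emitted as a time-extend event that stores the
 * low bits (delta & TS_MASK) in time_delta and the high bits
 * (delta >> TS_SHIFT) in array[0]; see rb_add_time_stamp() below.
 */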
317
318 struct buffer_data_page {
319         u64              time_stamp;    /* page time stamp */
320         local_t          commit;        /* write committed index */
321         unsigned char    data[];        /* data of buffer page */
322 };
323
324 struct buffer_page {
325         struct list_head list;          /* list of buffer pages */
326         local_t          write;         /* index for next write */
327         unsigned         read;          /* index for next read */
328         local_t          entries;       /* entries on this page */
329         struct buffer_data_page *page;  /* Actual data page */
330 };
331
332 static void rb_init_page(struct buffer_data_page *bpage)
333 {
334         local_set(&bpage->commit, 0);
335 }
336
337 /**
338  * ring_buffer_page_len - the size of data on the page.
339  * @page: The page to read
340  *
341  * Returns the amount of data on the page, including buffer page header.
342  */
343 size_t ring_buffer_page_len(void *page)
344 {
345         return local_read(&((struct buffer_data_page *)page)->commit)
346                 + BUF_PAGE_HDR_SIZE;
347 }
348
349 /*
350  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
351  * this issue out.
352  */
353 static void free_buffer_page(struct buffer_page *bpage)
354 {
355         free_page((unsigned long)bpage->page);
356         kfree(bpage);
357 }
358
359 /*
360  * We need to fit the time_stamp delta into 27 bits.
361  */
362 static inline int test_time_stamp(u64 delta)
363 {
364         if (delta & TS_DELTA_TEST)
365                 return 1;
366         return 0;
367 }
368
369 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
370
371 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
372 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
373
374 /* Max number of timestamps that can fit on a page */
375 #define RB_TIMESTAMPS_PER_PAGE  (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
376
377 int ring_buffer_print_page_header(struct trace_seq *s)
378 {
379         struct buffer_data_page field;
380         int ret;
381
382         ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
383                                "offset:0;\tsize:%u;\n",
384                                (unsigned int)sizeof(field.time_stamp));
385
386         ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
387                                "offset:%u;\tsize:%u;\n",
388                                (unsigned int)offsetof(typeof(field), commit),
389                                (unsigned int)sizeof(field.commit));
390
391         ret = trace_seq_printf(s, "\tfield: char data;\t"
392                                "offset:%u;\tsize:%u;\n",
393                                (unsigned int)offsetof(typeof(field), data),
394                                (unsigned int)BUF_PAGE_SIZE);
395
396         return ret;
397 }
398
399 /*
400  * When head_page == tail_page && head == tail, the buffer is empty.
401  */
402 struct ring_buffer_per_cpu {
403         int                             cpu;
404         struct ring_buffer              *buffer;
405         spinlock_t                      reader_lock; /* serialize readers */
406         raw_spinlock_t                  lock;
407         struct lock_class_key           lock_key;
408         struct list_head                pages;
409         struct buffer_page              *head_page;     /* read from head */
410         struct buffer_page              *tail_page;     /* write to tail */
411         struct buffer_page              *commit_page;   /* committed pages */
412         struct buffer_page              *reader_page;
413         unsigned long                   nmi_dropped;
414         unsigned long                   commit_overrun;
415         unsigned long                   overrun;
416         unsigned long                   read;
417         local_t                         entries;
418         u64                             write_stamp;
419         u64                             read_stamp;
420         atomic_t                        record_disabled;
421 };
422
423 struct ring_buffer {
424         unsigned                        pages;
425         unsigned                        flags;
426         int                             cpus;
427         atomic_t                        record_disabled;
428         cpumask_var_t                   cpumask;
429
430         struct lock_class_key           *reader_lock_key;
431
432         struct mutex                    mutex;
433
434         struct ring_buffer_per_cpu      **buffers;
435
436 #ifdef CONFIG_HOTPLUG_CPU
437         struct notifier_block           cpu_notify;
438 #endif
439         u64                             (*clock)(void);
440 };
441
442 struct ring_buffer_iter {
443         struct ring_buffer_per_cpu      *cpu_buffer;
444         unsigned long                   head;
445         struct buffer_page              *head_page;
446         u64                             read_stamp;
447 };
448
449 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
450 #define RB_WARN_ON(buffer, cond)                                \
451         ({                                                      \
452                 int _____ret = unlikely(cond);                  \
453                 if (_____ret) {                                 \
454                         atomic_inc(&buffer->record_disabled);   \
455                         WARN_ON(1);                             \
456                 }                                               \
457                 _____ret;                                       \
458         })
459
460 /* Up this if you want to test the TIME_EXTENTS and normalization */
461 #define DEBUG_SHIFT 0
462
463 static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
464 {
465         /* shift to debug/test normalization and TIME_EXTENTS */
466         return buffer->clock() << DEBUG_SHIFT;
467 }
468
469 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
470 {
471         u64 time;
472
473         preempt_disable_notrace();
474         time = rb_time_stamp(buffer, cpu);
475         preempt_enable_no_resched_notrace();
476
477         return time;
478 }
479 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
480
481 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
482                                       int cpu, u64 *ts)
483 {
484         /* Just a simple test of the normalize function and deltas */
485         *ts >>= DEBUG_SHIFT;
486 }
487 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
488
489 /**
490  * rb_check_pages - integrity check of buffer pages
491  * @cpu_buffer: CPU buffer with pages to test
492  *
493  * As a safety measure we check to make sure the data pages have not
494  * been corrupted.
495  */
496 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
497 {
498         struct list_head *head = &cpu_buffer->pages;
499         struct buffer_page *bpage, *tmp;
500
501         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
502                 return -1;
503         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
504                 return -1;
505
506         list_for_each_entry_safe(bpage, tmp, head, list) {
507                 if (RB_WARN_ON(cpu_buffer,
508                                bpage->list.next->prev != &bpage->list))
509                         return -1;
510                 if (RB_WARN_ON(cpu_buffer,
511                                bpage->list.prev->next != &bpage->list))
512                         return -1;
513         }
514
515         return 0;
516 }
517
518 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
519                              unsigned nr_pages)
520 {
521         struct list_head *head = &cpu_buffer->pages;
522         struct buffer_page *bpage, *tmp;
523         unsigned long addr;
524         LIST_HEAD(pages);
525         unsigned i;
526
527         for (i = 0; i < nr_pages; i++) {
528                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
529                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
530                 if (!bpage)
531                         goto free_pages;
532                 list_add(&bpage->list, &pages);
533
534                 addr = __get_free_page(GFP_KERNEL);
535                 if (!addr)
536                         goto free_pages;
537                 bpage->page = (void *)addr;
538                 rb_init_page(bpage->page);
539         }
540
541         list_splice(&pages, head);
542
543         rb_check_pages(cpu_buffer);
544
545         return 0;
546
547  free_pages:
548         list_for_each_entry_safe(bpage, tmp, &pages, list) {
549                 list_del_init(&bpage->list);
550                 free_buffer_page(bpage);
551         }
552         return -ENOMEM;
553 }
554
555 static struct ring_buffer_per_cpu *
556 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
557 {
558         struct ring_buffer_per_cpu *cpu_buffer;
559         struct buffer_page *bpage;
560         unsigned long addr;
561         int ret;
562
563         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
564                                   GFP_KERNEL, cpu_to_node(cpu));
565         if (!cpu_buffer)
566                 return NULL;
567
568         cpu_buffer->cpu = cpu;
569         cpu_buffer->buffer = buffer;
570         spin_lock_init(&cpu_buffer->reader_lock);
571         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
572         cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
573         INIT_LIST_HEAD(&cpu_buffer->pages);
574
575         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
576                             GFP_KERNEL, cpu_to_node(cpu));
577         if (!bpage)
578                 goto fail_free_buffer;
579
580         cpu_buffer->reader_page = bpage;
581         addr = __get_free_page(GFP_KERNEL);
582         if (!addr)
583                 goto fail_free_reader;
584         bpage->page = (void *)addr;
585         rb_init_page(bpage->page);
586
587         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
588
589         ret = rb_allocate_pages(cpu_buffer, buffer->pages);
590         if (ret < 0)
591                 goto fail_free_reader;
592
593         cpu_buffer->head_page
594                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
595         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
596
597         return cpu_buffer;
598
599  fail_free_reader:
600         free_buffer_page(cpu_buffer->reader_page);
601
602  fail_free_buffer:
603         kfree(cpu_buffer);
604         return NULL;
605 }
606
607 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
608 {
609         struct list_head *head = &cpu_buffer->pages;
610         struct buffer_page *bpage, *tmp;
611
612         free_buffer_page(cpu_buffer->reader_page);
613
614         list_for_each_entry_safe(bpage, tmp, head, list) {
615                 list_del_init(&bpage->list);
616                 free_buffer_page(bpage);
617         }
618         kfree(cpu_buffer);
619 }
620
621 /*
622  * Causes compile errors if the struct buffer_page gets bigger
623  * than the struct page.
624  */
625 extern int ring_buffer_page_too_big(void);
626
627 #ifdef CONFIG_HOTPLUG_CPU
628 static int rb_cpu_notify(struct notifier_block *self,
629                          unsigned long action, void *hcpu);
630 #endif
631
632 /**
633  * ring_buffer_alloc - allocate a new ring_buffer
634  * @size: the size in bytes per cpu that is needed.
635  * @flags: attributes to set for the ring buffer.
636  *
637  * Currently the only flag that is available is the RB_FL_OVERWRITE
638  * flag. This flag means that the buffer will overwrite old data
639  * when the buffer wraps. If this flag is not set, the buffer will
640  * drop data when the tail hits the head.
641  */
642 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
643                                         struct lock_class_key *key)
644 {
645         struct ring_buffer *buffer;
646         int bsize;
647         int cpu;
648
649         /* Paranoid! Optimizes out when all is well */
650         if (sizeof(struct buffer_page) > sizeof(struct page))
651                 ring_buffer_page_too_big();
652
653
654         /* keep it in its own cache line */
655         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
656                          GFP_KERNEL);
657         if (!buffer)
658                 return NULL;
659
660         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
661                 goto fail_free_buffer;
662
663         buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
664         buffer->flags = flags;
665         buffer->clock = trace_clock_local;
666         buffer->reader_lock_key = key;
667
668         /* need at least two pages */
669         if (buffer->pages == 1)
670                 buffer->pages++;
671
672         /*
673          * Without CPU hotplug, if the ring buffer is allocated from an
674          * early initcall, it will not be notified of secondary cpus.
675          * In that case, we need to allocate for all possible cpus.
676          */
677 #ifdef CONFIG_HOTPLUG_CPU
678         get_online_cpus();
679         cpumask_copy(buffer->cpumask, cpu_online_mask);
680 #else
681         cpumask_copy(buffer->cpumask, cpu_possible_mask);
682 #endif
683         buffer->cpus = nr_cpu_ids;
684
685         bsize = sizeof(void *) * nr_cpu_ids;
686         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
687                                   GFP_KERNEL);
688         if (!buffer->buffers)
689                 goto fail_free_cpumask;
690
691         for_each_buffer_cpu(buffer, cpu) {
692                 buffer->buffers[cpu] =
693                         rb_allocate_cpu_buffer(buffer, cpu);
694                 if (!buffer->buffers[cpu])
695                         goto fail_free_buffers;
696         }
697
698 #ifdef CONFIG_HOTPLUG_CPU
699         buffer->cpu_notify.notifier_call = rb_cpu_notify;
700         buffer->cpu_notify.priority = 0;
701         register_cpu_notifier(&buffer->cpu_notify);
702 #endif
703
704         put_online_cpus();
705         mutex_init(&buffer->mutex);
706
707         return buffer;
708
709  fail_free_buffers:
710         for_each_buffer_cpu(buffer, cpu) {
711                 if (buffer->buffers[cpu])
712                         rb_free_cpu_buffer(buffer->buffers[cpu]);
713         }
714         kfree(buffer->buffers);
715
716  fail_free_cpumask:
717         free_cpumask_var(buffer->cpumask);
718         put_online_cpus();
719
720  fail_free_buffer:
721         kfree(buffer);
722         return NULL;
723 }
724 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
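
/*
 * Example (illustrative only): a typical user goes through the
 * ring_buffer_alloc() wrapper from <linux/ring_buffer.h>, which supplies
 * the lock class key:
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 *
 * The size is per cpu, in bytes, and is rounded up to whole buffer pages.
 */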
725
726 /**
727  * ring_buffer_free - free a ring buffer.
728  * @buffer: the buffer to free.
729  */
730 void
731 ring_buffer_free(struct ring_buffer *buffer)
732 {
733         int cpu;
734
735         get_online_cpus();
736
737 #ifdef CONFIG_HOTPLUG_CPU
738         unregister_cpu_notifier(&buffer->cpu_notify);
739 #endif
740
741         for_each_buffer_cpu(buffer, cpu)
742                 rb_free_cpu_buffer(buffer->buffers[cpu]);
743
744         put_online_cpus();
745
746         free_cpumask_var(buffer->cpumask);
747
748         kfree(buffer);
749 }
750 EXPORT_SYMBOL_GPL(ring_buffer_free);
751
752 void ring_buffer_set_clock(struct ring_buffer *buffer,
753                            u64 (*clock)(void))
754 {
755         buffer->clock = clock;
756 }
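
/*
 * Example (illustrative only, assuming a suitable clock is available):
 * a caller that wants timestamps comparable across CPUs could install a
 * global clock such as trace_clock_global() from <linux/trace_clock.h>:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 */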
757
758 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
759
760 static void
761 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
762 {
763         struct buffer_page *bpage;
764         struct list_head *p;
765         unsigned i;
766
767         atomic_inc(&cpu_buffer->record_disabled);
768         synchronize_sched();
769
770         for (i = 0; i < nr_pages; i++) {
771                 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
772                         return;
773                 p = cpu_buffer->pages.next;
774                 bpage = list_entry(p, struct buffer_page, list);
775                 list_del_init(&bpage->list);
776                 free_buffer_page(bpage);
777         }
778         if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
779                 return;
780
781         rb_reset_cpu(cpu_buffer);
782
783         rb_check_pages(cpu_buffer);
784
785         atomic_dec(&cpu_buffer->record_disabled);
786
787 }
788
789 static void
790 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
791                 struct list_head *pages, unsigned nr_pages)
792 {
793         struct buffer_page *bpage;
794         struct list_head *p;
795         unsigned i;
796
797         atomic_inc(&cpu_buffer->record_disabled);
798         synchronize_sched();
799
800         for (i = 0; i < nr_pages; i++) {
801                 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
802                         return;
803                 p = pages->next;
804                 bpage = list_entry(p, struct buffer_page, list);
805                 list_del_init(&bpage->list);
806                 list_add_tail(&bpage->list, &cpu_buffer->pages);
807         }
808         rb_reset_cpu(cpu_buffer);
809
810         rb_check_pages(cpu_buffer);
811
812         atomic_dec(&cpu_buffer->record_disabled);
813 }
814
815 /**
816  * ring_buffer_resize - resize the ring buffer
817  * @buffer: the buffer to resize.
818  * @size: the new size.
819  *
820  * The tracer is responsible for making sure that the buffer is
821  * not being used while changing the size.
822  * Note: We may be able to change the above requirement by using
823  *  RCU synchronizations.
824  *
825  * Minimum size is 2 * BUF_PAGE_SIZE.
826  *
827  * Returns -1 on failure.
828  */
829 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
830 {
831         struct ring_buffer_per_cpu *cpu_buffer;
832         unsigned nr_pages, rm_pages, new_pages;
833         struct buffer_page *bpage, *tmp;
834         unsigned long buffer_size;
835         unsigned long addr;
836         LIST_HEAD(pages);
837         int i, cpu;
838
839         /*
840          * Always succeed at resizing a non-existent buffer:
841          */
842         if (!buffer)
843                 return size;
844
845         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
846         size *= BUF_PAGE_SIZE;
847         buffer_size = buffer->pages * BUF_PAGE_SIZE;
848
849         /* we need a minimum of two pages */
850         if (size < BUF_PAGE_SIZE * 2)
851                 size = BUF_PAGE_SIZE * 2;
852
853         if (size == buffer_size)
854                 return size;
855
856         mutex_lock(&buffer->mutex);
857         get_online_cpus();
858
859         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
860
861         if (size < buffer_size) {
862
863                 /* easy case, just free pages */
864                 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
865                         goto out_fail;
866
867                 rm_pages = buffer->pages - nr_pages;
868
869                 for_each_buffer_cpu(buffer, cpu) {
870                         cpu_buffer = buffer->buffers[cpu];
871                         rb_remove_pages(cpu_buffer, rm_pages);
872                 }
873                 goto out;
874         }
875
876         /*
877          * This is a bit more difficult. We only want to add pages
878          * when we can allocate enough for all CPUs. We do this
879          * by allocating all the pages and storing them on a local
880          * linked list. If we succeed in our allocation, then we
881          * add these pages to the cpu_buffers. Otherwise we just free
882          * them all and return -ENOMEM;
883          */
884         if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
885                 goto out_fail;
886
887         new_pages = nr_pages - buffer->pages;
888
889         for_each_buffer_cpu(buffer, cpu) {
890                 for (i = 0; i < new_pages; i++) {
891                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
892                                                   cache_line_size()),
893                                             GFP_KERNEL, cpu_to_node(cpu));
894                         if (!bpage)
895                                 goto free_pages;
896                         list_add(&bpage->list, &pages);
897                         addr = __get_free_page(GFP_KERNEL);
898                         if (!addr)
899                                 goto free_pages;
900                         bpage->page = (void *)addr;
901                         rb_init_page(bpage->page);
902                 }
903         }
904
905         for_each_buffer_cpu(buffer, cpu) {
906                 cpu_buffer = buffer->buffers[cpu];
907                 rb_insert_pages(cpu_buffer, &pages, new_pages);
908         }
909
910         if (RB_WARN_ON(buffer, !list_empty(&pages)))
911                 goto out_fail;
912
913  out:
914         buffer->pages = nr_pages;
915         put_online_cpus();
916         mutex_unlock(&buffer->mutex);
917
918         return size;
919
920  free_pages:
921         list_for_each_entry_safe(bpage, tmp, &pages, list) {
922                 list_del_init(&bpage->list);
923                 free_buffer_page(bpage);
924         }
925         put_online_cpus();
926         mutex_unlock(&buffer->mutex);
927         return -ENOMEM;
928
929         /*
930          * Something went totally wrong, and we are too paranoid
931          * to even clean up the mess.
932          */
933  out_fail:
934         put_online_cpus();
935         mutex_unlock(&buffer->mutex);
936         return -1;
937 }
938 EXPORT_SYMBOL_GPL(ring_buffer_resize);
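
/*
 * Example (illustrative only): resizing each per-cpu buffer to 64 KB.
 * As noted above, the caller must make sure the buffer is idle:
 *
 *	if (ring_buffer_resize(buffer, 64 * 1024) < 0)
 *		return -ENOMEM;
 */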
939
940 static inline void *
941 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
942 {
943         return bpage->data + index;
944 }
945
946 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
947 {
948         return bpage->page->data + index;
949 }
950
951 static inline struct ring_buffer_event *
952 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
953 {
954         return __rb_page_index(cpu_buffer->reader_page,
955                                cpu_buffer->reader_page->read);
956 }
957
958 static inline struct ring_buffer_event *
959 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
960 {
961         return __rb_page_index(cpu_buffer->head_page,
962                                cpu_buffer->head_page->read);
963 }
964
965 static inline struct ring_buffer_event *
966 rb_iter_head_event(struct ring_buffer_iter *iter)
967 {
968         return __rb_page_index(iter->head_page, iter->head);
969 }
970
971 static inline unsigned rb_page_write(struct buffer_page *bpage)
972 {
973         return local_read(&bpage->write);
974 }
975
976 static inline unsigned rb_page_commit(struct buffer_page *bpage)
977 {
978         return local_read(&bpage->page->commit);
979 }
980
981 /* Size is determined by what has been committed */
982 static inline unsigned rb_page_size(struct buffer_page *bpage)
983 {
984         return rb_page_commit(bpage);
985 }
986
987 static inline unsigned
988 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
989 {
990         return rb_page_commit(cpu_buffer->commit_page);
991 }
992
993 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
994 {
995         return rb_page_commit(cpu_buffer->head_page);
996 }
997
998 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
999                                struct buffer_page **bpage)
1000 {
1001         struct list_head *p = (*bpage)->list.next;
1002
1003         if (p == &cpu_buffer->pages)
1004                 p = p->next;
1005
1006         *bpage = list_entry(p, struct buffer_page, list);
1007 }
1008
1009 static inline unsigned
1010 rb_event_index(struct ring_buffer_event *event)
1011 {
1012         unsigned long addr = (unsigned long)event;
1013
1014         return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
1015 }
1016
1017 static inline int
1018 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1019              struct ring_buffer_event *event)
1020 {
1021         unsigned long addr = (unsigned long)event;
1022         unsigned long index;
1023
1024         index = rb_event_index(event);
1025         addr &= PAGE_MASK;
1026
1027         return cpu_buffer->commit_page->page == (void *)addr &&
1028                 rb_commit_index(cpu_buffer) == index;
1029 }
1030
1031 static void
1032 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1033                     struct ring_buffer_event *event)
1034 {
1035         unsigned long addr = (unsigned long)event;
1036         unsigned long index;
1037
1038         index = rb_event_index(event);
1039         addr &= PAGE_MASK;
1040
1041         while (cpu_buffer->commit_page->page != (void *)addr) {
1042                 if (RB_WARN_ON(cpu_buffer,
1043                           cpu_buffer->commit_page == cpu_buffer->tail_page))
1044                         return;
1045                 cpu_buffer->commit_page->page->commit =
1046                         cpu_buffer->commit_page->write;
1047                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1048                 cpu_buffer->write_stamp =
1049                         cpu_buffer->commit_page->page->time_stamp;
1050         }
1051
1052         /* Now set the commit to the event's index */
1053         local_set(&cpu_buffer->commit_page->page->commit, index);
1054 }
1055
1056 static void
1057 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1058 {
1059         /*
1060          * We only race with interrupts and NMIs on this CPU.
1061          * If we own the commit event, then we can commit
1062          * all others that interrupted us, since the interruptions
1063          * are in stack format (they finish before they come
1064          * back to us). This allows us to do a simple loop to
1065          * assign the commit to the tail.
1066          */
1067  again:
1068         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1069                 cpu_buffer->commit_page->page->commit =
1070                         cpu_buffer->commit_page->write;
1071                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1072                 cpu_buffer->write_stamp =
1073                         cpu_buffer->commit_page->page->time_stamp;
1074                 /* add barrier to keep gcc from optimizing too much */
1075                 barrier();
1076         }
1077         while (rb_commit_index(cpu_buffer) !=
1078                rb_page_write(cpu_buffer->commit_page)) {
1079                 cpu_buffer->commit_page->page->commit =
1080                         cpu_buffer->commit_page->write;
1081                 barrier();
1082         }
1083
1084         /* again, keep gcc from optimizing */
1085         barrier();
1086
1087         /*
1088          * If an interrupt came in just after the first while loop
1089          * and pushed the tail page forward, we will be left with
1090          * a dangling commit that will never go forward.
1091          */
1092         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1093                 goto again;
1094 }
1095
1096 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1097 {
1098         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1099         cpu_buffer->reader_page->read = 0;
1100 }
1101
1102 static void rb_inc_iter(struct ring_buffer_iter *iter)
1103 {
1104         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1105
1106         /*
1107          * The iterator could be on the reader page (it starts there).
1108          * But the head could have moved, since the reader was
1109          * found. Check for this case and assign the iterator
1110          * to the head page instead of next.
1111          */
1112         if (iter->head_page == cpu_buffer->reader_page)
1113                 iter->head_page = cpu_buffer->head_page;
1114         else
1115                 rb_inc_page(cpu_buffer, &iter->head_page);
1116
1117         iter->read_stamp = iter->head_page->page->time_stamp;
1118         iter->head = 0;
1119 }
1120
1121 /**
1122  * ring_buffer_update_event - update event type and data
1123  * @event: the event to update
1124  * @type: the type of event
1125  * @length: the size of the event field in the ring buffer
1126  *
1127  * Update the type and data fields of the event. The length
1128  * is the actual size that is written to the ring buffer,
1129  * and with this, we can determine what to place into the
1130  * data field.
1131  */
1132 static void
1133 rb_update_event(struct ring_buffer_event *event,
1134                          unsigned type, unsigned length)
1135 {
1136         event->type_len = type;
1137
1138         switch (type) {
1139
1140         case RINGBUF_TYPE_PADDING:
1141         case RINGBUF_TYPE_TIME_EXTEND:
1142         case RINGBUF_TYPE_TIME_STAMP:
1143                 break;
1144
1145         case 0:
1146                 length -= RB_EVNT_HDR_SIZE;
1147                 if (length > RB_MAX_SMALL_DATA)
1148                         event->array[0] = length;
1149                 else
1150                         event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1151                 break;
1152         default:
1153                 BUG();
1154         }
1155 }
1156
1157 static unsigned rb_calculate_event_length(unsigned length)
1158 {
1159         struct ring_buffer_event event; /* Used only for sizeof array */
1160
1161         /* zero length can cause confusion */
1162         if (!length)
1163                 length = 1;
1164
1165         if (length > RB_MAX_SMALL_DATA)
1166                 length += sizeof(event.array[0]);
1167
1168         length += RB_EVNT_HDR_SIZE;
1169         length = ALIGN(length, RB_ALIGNMENT);
1170
1171         return length;
1172 }
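
/*
 * Worked example: a request for 5 bytes of data becomes
 * 5 + RB_EVNT_HDR_SIZE = 9 bytes and is aligned up to 12 bytes on the
 * page; rb_update_event() then encodes type_len = 2, i.e. 2 * RB_ALIGNMENT
 * = 8 bytes of data space after the 4 byte header.  Payloads larger than
 * RB_MAX_SMALL_DATA also reserve array[0] to hold the length itself.
 */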
1173
1174 static inline void
1175 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1176               struct buffer_page *tail_page,
1177               unsigned long tail, unsigned long length)
1178 {
1179         struct ring_buffer_event *event;
1180
1181         /*
1182          * Only the event that crossed the page boundary
1183          * must fill the old tail_page with padding.
1184          */
1185         if (tail >= BUF_PAGE_SIZE) {
1186                 local_sub(length, &tail_page->write);
1187                 return;
1188         }
1189
1190         event = __rb_page_index(tail_page, tail);
1191
1192         /*
1193          * If this event is bigger than the minimum size, then
1194          * we need to be careful that we don't subtract the
1195          * write counter enough to allow another writer to slip
1196          * in on this page.
1197          * We put in a discarded commit instead, to make sure
1198          * that this space is not used again.
1199          *
1200          * If we are less than the minimum size, we don't need to
1201          * worry about it.
1202          */
1203         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1204                 /* No room for any events */
1205
1206                 /* Mark the rest of the page with padding */
1207                 rb_event_set_padding(event);
1208
1209                 /* Set the write back to the previous setting */
1210                 local_sub(length, &tail_page->write);
1211                 return;
1212         }
1213
1214         /* Put in a discarded event */
1215         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1216         event->type_len = RINGBUF_TYPE_PADDING;
1217         /* time delta must be non zero */
1218         event->time_delta = 1;
1219         /* Account for this as an entry */
1220         local_inc(&tail_page->entries);
1221         local_inc(&cpu_buffer->entries);
1222
1223         /* Set write to end of buffer */
1224         length = (tail + length) - BUF_PAGE_SIZE;
1225         local_sub(length, &tail_page->write);
1226 }
1227
1228 static struct ring_buffer_event *
1229 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1230              unsigned long length, unsigned long tail,
1231              struct buffer_page *commit_page,
1232              struct buffer_page *tail_page, u64 *ts)
1233 {
1234         struct buffer_page *next_page, *head_page, *reader_page;
1235         struct ring_buffer *buffer = cpu_buffer->buffer;
1236         struct ring_buffer_event *event;
1237         bool lock_taken = false;
1238         unsigned long flags;
1239
1240         next_page = tail_page;
1241
1242         local_irq_save(flags);
1243         /*
1244          * Since the write to the buffer is still not
1245          * fully lockless, we must be careful with NMIs.
1246          * The locks in the writers are taken when a write
1247          * crosses to a new page. The locks protect against
1248          * races with the readers (this will soon be fixed
1249          * with a lockless solution).
1250          *
1251          * Because we can not protect against NMIs, and we
1252          * want to keep traces reentrant, we need to manage
1253          * what happens when we are in an NMI.
1254          *
1255          * NMIs can happen after we take the lock.
1256          * If we are in an NMI, only take the lock
1257          * if it is not already taken. Otherwise
1258          * simply fail.
1259          */
1260         if (unlikely(in_nmi())) {
1261                 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1262                         cpu_buffer->nmi_dropped++;
1263                         goto out_reset;
1264                 }
1265         } else
1266                 __raw_spin_lock(&cpu_buffer->lock);
1267
1268         lock_taken = true;
1269
1270         rb_inc_page(cpu_buffer, &next_page);
1271
1272         head_page = cpu_buffer->head_page;
1273         reader_page = cpu_buffer->reader_page;
1274
1275         /* we grabbed the lock before incrementing */
1276         if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1277                 goto out_reset;
1278
1279         /*
1280          * If for some reason, we had an interrupt storm that made
1281          * it all the way around the buffer, bail, and warn
1282          * about it.
1283          */
1284         if (unlikely(next_page == commit_page)) {
1285                 cpu_buffer->commit_overrun++;
1286                 goto out_reset;
1287         }
1288
1289         if (next_page == head_page) {
1290                 if (!(buffer->flags & RB_FL_OVERWRITE))
1291                         goto out_reset;
1292
1293                 /* tail_page has not moved yet? */
1294                 if (tail_page == cpu_buffer->tail_page) {
1295                         /* count overflows */
1296                         cpu_buffer->overrun +=
1297                                 local_read(&head_page->entries);
1298
1299                         rb_inc_page(cpu_buffer, &head_page);
1300                         cpu_buffer->head_page = head_page;
1301                         cpu_buffer->head_page->read = 0;
1302                 }
1303         }
1304
1305         /*
1306          * If the tail page is still the same as what we think
1307          * it is, then it is up to us to update the tail
1308          * pointer.
1309          */
1310         if (tail_page == cpu_buffer->tail_page) {
1311                 local_set(&next_page->write, 0);
1312                 local_set(&next_page->entries, 0);
1313                 local_set(&next_page->page->commit, 0);
1314                 cpu_buffer->tail_page = next_page;
1315
1316                 /* reread the time stamp */
1317                 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1318                 cpu_buffer->tail_page->page->time_stamp = *ts;
1319         }
1320
1321         rb_reset_tail(cpu_buffer, tail_page, tail, length);
1322
1323         /*
1324          * If this was a commit entry that failed,
1325          * increment that too
1326          */
1327         if (tail_page == cpu_buffer->commit_page &&
1328             tail == rb_commit_index(cpu_buffer)) {
1329                 rb_set_commit_to_write(cpu_buffer);
1330         }
1331
1332         __raw_spin_unlock(&cpu_buffer->lock);
1333         local_irq_restore(flags);
1334
1335         /* fail and let the caller try again */
1336         return ERR_PTR(-EAGAIN);
1337
1338  out_reset:
1339         /* reset write */
1340         rb_reset_tail(cpu_buffer, tail_page, tail, length);
1341
1342         if (likely(lock_taken))
1343                 __raw_spin_unlock(&cpu_buffer->lock);
1344         local_irq_restore(flags);
1345         return NULL;
1346 }
1347
1348 static struct ring_buffer_event *
1349 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1350                   unsigned type, unsigned long length, u64 *ts)
1351 {
1352         struct buffer_page *tail_page, *commit_page;
1353         struct ring_buffer_event *event;
1354         unsigned long tail, write;
1355
1356         commit_page = cpu_buffer->commit_page;
1357         /* we just need to protect against interrupts */
1358         barrier();
1359         tail_page = cpu_buffer->tail_page;
1360         write = local_add_return(length, &tail_page->write);
1361         tail = write - length;
1362
1363         /* See if we shot past the end of this buffer page */
1364         if (write > BUF_PAGE_SIZE)
1365                 return rb_move_tail(cpu_buffer, length, tail,
1366                                     commit_page, tail_page, ts);
1367
1368         /* We reserved something on the buffer */
1369
1370         if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1371                 return NULL;
1372
1373         event = __rb_page_index(tail_page, tail);
1374         rb_update_event(event, type, length);
1375
1376         /* The passed in type is zero for DATA */
1377         if (likely(!type))
1378                 local_inc(&tail_page->entries);
1379
1380         /*
1381          * If this is a commit and the tail is zero, then update
1382          * this page's time stamp.
1383          */
1384         if (!tail && rb_is_commit(cpu_buffer, event))
1385                 cpu_buffer->commit_page->page->time_stamp = *ts;
1386
1387         return event;
1388 }
1389
1390 static inline int
1391 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1392                   struct ring_buffer_event *event)
1393 {
1394         unsigned long new_index, old_index;
1395         struct buffer_page *bpage;
1396         unsigned long index;
1397         unsigned long addr;
1398
1399         new_index = rb_event_index(event);
1400         old_index = new_index + rb_event_length(event);
1401         addr = (unsigned long)event;
1402         addr &= PAGE_MASK;
1403
1404         bpage = cpu_buffer->tail_page;
1405
1406         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1407                 /*
1408                  * This is on the tail page. It is possible that
1409                  * a write could come in and move the tail page
1410                  * and write to the next page. That is fine
1411                  * because we just shorten what is on this page.
1412                  */
1413                 index = local_cmpxchg(&bpage->write, old_index, new_index);
1414                 if (index == old_index)
1415                         return 1;
1416         }
1417
1418         /* could not discard */
1419         return 0;
1420 }
1421
1422 static int
1423 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1424                   u64 *ts, u64 *delta)
1425 {
1426         struct ring_buffer_event *event;
1427         static int once;
1428         int ret;
1429
1430         if (unlikely(*delta > (1ULL << 59) && !once++)) {
1431                 printk(KERN_WARNING "Delta way too big! %llu"
1432                        " ts=%llu write stamp = %llu\n",
1433                        (unsigned long long)*delta,
1434                        (unsigned long long)*ts,
1435                        (unsigned long long)cpu_buffer->write_stamp);
1436                 WARN_ON(1);
1437         }
1438
1439         /*
1440          * The delta is too big, we need to add a
1441          * new timestamp.
1442          */
1443         event = __rb_reserve_next(cpu_buffer,
1444                                   RINGBUF_TYPE_TIME_EXTEND,
1445                                   RB_LEN_TIME_EXTEND,
1446                                   ts);
1447         if (!event)
1448                 return -EBUSY;
1449
1450         if (PTR_ERR(event) == -EAGAIN)
1451                 return -EAGAIN;
1452
1453         /* Only a committed time event can update the write stamp */
1454         if (rb_is_commit(cpu_buffer, event)) {
1455                 /*
1456                  * If this is the first on the page, then we need to
1457                  * update the page itself, and just put in a zero.
1458                  */
1459                 if (rb_event_index(event)) {
1460                         event->time_delta = *delta & TS_MASK;
1461                         event->array[0] = *delta >> TS_SHIFT;
1462                 } else {
1463                         cpu_buffer->commit_page->page->time_stamp = *ts;
1464                         /* try to discard, since we do not need this */
1465                         if (!rb_try_to_discard(cpu_buffer, event)) {
1466                                 /* nope, just zero it */
1467                                 event->time_delta = 0;
1468                                 event->array[0] = 0;
1469                         }
1470                 }
1471                 cpu_buffer->write_stamp = *ts;
1472                 /* let the caller know this was the commit */
1473                 ret = 1;
1474         } else {
1475                 /* Try to discard the event */
1476                 if (!rb_try_to_discard(cpu_buffer, event)) {
1477                         /* Darn, this is just wasted space */
1478                         event->time_delta = 0;
1479                         event->array[0] = 0;
1480                 }
1481                 ret = 0;
1482         }
1483
1484         *delta = 0;
1485
1486         return ret;
1487 }
1488
1489 static struct ring_buffer_event *
1490 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1491                       unsigned long length)
1492 {
1493         struct ring_buffer_event *event;
1494         u64 ts, delta = 0;
1495         int commit = 0;
1496         int nr_loops = 0;
1497
1498         length = rb_calculate_event_length(length);
1499  again:
1500         /*
1501          * We allow for interrupts to reenter here and do a trace.
1502          * If one does, it will cause this original code to loop
1503          * back here. Even with heavy interrupts happening, this
1504          * should only happen a few times in a row. If this happens
1505          * 1000 times in a row, there must be either an interrupt
1506          * storm or we have something buggy.
1507          * Bail!
1508          */
1509         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1510                 return NULL;
1511
1512         ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1513
1514         /*
1515          * Only the first commit can update the timestamp.
1516          * Yes there is a race here. If an interrupt comes in
1517          * just after the conditional and it traces too, then it
1518          * will also check the deltas. More than one timestamp may
1519          * also be made. But only the entry that did the actual
1520          * commit will be something other than zero.
1521          */
1522         if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1523                    rb_page_write(cpu_buffer->tail_page) ==
1524                    rb_commit_index(cpu_buffer))) {
1525                 u64 diff;
1526
1527                 diff = ts - cpu_buffer->write_stamp;
1528
1529                 /* make sure this diff is calculated here */
1530                 barrier();
1531
1532                 /* Did the write stamp get updated already? */
1533                 if (unlikely(ts < cpu_buffer->write_stamp))
1534                         goto get_event;
1535
1536                 delta = diff;
1537                 if (unlikely(test_time_stamp(delta))) {
1538
1539                         commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1540                         if (commit == -EBUSY)
1541                                 return NULL;
1542
1543                         if (commit == -EAGAIN)
1544                                 goto again;
1545
1546                         RB_WARN_ON(cpu_buffer, commit < 0);
1547                 }
1548         }
1549
1550  get_event:
1551         event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1552         if (unlikely(PTR_ERR(event) == -EAGAIN))
1553                 goto again;
1554
1555         if (!event) {
1556                 if (unlikely(commit))
1557                         /*
1558          * Ouch! We needed a timestamp and it was committed. But
1559                          * we didn't get our event reserved.
1560                          */
1561                         rb_set_commit_to_write(cpu_buffer);
1562                 return NULL;
1563         }
1564
1565         /*
1566          * If the timestamp was committed, make the commit our entry
1567          * now so that we will update it when needed.
1568          */
1569         if (unlikely(commit))
1570                 rb_set_commit_event(cpu_buffer, event);
1571         else if (!rb_is_commit(cpu_buffer, event))
1572                 delta = 0;
1573
1574         event->time_delta = delta;
1575
1576         return event;
1577 }
1578
1579 #define TRACE_RECURSIVE_DEPTH 16
1580
1581 static int trace_recursive_lock(void)
1582 {
1583         current->trace_recursion++;
1584
1585         if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1586                 return 0;
1587
1588         /* Disable all tracing before we do anything else */
1589         tracing_off_permanent();
1590
1591         printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1592                     "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1593                     current->trace_recursion,
1594                     hardirq_count() >> HARDIRQ_SHIFT,
1595                     softirq_count() >> SOFTIRQ_SHIFT,
1596                     in_nmi());
1597
1598         WARN_ON_ONCE(1);
1599         return -1;
1600 }
1601
1602 static void trace_recursive_unlock(void)
1603 {
1604         WARN_ON_ONCE(!current->trace_recursion);
1605
1606         current->trace_recursion--;
1607 }
1608
1609 static DEFINE_PER_CPU(int, rb_need_resched);
1610
1611 /**
1612  * ring_buffer_lock_reserve - reserve a part of the buffer
1613  * @buffer: the ring buffer to reserve from
1614  * @length: the length of the data to reserve (excluding event header)
1615  *
1616  * Returns a reserved event on the ring buffer to copy directly to.
1617  * The user of this interface will need to get the body to write into
1618  * and can use the ring_buffer_event_data() interface.
1619  *
1620  * The length is the length of the data needed, not the event length
1621  * which also includes the event header.
1622  *
1623  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1624  * If NULL is returned, then nothing has been allocated or locked.
1625  */
1626 struct ring_buffer_event *
1627 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1628 {
1629         struct ring_buffer_per_cpu *cpu_buffer;
1630         struct ring_buffer_event *event;
1631         int cpu, resched;
1632
1633         if (ring_buffer_flags != RB_BUFFERS_ON)
1634                 return NULL;
1635
1636         if (atomic_read(&buffer->record_disabled))
1637                 return NULL;
1638
1639         /* If we are tracing schedule, we don't want to recurse */
1640         resched = ftrace_preempt_disable();
1641
1642         if (trace_recursive_lock())
1643                 goto out_nocheck;
1644
1645         cpu = raw_smp_processor_id();
1646
1647         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1648                 goto out;
1649
1650         cpu_buffer = buffer->buffers[cpu];
1651
1652         if (atomic_read(&cpu_buffer->record_disabled))
1653                 goto out;
1654
1655         if (length > BUF_MAX_DATA_SIZE)
1656                 goto out;
1657
1658         event = rb_reserve_next_event(cpu_buffer, length);
1659         if (!event)
1660                 goto out;
1661
1662         /*
1663          * Need to store resched state on this cpu.
1664          * Only the first needs to.
1665          */
1666
1667         if (preempt_count() == 1)
1668                 per_cpu(rb_need_resched, cpu) = resched;
1669
1670         return event;
1671
1672  out:
1673         trace_recursive_unlock();
1674
1675  out_nocheck:
1676         ftrace_preempt_enable(resched);
1677         return NULL;
1678 }
1679 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1680
1681 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1682                       struct ring_buffer_event *event)
1683 {
1684         local_inc(&cpu_buffer->entries);
1685
1686         /* Only process further if we own the commit */
1687         if (!rb_is_commit(cpu_buffer, event))
1688                 return;
1689
1690         cpu_buffer->write_stamp += event->time_delta;
1691
1692         rb_set_commit_to_write(cpu_buffer);
1693 }
1694
1695 /**
1696  * ring_buffer_unlock_commit - commit a reserved event
1697  * @buffer: The buffer to commit to
1698  * @event: The event pointer to commit.
1699  *
1700  * This commits the data to the ring buffer, and releases any locks held.
1701  *
1702  * Must be paired with ring_buffer_lock_reserve.
1703  */
1704 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1705                               struct ring_buffer_event *event)
1706 {
1707         struct ring_buffer_per_cpu *cpu_buffer;
1708         int cpu = raw_smp_processor_id();
1709
1710         cpu_buffer = buffer->buffers[cpu];
1711
1712         rb_commit(cpu_buffer, event);
1713
1714         trace_recursive_unlock();
1715
1716         /*
1717          * Only the last preempt count needs to restore preemption.
1718          */
1719         if (preempt_count() == 1)
1720                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1721         else
1722                 preempt_enable_no_resched_notrace();
1723
1724         return 0;
1725 }
1726 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1727
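/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the reserve/fill/commit cycle implemented by the two exported
 * functions above.  Assumes <linux/ring_buffer.h> is included and that
 * @buffer was created elsewhere; the helper name and u64 payload are
 * made up for the example.
 */
static int example_record_u64(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	/* Reserve room for the payload only; the event header is implicit */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = val;

	/* Pairs with the reserve; restores the preemption state taken there */
	return ring_buffer_unlock_commit(buffer, event);
}
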
1728 static inline void rb_event_discard(struct ring_buffer_event *event)
1729 {
1730         /* array[0] holds the actual length for the discarded event */
1731         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1732         event->type_len = RINGBUF_TYPE_PADDING;
1733         /* time delta must be non zero */
1734         if (!event->time_delta)
1735                 event->time_delta = 1;
1736 }
1737
1738 /**
1739  * ring_buffer_event_discard - discard any event in the ring buffer
1740  * @event: the event to discard
1741  *
1742  * Sometimes an event that is in the ring buffer needs to be ignored.
1743  * This function lets the user discard an event in the ring buffer
1744  * and then that event will not be read later.
1745  *
1746  * Note, it is up to the user to be careful with this, and protect
1747  * against races. If the user discards an event that has been consumed
1748  * it is possible that it could corrupt the ring buffer.
1749  */
1750 void ring_buffer_event_discard(struct ring_buffer_event *event)
1751 {
1752         rb_event_discard(event);
1753 }
1754 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1755
1756 /**
1757  * ring_buffer_discard_commit - discard an event that has not been committed
1758  * @buffer: the ring buffer
1759  * @event: non committed event to discard
1760  *
1761  * This is similar to ring_buffer_event_discard but must only be
1762  * performed on an event that has not been committed yet. The difference
1763  * is that this will also try to free the event from the ring buffer
1764  * if another event has not been added behind it.
1765  *
1766  * If another event has been added behind it, it will set the event
1767  * up as discarded, and perform the commit.
1768  *
1769  * If this function is called, do not call ring_buffer_unlock_commit on
1770  * the event.
1771  */
1772 void ring_buffer_discard_commit(struct ring_buffer *buffer,
1773                                 struct ring_buffer_event *event)
1774 {
1775         struct ring_buffer_per_cpu *cpu_buffer;
1776         int cpu;
1777
1778         /* The event is discarded regardless */
1779         rb_event_discard(event);
1780
1781         /*
1782          * This must only be called if the event has not been
1783          * committed yet. Thus we can assume that preemption
1784          * is still disabled.
1785          */
1786         RB_WARN_ON(buffer, preemptible());
1787
1788         cpu = smp_processor_id();
1789         cpu_buffer = buffer->buffers[cpu];
1790
1791         if (!rb_try_to_discard(cpu_buffer, event))
1792                 goto out;
1793
1794         /*
1795          * The commit is still visible by the reader, so we
1796          * must increment entries.
1797          */
1798         local_inc(&cpu_buffer->entries);
1799  out:
1800         /*
1801          * If a write came in and pushed the tail page
1802          * we still need to update the commit pointer
1803          * if we were the commit.
1804          */
1805         if (rb_is_commit(cpu_buffer, event))
1806                 rb_set_commit_to_write(cpu_buffer);
1807
1808         trace_recursive_unlock();
1809
1810         /*
1811          * Only the last preempt count needs to restore preemption.
1812          */
1813         if (preempt_count() == 1)
1814                 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1815         else
1816                 preempt_enable_no_resched_notrace();
1817
1818 }
1819 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
1820
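/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): reserving an event and then deciding, e.g. after a filter
 * check, not to keep it.  ring_buffer_discard_commit() takes the place
 * of ring_buffer_unlock_commit(); exactly one of the two must be called
 * for a reserved event.  example_filter_match() is hypothetical.
 */
static void example_filtered_record(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = val;

	if (example_filter_match(val))
		ring_buffer_unlock_commit(buffer, event);
	else
		ring_buffer_discard_commit(buffer, event);
}
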
1821 /**
1822  * ring_buffer_write - write data to the buffer without reserving
1823  * @buffer: The ring buffer to write to.
1824  * @length: The length of the data being written (excluding the event header)
1825  * @data: The data to write to the buffer.
1826  *
1827  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1828  * one function. If you already have the data to write to the buffer, it
1829  * may be easier to simply call this function.
1830  *
1831  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1832  * and not the length of the event which would hold the header.
1833  */
1834 int ring_buffer_write(struct ring_buffer *buffer,
1835                         unsigned long length,
1836                         void *data)
1837 {
1838         struct ring_buffer_per_cpu *cpu_buffer;
1839         struct ring_buffer_event *event;
1840         void *body;
1841         int ret = -EBUSY;
1842         int cpu, resched;
1843
1844         if (ring_buffer_flags != RB_BUFFERS_ON)
1845                 return -EBUSY;
1846
1847         if (atomic_read(&buffer->record_disabled))
1848                 return -EBUSY;
1849
1850         resched = ftrace_preempt_disable();
1851
1852         cpu = raw_smp_processor_id();
1853
1854         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1855                 goto out;
1856
1857         cpu_buffer = buffer->buffers[cpu];
1858
1859         if (atomic_read(&cpu_buffer->record_disabled))
1860                 goto out;
1861
1862         if (length > BUF_MAX_DATA_SIZE)
1863                 goto out;
1864
1865         event = rb_reserve_next_event(cpu_buffer, length);
1866         if (!event)
1867                 goto out;
1868
1869         body = rb_event_data(event);
1870
1871         memcpy(body, data, length);
1872
1873         rb_commit(cpu_buffer, event);
1874
1875         ret = 0;
1876  out:
1877         ftrace_preempt_enable(resched);
1878
1879         return ret;
1880 }
1881 EXPORT_SYMBOL_GPL(ring_buffer_write);
1882
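/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the single-call form above, used when the payload already
 * exists in memory.  It is equivalent to reserve + memcpy + commit.
 * The record layout is invented for the example.
 */
struct example_record {
	u64	stamp;
	int	value;
};

static int example_write_record(struct ring_buffer *buffer,
				struct example_record *rec)
{
	/* 0 on success, -EBUSY if the event could not be written */
	return ring_buffer_write(buffer, sizeof(*rec), rec);
}
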
1883 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1884 {
1885         struct buffer_page *reader = cpu_buffer->reader_page;
1886         struct buffer_page *head = cpu_buffer->head_page;
1887         struct buffer_page *commit = cpu_buffer->commit_page;
1888
1889         return reader->read == rb_page_commit(reader) &&
1890                 (commit == reader ||
1891                  (commit == head &&
1892                   head->read == rb_page_commit(commit)));
1893 }
1894
1895 /**
1896  * ring_buffer_record_disable - stop all writes into the buffer
1897  * @buffer: The ring buffer to stop writes to.
1898  *
1899  * This prevents all writes to the buffer. Any attempt to write
1900  * to the buffer after this will fail and return NULL.
1901  *
1902  * The caller should call synchronize_sched() after this.
1903  */
1904 void ring_buffer_record_disable(struct ring_buffer *buffer)
1905 {
1906         atomic_inc(&buffer->record_disabled);
1907 }
1908 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1909
1910 /**
1911  * ring_buffer_record_enable - enable writes to the buffer
1912  * @buffer: The ring buffer to enable writes
1913  *
1914  * Note, multiple disables will need the same number of enables
1915  * to truly enable the writing (much like preempt_disable).
1916  */
1917 void ring_buffer_record_enable(struct ring_buffer *buffer)
1918 {
1919         atomic_dec(&buffer->record_disabled);
1920 }
1921 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1922
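/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): quiescing writers before examining the buffer, following the
 * synchronize_sched() note in the kernel-doc above.  The examination
 * step is hypothetical (it could, for instance, use the iterator API
 * defined later in this file).
 */
static void example_quiesce_and_examine(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);

	/* wait for writers already inside the reserve/commit path */
	synchronize_sched();

	example_examine(buffer);		/* hypothetical */

	ring_buffer_record_enable(buffer);
}
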
1923 /**
1924  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1925  * @buffer: The ring buffer to stop writes to.
1926  * @cpu: The CPU buffer to stop
1927  *
1928  * This prevents all writes to the buffer. Any attempt to write
1929  * to the buffer after this will fail and return NULL.
1930  *
1931  * The caller should call synchronize_sched() after this.
1932  */
1933 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1934 {
1935         struct ring_buffer_per_cpu *cpu_buffer;
1936
1937         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1938                 return;
1939
1940         cpu_buffer = buffer->buffers[cpu];
1941         atomic_inc(&cpu_buffer->record_disabled);
1942 }
1943 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1944
1945 /**
1946  * ring_buffer_record_enable_cpu - enable writes to the buffer
1947  * @buffer: The ring buffer to enable writes
1948  * @cpu: The CPU to enable.
1949  *
1950  * Note, multiple disables will need the same number of enables
1951  * to truly enable the writing (much like preempt_disable).
1952  */
1953 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1954 {
1955         struct ring_buffer_per_cpu *cpu_buffer;
1956
1957         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1958                 return;
1959
1960         cpu_buffer = buffer->buffers[cpu];
1961         atomic_dec(&cpu_buffer->record_disabled);
1962 }
1963 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1964
1965 /**
1966  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1967  * @buffer: The ring buffer
1968  * @cpu: The per CPU buffer to get the entries from.
1969  */
1970 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1971 {
1972         struct ring_buffer_per_cpu *cpu_buffer;
1973         unsigned long ret;
1974
1975         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1976                 return 0;
1977
1978         cpu_buffer = buffer->buffers[cpu];
1979         ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1980                 - cpu_buffer->read;
1981
1982         return ret;
1983 }
1984 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1985
1986 /**
1987  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1988  * @buffer: The ring buffer
1989  * @cpu: The per CPU buffer to get the number of overruns from
1990  */
1991 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1992 {
1993         struct ring_buffer_per_cpu *cpu_buffer;
1994         unsigned long ret;
1995
1996         if (!cpumask_test_cpu(cpu, buffer->cpumask))
1997                 return 0;
1998
1999         cpu_buffer = buffer->buffers[cpu];
2000         ret = cpu_buffer->overrun;
2001
2002         return ret;
2003 }
2004 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2005
2006 /**
2007  * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
2008  * @buffer: The ring buffer
2009  * @cpu: The per CPU buffer to get the number of dropped NMI writes from
2010  */
2011 unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
2012 {
2013         struct ring_buffer_per_cpu *cpu_buffer;
2014         unsigned long ret;
2015
2016         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2017                 return 0;
2018
2019         cpu_buffer = buffer->buffers[cpu];
2020         ret = cpu_buffer->nmi_dropped;
2021
2022         return ret;
2023 }
2024 EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
2025
2026 /**
2027  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2028  * @buffer: The ring buffer
2029  * @cpu: The per CPU buffer to get the number of overruns from
2030  */
2031 unsigned long
2032 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2033 {
2034         struct ring_buffer_per_cpu *cpu_buffer;
2035         unsigned long ret;
2036
2037         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2038                 return 0;
2039
2040         cpu_buffer = buffer->buffers[cpu];
2041         ret = cpu_buffer->commit_overrun;
2042
2043         return ret;
2044 }
2045 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2046
2047 /**
2048  * ring_buffer_entries - get the number of entries in a buffer
2049  * @buffer: The ring buffer
2050  *
2051  * Returns the total number of entries in the ring buffer
2052  * (all CPU entries)
2053  */
2054 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2055 {
2056         struct ring_buffer_per_cpu *cpu_buffer;
2057         unsigned long entries = 0;
2058         int cpu;
2059
2060         /* if you care about this being correct, lock the buffer */
2061         for_each_buffer_cpu(buffer, cpu) {
2062                 cpu_buffer = buffer->buffers[cpu];
2063                 entries += (local_read(&cpu_buffer->entries) -
2064                             cpu_buffer->overrun) - cpu_buffer->read;
2065         }
2066
2067         return entries;
2068 }
2069 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2070
2071 /**
2072  * ring_buffer_overruns - get the number of overruns in the buffer
2073  * @buffer: The ring buffer
2074  *
2075  * Returns the total number of overruns in the ring buffer
2076  * (all CPU entries)
2077  */
2078 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2079 {
2080         struct ring_buffer_per_cpu *cpu_buffer;
2081         unsigned long overruns = 0;
2082         int cpu;
2083
2084         /* if you care about this being correct, lock the buffer */
2085         for_each_buffer_cpu(buffer, cpu) {
2086                 cpu_buffer = buffer->buffers[cpu];
2087                 overruns += cpu_buffer->overrun;
2088         }
2089
2090         return overruns;
2091 }
2092 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2093
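/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): a per-CPU view of how many events are waiting to be read and
 * how many were lost, using the accessors above.  As the comments note,
 * the numbers are racy unless the caller serializes against writers.
 */
static void example_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu %d: %lu entries, %lu overruns\n", cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu));
}
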
2094 static void rb_iter_reset(struct ring_buffer_iter *iter)
2095 {
2096         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2097
2098         /* Iterator usage is expected to have record disabled */
2099         if (list_empty(&cpu_buffer->reader_page->list)) {
2100                 iter->head_page = cpu_buffer->head_page;
2101                 iter->head = cpu_buffer->head_page->read;
2102         } else {
2103                 iter->head_page = cpu_buffer->reader_page;
2104                 iter->head = cpu_buffer->reader_page->read;
2105         }
2106         if (iter->head)
2107                 iter->read_stamp = cpu_buffer->read_stamp;
2108         else
2109                 iter->read_stamp = iter->head_page->page->time_stamp;
2110 }
2111
2112 /**
2113  * ring_buffer_iter_reset - reset an iterator
2114  * @iter: The iterator to reset
2115  *
2116  * Resets the iterator, so that it will start from the beginning
2117  * again.
2118  */
2119 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2120 {
2121         struct ring_buffer_per_cpu *cpu_buffer;
2122         unsigned long flags;
2123
2124         if (!iter)
2125                 return;
2126
2127         cpu_buffer = iter->cpu_buffer;
2128
2129         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2130         rb_iter_reset(iter);
2131         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2132 }
2133 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2134
2135 /**
2136  * ring_buffer_iter_empty - check if an iterator has no more to read
2137  * @iter: The iterator to check
2138  */
2139 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2140 {
2141         struct ring_buffer_per_cpu *cpu_buffer;
2142
2143         cpu_buffer = iter->cpu_buffer;
2144
2145         return iter->head_page == cpu_buffer->commit_page &&
2146                 iter->head == rb_commit_index(cpu_buffer);
2147 }
2148 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2149
2150 static void
2151 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2152                      struct ring_buffer_event *event)
2153 {
2154         u64 delta;
2155
2156         switch (event->type_len) {
2157         case RINGBUF_TYPE_PADDING:
2158                 return;
2159
2160         case RINGBUF_TYPE_TIME_EXTEND:
2161                 delta = event->array[0];
2162                 delta <<= TS_SHIFT;
2163                 delta += event->time_delta;
2164                 cpu_buffer->read_stamp += delta;
2165                 return;
2166
2167         case RINGBUF_TYPE_TIME_STAMP:
2168                 /* FIXME: not implemented */
2169                 return;
2170
2171         case RINGBUF_TYPE_DATA:
2172                 cpu_buffer->read_stamp += event->time_delta;
2173                 return;
2174
2175         default:
2176                 BUG();
2177         }
2178         return;
2179 }
2180
2181 static void
2182 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2183                           struct ring_buffer_event *event)
2184 {
2185         u64 delta;
2186
2187         switch (event->type_len) {
2188         case RINGBUF_TYPE_PADDING:
2189                 return;
2190
2191         case RINGBUF_TYPE_TIME_EXTEND:
2192                 delta = event->array[0];
2193                 delta <<= TS_SHIFT;
2194                 delta += event->time_delta;
2195                 iter->read_stamp += delta;
2196                 return;
2197
2198         case RINGBUF_TYPE_TIME_STAMP:
2199                 /* FIXME: not implemented */
2200                 return;
2201
2202         case RINGBUF_TYPE_DATA:
2203                 iter->read_stamp += event->time_delta;
2204                 return;
2205
2206         default:
2207                 BUG();
2208         }
2209         return;
2210 }
2211
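/*
 * Worked example (added for exposition, not part of the original file):
 * a normal event's time_delta field holds only TS_SHIFT (27) bits, so a
 * gap of, say, 0x12345678 ns between events cannot be stored directly.
 * The write side (rb_add_time_stamp()) instead emits a
 * RINGBUF_TYPE_TIME_EXTEND event with the delta split as
 *
 *	array[0]   = 0x12345678 >> 27            = 0x2
 *	time_delta = 0x12345678 & ((1 << 27) - 1) = 0x2345678
 *
 * and the two switch cases above reassemble it on the read side:
 * (0x2 << 27) + 0x2345678 = 0x12345678.
 */
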
2212 static struct buffer_page *
2213 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2214 {
2215         struct buffer_page *reader = NULL;
2216         unsigned long flags;
2217         int nr_loops = 0;
2218
2219         local_irq_save(flags);
2220         __raw_spin_lock(&cpu_buffer->lock);
2221
2222  again:
2223         /*
2224          * This should normally only loop twice. But because the
2225          * start of the reader inserts an empty page, it causes
2226          * a case where we will loop three times. There should be no
2227          * reason to loop four times (that I know of).
2228          */
2229         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2230                 reader = NULL;
2231                 goto out;
2232         }
2233
2234         reader = cpu_buffer->reader_page;
2235
2236         /* If there's more to read, return this page */
2237         if (cpu_buffer->reader_page->read < rb_page_size(reader))
2238                 goto out;
2239
2240         /* Never should we have an index greater than the size */
2241         if (RB_WARN_ON(cpu_buffer,
2242                        cpu_buffer->reader_page->read > rb_page_size(reader)))
2243                 goto out;
2244
2245         /* check if we caught up to the tail */
2246         reader = NULL;
2247         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2248                 goto out;
2249
2250         /*
2251          * Splice the empty reader page into the list around the head.
2252          * Reset the reader page to size zero.
2253          */
2254
2255         reader = cpu_buffer->head_page;
2256         cpu_buffer->reader_page->list.next = reader->list.next;
2257         cpu_buffer->reader_page->list.prev = reader->list.prev;
2258
2259         local_set(&cpu_buffer->reader_page->write, 0);
2260         local_set(&cpu_buffer->reader_page->entries, 0);
2261         local_set(&cpu_buffer->reader_page->page->commit, 0);
2262
2263         /* Make the reader page now replace the head */
2264         reader->list.prev->next = &cpu_buffer->reader_page->list;
2265         reader->list.next->prev = &cpu_buffer->reader_page->list;
2266
2267         /*
2268          * If the tail is on the reader, then we must set the head
2269          * to the inserted page, otherwise we set it one before.
2270          */
2271         cpu_buffer->head_page = cpu_buffer->reader_page;
2272
2273         if (cpu_buffer->commit_page != reader)
2274                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2275
2276         /* Finally update the reader page to the new head */
2277         cpu_buffer->reader_page = reader;
2278         rb_reset_reader_page(cpu_buffer);
2279
2280         goto again;
2281
2282  out:
2283         __raw_spin_unlock(&cpu_buffer->lock);
2284         local_irq_restore(flags);
2285
2286         return reader;
2287 }
2288
2289 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2290 {
2291         struct ring_buffer_event *event;
2292         struct buffer_page *reader;
2293         unsigned length;
2294
2295         reader = rb_get_reader_page(cpu_buffer);
2296
2297         /* This function should not be called when buffer is empty */
2298         if (RB_WARN_ON(cpu_buffer, !reader))
2299                 return;
2300
2301         event = rb_reader_event(cpu_buffer);
2302
2303         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2304                         || rb_discarded_event(event))
2305                 cpu_buffer->read++;
2306
2307         rb_update_read_stamp(cpu_buffer, event);
2308
2309         length = rb_event_length(event);
2310         cpu_buffer->reader_page->read += length;
2311 }
2312
2313 static void rb_advance_iter(struct ring_buffer_iter *iter)
2314 {
2315         struct ring_buffer *buffer;
2316         struct ring_buffer_per_cpu *cpu_buffer;
2317         struct ring_buffer_event *event;
2318         unsigned length;
2319
2320         cpu_buffer = iter->cpu_buffer;
2321         buffer = cpu_buffer->buffer;
2322
2323         /*
2324          * Check if we are at the end of the buffer.
2325          */
2326         if (iter->head >= rb_page_size(iter->head_page)) {
2327                 /* discarded commits can make the page empty */
2328                 if (iter->head_page == cpu_buffer->commit_page)
2329                         return;
2330                 rb_inc_iter(iter);
2331                 return;
2332         }
2333
2334         event = rb_iter_head_event(iter);
2335
2336         length = rb_event_length(event);
2337
2338         /*
2339          * This should not be called to advance the header if we are
2340          * at the tail of the buffer.
2341          */
2342         if (RB_WARN_ON(cpu_buffer,
2343                        (iter->head_page == cpu_buffer->commit_page) &&
2344                        (iter->head + length > rb_commit_index(cpu_buffer))))
2345                 return;
2346
2347         rb_update_iter_read_stamp(iter, event);
2348
2349         iter->head += length;
2350
2351         /* check for end of page padding */
2352         if ((iter->head >= rb_page_size(iter->head_page)) &&
2353             (iter->head_page != cpu_buffer->commit_page))
2354                 rb_advance_iter(iter);
2355 }
2356
2357 static struct ring_buffer_event *
2358 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2359 {
2360         struct ring_buffer_per_cpu *cpu_buffer;
2361         struct ring_buffer_event *event;
2362         struct buffer_page *reader;
2363         int nr_loops = 0;
2364
2365         cpu_buffer = buffer->buffers[cpu];
2366
2367  again:
2368         /*
2369          * We repeat when a timestamp is encountered. It is possible
2370          * to get multiple timestamps from an interrupt entering just
2371          * as one timestamp is about to be written, or from discarded
2372          * commits. The most that we can have is the number on a single page.
2373          */
2374         if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2375                 return NULL;
2376
2377         reader = rb_get_reader_page(cpu_buffer);
2378         if (!reader)
2379                 return NULL;
2380
2381         event = rb_reader_event(cpu_buffer);
2382
2383         switch (event->type_len) {
2384         case RINGBUF_TYPE_PADDING:
2385                 if (rb_null_event(event))
2386                         RB_WARN_ON(cpu_buffer, 1);
2387                 /*
2388                  * Because the writer could be discarding every
2389                  * event it creates (which would probably be bad)
2390                  * if we were to go back to "again" then we may never
2391                  * catch up, and will trigger the warn on, or lock
2392                  * the box. Return the padding, and we will release
2393                  * the current locks, and try again.
2394                  */
2395                 rb_advance_reader(cpu_buffer);
2396                 return event;
2397
2398         case RINGBUF_TYPE_TIME_EXTEND:
2399                 /* Internal data, OK to advance */
2400                 rb_advance_reader(cpu_buffer);
2401                 goto again;
2402
2403         case RINGBUF_TYPE_TIME_STAMP:
2404                 /* FIXME: not implemented */
2405                 rb_advance_reader(cpu_buffer);
2406                 goto again;
2407
2408         case RINGBUF_TYPE_DATA:
2409                 if (ts) {
2410                         *ts = cpu_buffer->read_stamp + event->time_delta;
2411                         ring_buffer_normalize_time_stamp(buffer,
2412                                                          cpu_buffer->cpu, ts);
2413                 }
2414                 return event;
2415
2416         default:
2417                 BUG();
2418         }
2419
2420         return NULL;
2421 }
2422 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2423
2424 static struct ring_buffer_event *
2425 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2426 {
2427         struct ring_buffer *buffer;
2428         struct ring_buffer_per_cpu *cpu_buffer;
2429         struct ring_buffer_event *event;
2430         int nr_loops = 0;
2431
2432         if (ring_buffer_iter_empty(iter))
2433                 return NULL;
2434
2435         cpu_buffer = iter->cpu_buffer;
2436         buffer = cpu_buffer->buffer;
2437
2438  again:
2439         /*
2440          * We repeat when a timestamp is encountered.
2441          * We can get multiple timestamps by nested interrupts or also
2442          * if filtering is on (discarding commits). Since discarding
2443          * commits can be frequent we can get a lot of timestamps.
2444          * But we limit them by not adding timestamps if they begin
2445          * at the start of a page.
2446          */
2447         if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2448                 return NULL;
2449
2450         if (rb_per_cpu_empty(cpu_buffer))
2451                 return NULL;
2452
2453         event = rb_iter_head_event(iter);
2454
2455         switch (event->type_len) {
2456         case RINGBUF_TYPE_PADDING:
2457                 if (rb_null_event(event)) {
2458                         rb_inc_iter(iter);
2459                         goto again;
2460                 }
2461                 rb_advance_iter(iter);
2462                 return event;
2463
2464         case RINGBUF_TYPE_TIME_EXTEND:
2465                 /* Internal data, OK to advance */
2466                 rb_advance_iter(iter);
2467                 goto again;
2468
2469         case RINGBUF_TYPE_TIME_STAMP:
2470                 /* FIXME: not implemented */
2471                 rb_advance_iter(iter);
2472                 goto again;
2473
2474         case RINGBUF_TYPE_DATA:
2475                 if (ts) {
2476                         *ts = iter->read_stamp + event->time_delta;
2477                         ring_buffer_normalize_time_stamp(buffer,
2478                                                          cpu_buffer->cpu, ts);
2479                 }
2480                 return event;
2481
2482         default:
2483                 BUG();
2484         }
2485
2486         return NULL;
2487 }
2488 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2489
2490 /**
2491  * ring_buffer_peek - peek at the next event to be read
2492  * @buffer: The ring buffer to read
2493  * @cpu: The cpu to peek at
2494  * @ts: The timestamp counter of this event.
2495  *
2496  * This will return the event that will be read next, but does
2497  * not consume the data.
2498  */
2499 struct ring_buffer_event *
2500 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2501 {
2502         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2503         struct ring_buffer_event *event;
2504         unsigned long flags;
2505
2506         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2507                 return NULL;
2508
2509  again:
2510         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2511         event = rb_buffer_peek(buffer, cpu, ts);
2512         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2513
2514         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2515                 cpu_relax();
2516                 goto again;
2517         }
2518
2519         return event;
2520 }
2521
2522 /**
2523  * ring_buffer_iter_peek - peek at the next event to be read
2524  * @iter: The ring buffer iterator
2525  * @ts: The timestamp counter of this event.
2526  *
2527  * This will return the event that will be read next, but does
2528  * not increment the iterator.
2529  */
2530 struct ring_buffer_event *
2531 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2532 {
2533         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2534         struct ring_buffer_event *event;
2535         unsigned long flags;
2536
2537  again:
2538         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2539         event = rb_iter_peek(iter, ts);
2540         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2541
2542         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2543                 cpu_relax();
2544                 goto again;
2545         }
2546
2547         return event;
2548 }
2549
2550 /**
2551  * ring_buffer_consume - return an event and consume it
2552  * @buffer: The ring buffer to get the next event from
2553  *
2554  * Returns the next event in the ring buffer, and that event is consumed.
2555  * Meaning that sequential reads will keep returning a different event,
2556  * and eventually empty the ring buffer if the producer is slower.
2557  */
2558 struct ring_buffer_event *
2559 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2560 {
2561         struct ring_buffer_per_cpu *cpu_buffer;
2562         struct ring_buffer_event *event = NULL;
2563         unsigned long flags;
2564
2565  again:
2566         /* might be called in atomic */
2567         preempt_disable();
2568
2569         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2570                 goto out;
2571
2572         cpu_buffer = buffer->buffers[cpu];
2573         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2574
2575         event = rb_buffer_peek(buffer, cpu, ts);
2576         if (!event)
2577                 goto out_unlock;
2578
2579         rb_advance_reader(cpu_buffer);
2580
2581  out_unlock:
2582         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2583
2584  out:
2585         preempt_enable();
2586
2587         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2588                 cpu_relax();
2589                 goto again;
2590         }
2591
2592         return event;
2593 }
2594 EXPORT_SYMBOL_GPL(ring_buffer_consume);
2595
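/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): draining one CPU's buffer with the consuming read above.  The
 * timestamp has already been normalized by ring_buffer_consume();
 * example_handle() is hypothetical, and the event accessors come from
 * <linux/ring_buffer.h>.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		example_handle(ring_buffer_event_data(event),
			       ring_buffer_event_length(event), ts);
}
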
2596 /**
2597  * ring_buffer_read_start - start a non consuming read of the buffer
2598  * @buffer: The ring buffer to read from
2599  * @cpu: The cpu buffer to iterate over
2600  *
2601  * This starts up an iteration through the buffer. It also disables
2602  * the recording to the buffer until the reading is finished.
2603  * This prevents the reading from being corrupted. This is not
2604  * a consuming read, so a producer is not expected.
2605  *
2606  * Must be paired with ring_buffer_read_finish.
2607  */
2608 struct ring_buffer_iter *
2609 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2610 {
2611         struct ring_buffer_per_cpu *cpu_buffer;
2612         struct ring_buffer_iter *iter;
2613         unsigned long flags;
2614
2615         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2616                 return NULL;
2617
2618         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2619         if (!iter)
2620                 return NULL;
2621
2622         cpu_buffer = buffer->buffers[cpu];
2623
2624         iter->cpu_buffer = cpu_buffer;
2625
2626         atomic_inc(&cpu_buffer->record_disabled);
2627         synchronize_sched();
2628
2629         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2630         __raw_spin_lock(&cpu_buffer->lock);
2631         rb_iter_reset(iter);
2632         __raw_spin_unlock(&cpu_buffer->lock);
2633         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2634
2635         return iter;
2636 }
2637 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2638
2639 /**
2640  * ring_buffer_read_finish - finish reading the iterator of the buffer
2641  * @iter: The iterator retrieved by ring_buffer_read_start
2642  *
2643  * This re-enables the recording to the buffer, and frees the
2644  * iterator.
2645  */
2646 void
2647 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2648 {
2649         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2650
2651         atomic_dec(&cpu_buffer->record_disabled);
2652         kfree(iter);
2653 }
2654 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2655
2656 /**
2657  * ring_buffer_read - read the next item in the ring buffer by the iterator
2658  * @iter: The ring buffer iterator
2659  * @ts: The time stamp of the event read.
2660  *
2661  * This reads the next event in the ring buffer and increments the iterator.
2662  */
2663 struct ring_buffer_event *
2664 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2665 {
2666         struct ring_buffer_event *event;
2667         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2668         unsigned long flags;
2669
2670  again:
2671         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2672         event = rb_iter_peek(iter, ts);
2673         if (!event)
2674                 goto out;
2675
2676         rb_advance_iter(iter);
2677  out:
2678         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2679
2680         if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2681                 cpu_relax();
2682                 goto again;
2683         }
2684
2685         return event;
2686 }
2687 EXPORT_SYMBOL_GPL(ring_buffer_read);
2688
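/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): a complete non-consuming pass over one CPU's buffer with the
 * iterator API above.  Recording on that CPU buffer stays disabled from
 * ring_buffer_read_start() until ring_buffer_read_finish().
 * example_handle() is hypothetical.
 */
static int example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return -ENOMEM;

	while ((event = ring_buffer_read(iter, &ts)))
		example_handle(ring_buffer_event_data(event),
			       ring_buffer_event_length(event), ts);

	ring_buffer_read_finish(iter);
	return 0;
}
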
2689 /**
2690  * ring_buffer_size - return the size of the ring buffer (in bytes)
2691  * @buffer: The ring buffer.
2692  */
2693 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2694 {
2695         return BUF_PAGE_SIZE * buffer->pages;
2696 }
2697 EXPORT_SYMBOL_GPL(ring_buffer_size);
2698
2699 static void
2700 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2701 {
2702         cpu_buffer->head_page
2703                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2704         local_set(&cpu_buffer->head_page->write, 0);
2705         local_set(&cpu_buffer->head_page->entries, 0);
2706         local_set(&cpu_buffer->head_page->page->commit, 0);
2707
2708         cpu_buffer->head_page->read = 0;
2709
2710         cpu_buffer->tail_page = cpu_buffer->head_page;
2711         cpu_buffer->commit_page = cpu_buffer->head_page;
2712
2713         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2714         local_set(&cpu_buffer->reader_page->write, 0);
2715         local_set(&cpu_buffer->reader_page->entries, 0);
2716         local_set(&cpu_buffer->reader_page->page->commit, 0);
2717         cpu_buffer->reader_page->read = 0;
2718
2719         cpu_buffer->nmi_dropped = 0;
2720         cpu_buffer->commit_overrun = 0;
2721         cpu_buffer->overrun = 0;
2722         cpu_buffer->read = 0;
2723         local_set(&cpu_buffer->entries, 0);
2724
2725         cpu_buffer->write_stamp = 0;
2726         cpu_buffer->read_stamp = 0;
2727 }
2728
2729 /**
2730  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2731  * @buffer: The ring buffer to reset a per cpu buffer of
2732  * @cpu: The CPU buffer to be reset
2733  */
2734 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2735 {
2736         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2737         unsigned long flags;
2738
2739         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2740                 return;
2741
2742         atomic_inc(&cpu_buffer->record_disabled);
2743
2744         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2745
2746         __raw_spin_lock(&cpu_buffer->lock);
2747
2748         rb_reset_cpu(cpu_buffer);
2749
2750         __raw_spin_unlock(&cpu_buffer->lock);
2751
2752         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2753
2754         atomic_dec(&cpu_buffer->record_disabled);
2755 }
2756 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2757
2758 /**
2759  * ring_buffer_reset - reset a ring buffer
2760  * @buffer: The ring buffer to reset all cpu buffers
2761  */
2762 void ring_buffer_reset(struct ring_buffer *buffer)
2763 {
2764         int cpu;
2765
2766         for_each_buffer_cpu(buffer, cpu)
2767                 ring_buffer_reset_cpu(buffer, cpu);
2768 }
2769 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2770
2771 /**
2772  * ring_buffer_empty - is the ring buffer empty?
2773  * @buffer: The ring buffer to test
2774  */
2775 int ring_buffer_empty(struct ring_buffer *buffer)
2776 {
2777         struct ring_buffer_per_cpu *cpu_buffer;
2778         int cpu;
2779
2780         /* yes this is racy, but if you don't like the race, lock the buffer */
2781         for_each_buffer_cpu(buffer, cpu) {
2782                 cpu_buffer = buffer->buffers[cpu];
2783                 if (!rb_per_cpu_empty(cpu_buffer))
2784                         return 0;
2785         }
2786
2787         return 1;
2788 }
2789 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2790
2791 /**
2792  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2793  * @buffer: The ring buffer
2794  * @cpu: The CPU buffer to test
2795  */
2796 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2797 {
2798         struct ring_buffer_per_cpu *cpu_buffer;
2799         int ret;
2800
2801         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2802                 return 1;
2803
2804         cpu_buffer = buffer->buffers[cpu];
2805         ret = rb_per_cpu_empty(cpu_buffer);
2806
2807
2808         return ret;
2809 }
2810 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2811
2812 /**
2813  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2814  * @buffer_a: One buffer to swap with
2815  * @buffer_b: The other buffer to swap with
2816  *
2817  * This function is useful for tracers that want to take a "snapshot"
2818  * of a CPU buffer and have another backup buffer lying around.
2819  * It is expected that the tracer handles the cpu buffer not being
2820  * used at the moment.
2821  */
2822 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2823                          struct ring_buffer *buffer_b, int cpu)
2824 {
2825         struct ring_buffer_per_cpu *cpu_buffer_a;
2826         struct ring_buffer_per_cpu *cpu_buffer_b;
2827         int ret = -EINVAL;
2828
2829         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2830             !cpumask_test_cpu(cpu, buffer_b->cpumask))
2831                 goto out;
2832
2833         /* At least make sure the two buffers are somewhat the same */
2834         if (buffer_a->pages != buffer_b->pages)
2835                 goto out;
2836
2837         ret = -EAGAIN;
2838
2839         if (ring_buffer_flags != RB_BUFFERS_ON)
2840                 goto out;
2841
2842         if (atomic_read(&buffer_a->record_disabled))
2843                 goto out;
2844
2845         if (atomic_read(&buffer_b->record_disabled))
2846                 goto out;
2847
2848         cpu_buffer_a = buffer_a->buffers[cpu];
2849         cpu_buffer_b = buffer_b->buffers[cpu];
2850
2851         if (atomic_read(&cpu_buffer_a->record_disabled))
2852                 goto out;
2853
2854         if (atomic_read(&cpu_buffer_b->record_disabled))
2855                 goto out;
2856
2857         /*
2858          * We can't do a synchronize_sched here because this
2859          * function can be called in atomic context.
2860          * Normally this will be called from the same CPU as cpu.
2861          * If not it's up to the caller to protect this.
2862          */
2863         atomic_inc(&cpu_buffer_a->record_disabled);
2864         atomic_inc(&cpu_buffer_b->record_disabled);
2865
2866         buffer_a->buffers[cpu] = cpu_buffer_b;
2867         buffer_b->buffers[cpu] = cpu_buffer_a;
2868
2869         cpu_buffer_b->buffer = buffer_a;
2870         cpu_buffer_a->buffer = buffer_b;
2871
2872         atomic_dec(&cpu_buffer_a->record_disabled);
2873         atomic_dec(&cpu_buffer_b->record_disabled);
2874
2875         ret = 0;
2876 out:
2877         return ret;
2878 }
2879 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
2880
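/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the "snapshot" pattern described above.  The live buffer's
 * current-CPU buffer is swapped with a spare buffer of the same size so
 * the snapshot can be read later; preemption is disabled so the swap
 * runs on the CPU being swapped, as the comment in the function asks.
 * Both buffers are assumed to have been allocated elsewhere.
 */
static int example_snapshot_this_cpu(struct ring_buffer *live,
				     struct ring_buffer *spare)
{
	int ret;

	preempt_disable();
	ret = ring_buffer_swap_cpu(live, spare, smp_processor_id());
	preempt_enable();

	return ret;
}
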
2881 /**
2882  * ring_buffer_alloc_read_page - allocate a page to read from buffer
2883  * @buffer: the buffer to allocate for.
2884  *
2885  * This function is used in conjunction with ring_buffer_read_page.
2886  * When reading a full page from the ring buffer, these functions
2887  * can be used to speed up the process. The calling function should
2888  * allocate a few pages first with this function. Then when it
2889  * needs to get pages from the ring buffer, it passes the result
2890  * of this function into ring_buffer_read_page, which will swap
2891  * the page that was allocated, with the read page of the buffer.
2892  *
2893  * Returns:
2894  *  The page allocated, or NULL on error.
2895  */
2896 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2897 {
2898         struct buffer_data_page *bpage;
2899         unsigned long addr;
2900
2901         addr = __get_free_page(GFP_KERNEL);
2902         if (!addr)
2903                 return NULL;
2904
2905         bpage = (void *)addr;
2906
2907         rb_init_page(bpage);
2908
2909         return bpage;
2910 }
2911 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
2912
2913 /**
2914  * ring_buffer_free_read_page - free an allocated read page
2915  * @buffer: the buffer the page was allocated for
2916  * @data: the page to free
2917  *
2918  * Free a page allocated from ring_buffer_alloc_read_page.
2919  */
2920 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2921 {
2922         free_page((unsigned long)data);
2923 }
2924 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
2925
2926 /**
2927  * ring_buffer_read_page - extract a page from the ring buffer
2928  * @buffer: buffer to extract from
2929  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2930  * @len: amount to extract
2931  * @cpu: the cpu of the buffer to extract
2932  * @full: should the extraction only happen when the page is full.
2933  *
2934  * This function will pull out a page from the ring buffer and consume it.
2935  * @data_page must be the address of the variable that was returned
2936  * from ring_buffer_alloc_read_page. This is because the page might be used
2937  * to swap with a page in the ring buffer.
2938  *
2939  * for example:
2940  *      rpage = ring_buffer_alloc_read_page(buffer);
2941  *      if (!rpage)
2942  *              return error;
2943  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2944  *      if (ret >= 0)
2945  *              process_page(rpage, ret);
2946  *
2947  * When @full is set, the function will not return the data unless
2948  * the writer is off the reader page.
2949  *
2950  * Note: it is up to the calling functions to handle sleeps and wakeups.
2951  *  The ring buffer can be used anywhere in the kernel and can not
2952  *  blindly call wake_up. The layer that uses the ring buffer must be
2953  *  responsible for that.
2954  *
2955  * Returns:
2956  *  >=0 if data has been transferred, returns the offset of consumed data.
2957  *  <0 if no data has been transferred.
2958  */
2959 int ring_buffer_read_page(struct ring_buffer *buffer,
2960                           void **data_page, size_t len, int cpu, int full)
2961 {
2962         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2963         struct ring_buffer_event *event;
2964         struct buffer_data_page *bpage;
2965         struct buffer_page *reader;
2966         unsigned long flags;
2967         unsigned int commit;
2968         unsigned int read;
2969         u64 save_timestamp;
2970         int ret = -1;
2971
2972         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2973                 goto out;
2974
2975         /*
2976          * If len is not big enough to hold the page header, then
2977          * we can not copy anything.
2978          */
2979         if (len <= BUF_PAGE_HDR_SIZE)
2980                 goto out;
2981
2982         len -= BUF_PAGE_HDR_SIZE;
2983
2984         if (!data_page)
2985                 goto out;
2986
2987         bpage = *data_page;
2988         if (!bpage)
2989                 goto out;
2990
2991         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2992
2993         reader = rb_get_reader_page(cpu_buffer);
2994         if (!reader)
2995                 goto out_unlock;
2996
2997         event = rb_reader_event(cpu_buffer);
2998
2999         read = reader->read;
3000         commit = rb_page_commit(reader);
3001
3002         /*
3003          * If this page has been partially read or
3004          * if len is not big enough to read the rest of the page or
3005          * a writer is still on the page, then
3006          * we must copy the data from the page to the buffer.
3007          * Otherwise, we can simply swap the page with the one passed in.
3008          */
3009         if (read || (len < (commit - read)) ||
3010             cpu_buffer->reader_page == cpu_buffer->commit_page) {
3011                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3012                 unsigned int rpos = read;
3013                 unsigned int pos = 0;
3014                 unsigned int size;
3015
3016                 if (full)
3017                         goto out_unlock;
3018
3019                 if (len > (commit - read))
3020                         len = (commit - read);
3021
3022                 size = rb_event_length(event);
3023
3024                 if (len < size)
3025                         goto out_unlock;
3026
3027                 /* save the current timestamp, since the user will need it */
3028                 save_timestamp = cpu_buffer->read_stamp;
3029
3030                 /* Need to copy one event at a time */
3031                 do {
3032                         memcpy(bpage->data + pos, rpage->data + rpos, size);
3033
3034                         len -= size;
3035
3036                         rb_advance_reader(cpu_buffer);
3037                         rpos = reader->read;
3038                         pos += size;
3039
3040                         event = rb_reader_event(cpu_buffer);
3041                         size = rb_event_length(event);
3042                 } while (len > size);
3043
3044                 /* update bpage */
3045                 local_set(&bpage->commit, pos);
3046                 bpage->time_stamp = save_timestamp;
3047
3048                 /* we copied everything to the beginning */
3049                 read = 0;
3050         } else {
3051                 /* update the entry counter */
3052                 cpu_buffer->read += local_read(&reader->entries);
3053
3054                 /* swap the pages */
3055                 rb_init_page(bpage);
3056                 bpage = reader->page;
3057                 reader->page = *data_page;
3058                 local_set(&reader->write, 0);
3059                 local_set(&reader->entries, 0);
3060                 reader->read = 0;
3061                 *data_page = bpage;
3062         }
3063         ret = read;
3064
3065  out_unlock:
3066         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3067
3068  out:
3069         return ret;
3070 }
3071 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
3072
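/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): pulling whole pages out of one CPU buffer with the three page
 * helpers above, along the lines of the example in the kernel-doc.
 * PAGE_SIZE is used for @len since the spare page is a single page;
 * example_ship_page() is hypothetical.
 */
static int example_ship_pages(struct ring_buffer *buffer, int cpu)
{
	void *spare;
	int ret;

	spare = ring_buffer_alloc_read_page(buffer);
	if (!spare)
		return -ENOMEM;

	for (;;) {
		ret = ring_buffer_read_page(buffer, &spare, PAGE_SIZE, cpu, 0);
		if (ret < 0)
			break;	/* no more data to read */

		/* ret is the offset of the first byte of consumed data */
		example_ship_page(spare, ret);
	}

	ring_buffer_free_read_page(buffer, spare);
	return 0;
}
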
3073 static ssize_t
3074 rb_simple_read(struct file *filp, char __user *ubuf,
3075                size_t cnt, loff_t *ppos)
3076 {
3077         unsigned long *p = filp->private_data;
3078         char buf[64];
3079         int r;
3080
3081         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3082                 r = sprintf(buf, "permanently disabled\n");
3083         else
3084                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3085
3086         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3087 }
3088
3089 static ssize_t
3090 rb_simple_write(struct file *filp, const char __user *ubuf,
3091                 size_t cnt, loff_t *ppos)
3092 {
3093         unsigned long *p = filp->private_data;
3094         char buf[64];
3095         unsigned long val;
3096         int ret;
3097
3098         if (cnt >= sizeof(buf))
3099                 return -EINVAL;
3100
3101         if (copy_from_user(&buf, ubuf, cnt))
3102                 return -EFAULT;
3103
3104         buf[cnt] = 0;
3105
3106         ret = strict_strtoul(buf, 10, &val);
3107         if (ret < 0)
3108                 return ret;
3109
3110         if (val)
3111                 set_bit(RB_BUFFERS_ON_BIT, p);
3112         else
3113                 clear_bit(RB_BUFFERS_ON_BIT, p);
3114
3115         (*ppos)++;
3116
3117         return cnt;
3118 }
3119
3120 static const struct file_operations rb_simple_fops = {
3121         .open           = tracing_open_generic,
3122         .read           = rb_simple_read,
3123         .write          = rb_simple_write,
3124 };
3125
3126
3127 static __init int rb_init_debugfs(void)
3128 {
3129         struct dentry *d_tracer;
3130
3131         d_tracer = tracing_init_dentry();
3132
3133         trace_create_file("tracing_on", 0644, d_tracer,
3134                             &ring_buffer_flags, &rb_simple_fops);
3135
3136         return 0;
3137 }
3138
3139 fs_initcall(rb_init_debugfs);
3140
3141 #ifdef CONFIG_HOTPLUG_CPU
3142 static int rb_cpu_notify(struct notifier_block *self,
3143                          unsigned long action, void *hcpu)
3144 {
3145         struct ring_buffer *buffer =
3146                 container_of(self, struct ring_buffer, cpu_notify);
3147         long cpu = (long)hcpu;
3148
3149         switch (action) {
3150         case CPU_UP_PREPARE:
3151         case CPU_UP_PREPARE_FROZEN:
3152                 if (cpumask_test_cpu(cpu, buffer->cpumask))
3153                         return NOTIFY_OK;
3154
3155                 buffer->buffers[cpu] =
3156                         rb_allocate_cpu_buffer(buffer, cpu);
3157                 if (!buffer->buffers[cpu]) {
3158                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3159                              cpu);
3160                         return NOTIFY_OK;
3161                 }
3162                 smp_wmb();
3163                 cpumask_set_cpu(cpu, buffer->cpumask);
3164                 break;
3165         case CPU_DOWN_PREPARE:
3166         case CPU_DOWN_PREPARE_FROZEN:
3167                 /*
3168                  * Do nothing.
3169                  *  If we were to free the buffer, then the user would
3170                  *  lose any trace that was in the buffer.
3171                  */
3172                 break;
3173         default:
3174                 break;
3175         }
3176         return NOTIFY_OK;
3177 }
3178 #endif