#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01

struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
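
/*
 * Usage sketch (illustrative, not part of this header): callers are
 * expected to pair rb_alloc() with rb_free(), and nr_pages must be a
 * power of two so that the page-index masking in __output_copy()
 * below works.  Roughly:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	rb_free(rb);
 */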

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

/*
 * Buffer backed by individually allocated pages: every data page is
 * order 0.
 */
static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline void
__output_copy(struct perf_output_handle *handle,
              const void *buf, unsigned int len)
{
        do {
                unsigned long size = min_t(unsigned long, handle->size, len);

                memcpy(handle->addr, buf, size);

                len -= size;
                handle->addr += size;
                buf += size;
                handle->size -= size;
                if (!handle->size) {
                        struct ring_buffer *rb = handle->rb;

                        /*
                         * Current data page exhausted: advance to the
                         * next one.  The mask relies on nr_pages being
                         * a power of two.
                         */
                        handle->page++;
                        handle->page &= rb->nr_pages - 1;
                        handle->addr = rb->data_pages[handle->page];
                        handle->size = PAGE_SIZE << page_order(rb);
                }
        } while (len);
}
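
/*
 * Illustrative only: __output_copy() is meant to be driven through a
 * perf_output_handle that perf_output_begin() has set up; exact
 * signatures vary between kernel versions, but a writer looks roughly
 * like:
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, size))
 *		return;
 *	__output_copy(&handle, data, size);
 *	perf_output_end(&handle);
 */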

/* Callchain handling */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
        int rctx;

        /*
         * Map the current execution context onto one of four recursion
         * slots: 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI.
         */
        if (in_nmi())
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        /* Already active in this context: refuse to recurse. */
        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        /* Keep the protected section ahead of the decrement. */
        barrier();
        recursion[rctx]--;
}
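
/*
 * Usage sketch (illustrative; the real callers live in the callchain
 * and swevent code, and the per-cpu array name below is an
 * assumption).  Each context gets its own counter so, e.g., an NMI
 * hitting inside a softirq user does not look like recursion:
 *
 *	static DEFINE_PER_CPU(int [4], my_recursion);
 *
 *	int rctx = get_recursion_context(__get_cpu_var(my_recursion));
 *	if (rctx == -1)
 *		return NULL;	// already active in this context
 *	...
 *	put_recursion_context(__get_cpu_var(my_recursion), rctx);
 */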

#endif /* _KERNEL_EVENTS_INTERNAL_H */