#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01
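
/*
 * The buffer backing a perf mmap() region: one user-visible control page
 * (user_page) followed by nr_pages data pages.  nr_pages is a power of
 * two, which lets the output path wrap with a simple "& (nr_pages - 1)"
 * mask; see DEFINE_OUTPUT_COPY() below.
 */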
struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             overwrite;      /* can overwrite itself */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */
        /* poll support */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        int                             mmap_locked;
        struct user_struct              *mmap_user;

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[];  /* flexible array of data pages */
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif
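
/*
 * With CONFIG_PERF_USE_VMALLOC the buffer is one contiguous vmalloc
 * allocation, so a data_pages[] slot spans (PAGE_SIZE << page_order)
 * bytes; without it, every slot is one independently allocated page
 * and the order is 0.  perf_data_size() relies on this.
 */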

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
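
/*
 * DEFINE_OUTPUT_COPY() generates a routine that copies up to @len bytes
 * into the ring buffer, advancing @handle across page boundaries as it
 * goes.  @memcpy_func follows the copy_from_user() convention and
 * returns the number of bytes it could NOT copy; the generated function
 * returns the number of bytes left uncopied (0 on full success).
 */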
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                      \
static inline unsigned int                                              \
func_name(struct perf_output_handle *handle,                            \
          const void *buf, unsigned int len)                            \
{                                                                       \
        unsigned long size, written;                                    \
                                                                        \
        do {                                                            \
                size = min_t(unsigned long, handle->size, len);         \
                                                                        \
                written = memcpy_func(handle->addr, buf, size);         \
                written = size - written; /* bytes actually copied */   \
                                                                        \
                len -= written;                                         \
                handle->addr += written;                                \
                buf += written;                                         \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
                        struct ring_buffer *rb = handle->rb;            \
                                                                        \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
                        handle->addr = rb->data_pages[handle->page];    \
                        handle->size = PAGE_SIZE << page_order(rb);     \
                }                                                       \
        } while (len && written == size);                               \
                                                                        \
        return len;                                                     \
}

static inline int memcpy_common(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;       /* like copy_from_user(): 0 bytes left uncopied */
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* advance the handle without writing anything: "copies" every byte */
#define MEMCPY_SKIP(dst, src, n) (0)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

#ifndef arch_perf_out_copy_user
/* __copy_from_user_inatomic() returns bytes not copied, as required above */
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
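
/*
 * Typical use (sketch): an output handle prepared by perf_output_begin()
 * is filled with __output_copy(handle, data, size), or with
 * __output_copy_user() when the source is user memory; __output_skip()
 * merely advances the handle past @len bytes.
 */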

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
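
/*
 * Per-context recursion counters: one slot each for task, softirq,
 * hardirq and NMI context, so an event firing in one context does not
 * block events in the others.
 */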
static inline int get_recursion_context(int *recursion)
{
        int rctx;

        if (in_nmi())
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        barrier();
        recursion[rctx]--;
}
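
/*
 * Usage sketch: callers bracket event processing with
 * rctx = get_recursion_context(ctx); a negative return means the event
 * recursed in this context and must be dropped; otherwise process the
 * event and release the slot with put_recursion_context(ctx, rctx).
 */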

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
        return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
        return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */