drivers/media/v4l2-core/videobuf2-dma-sg.c (Linux 3.14)
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);
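
/*
 * Note: since the parameter is created with mode 0644, the debug level can
 * typically be raised at runtime through sysfs, e.g. by writing to
 * /sys/module/videobuf2_dma_sg/parameters/debug (path assumes the usual
 * mangling of '-' to '_' in the module name).
 */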

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

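/*
 * Per-buffer bookkeeping for this allocator: the page array backing the
 * buffer, the scatterlist built from it, the optional kernel mapping
 * (vaddr) created on demand by vm_map_ram(), and, for USERPTR buffers,
 * the originating vma plus the offset of the data within the first page.
 */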
struct vb2_dma_sg_buf {
        void                            *vaddr;
        struct page                     **pages;
        int                             write;
        int                             offset;
        struct sg_table                 sg_table;
        size_t                          size;
        unsigned int                    num_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
        struct vm_area_struct           *vma;
};

static void vb2_dma_sg_put(void *buf_priv);

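/*
 * Fill the page array as compactly as the page allocator allows: try the
 * largest order that does not overshoot the remaining size, fall back to
 * smaller orders on failure, and split each high-order allocation into
 * individual pages. If even an order-0 allocation fails, everything
 * obtained so far is freed and -ENOMEM is returned.
 */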
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        int size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}

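/*
 * MMAP allocation path: size has already been page aligned by the vb2
 * core, the page array and scatterlist are built here, and the buffer
 * starts out with a reference count of one.
 */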
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
        struct vb2_dma_sg_buf *buf;
        int ret;
        int num_pages;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = 0;
        buf->offset = 0;
        buf->size = size;
        /* size is already page aligned */
        buf->num_pages = size >> PAGE_SHIFT;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, 0, size, gfp_flags);
        if (ret)
                goto fail_table_alloc;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->num_pages);
        return buf;

fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(&buf->sg_table);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                kfree(buf);
        }
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

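/*
 * USERPTR path: pin the pages backing the user buffer. For ordinary
 * anonymous or file-backed mappings this uses get_user_pages(); for
 * VM_IO/VM_PFNMAP areas the pfns are looked up with follow_pfn() and no
 * page references are taken.
 */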
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size, int write)
{
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user;
        struct vm_area_struct *vma;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;

        first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->num_pages = last - first + 1;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_alloc_pages;

        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                dprintk(1, "no vma for address %lu\n", vaddr);
                goto userptr_fail_find_vma;
        }

        if (vma->vm_end < vaddr + size) {
                dprintk(1, "vma at %lu is too small for %lu bytes\n",
                        vaddr, size);
                goto userptr_fail_find_vma;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                dprintk(1, "failed to copy vma\n");
                goto userptr_fail_find_vma;
        }

        if (vma_is_io(buf->vma)) {
                for (num_pages_from_user = 0;
                     num_pages_from_user < buf->num_pages;
                     ++num_pages_from_user, vaddr += PAGE_SIZE) {
                        unsigned long pfn;

                        if (follow_pfn(buf->vma, vaddr, &pfn)) {
                                dprintk(1, "no page for address %lu\n", vaddr);
                                break;
                        }
                        buf->pages[num_pages_from_user] = pfn_to_page(pfn);
                }
        } else
                num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
                                             buf->num_pages,
                                             write,
                                             1, /* force */
                                             buf->pages,
                                             NULL);

        if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;

        if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_alloc_table_from_pages;

        return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d\n",
                buf->num_pages, num_pages_from_user);
        if (!vma_is_io(buf->vma))
                while (--num_pages_from_user >= 0)
                        put_page(buf->pages[num_pages_from_user]);
        vb2_put_vma(buf->vma);
userptr_fail_find_vma:
        kfree(buf->pages);
userptr_fail_alloc_pages:
        kfree(buf);
        return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->num_pages);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(&buf->sg_table);
        while (--i >= 0) {
                if (buf->write)
                        set_page_dirty_lock(buf->pages[i]);
                if (!vma_is_io(buf->vma))
                        put_page(buf->pages[i]);
        }
        kfree(buf->pages);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

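/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * lazily with vm_map_ram() on first use. The stored offset is added so
 * that a USERPTR buffer that is not page aligned points at its data.
 */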
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
                                        buf->num_pages,
                                        -1,
                                        PAGE_KERNEL);

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

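/*
 * Map the buffer into a userspace vma page by page with vm_insert_page()
 * and hook up the common vm_area operations so that the mapping holds a
 * reference on the buffer for as long as it exists.
 */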
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

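/*
 * The "cookie" exposed to drivers is the sg_table itself; this allocator
 * does not map it for DMA, so the driver is expected to dma_map_sg() the
 * table before programming the hardware.
 */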
static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
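
/*
 * Rough usage sketch (not part of this file): a driver selects this
 * allocator by pointing its vb2_queue at vb2_dma_sg_memops and then, when
 * a buffer is queued, retrieves the scatterlist through the cookie helper
 * and maps it for DMA itself. The names "my_setup_queue" and "my_dev"
 * below are hypothetical; the queue setup is only outlined.
 *
 *      static int my_setup_queue(struct my_dev *dev, struct vb2_queue *q)
 *      {
 *              q->mem_ops = &vb2_dma_sg_memops;
 *              ...
 *              return vb2_queue_init(q);
 *      }
 *
 *      // later, for a queued struct vb2_buffer *vb, plane 0:
 *      struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *      dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */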

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");