/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

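/*
 * One descriptor serves both buffer types handled by this allocator:
 * MMAP buffers come from dma_alloc_coherent() and leave dma_sgt NULL,
 * while USERPTR buffers are pinned user pages described by a scatterlist
 * table that must DMA-map to a single contiguous run.
 */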
struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;

	/* USERPTR related */
	struct vm_area_struct		*vma;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

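/*
 * Call @cb on every page backing @sgt. Iteration uses orig_nents (the
 * entry count before DMA mapping may have coalesced entries), so each
 * CPU page is visited exactly once.
 */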
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

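/*
 * Compute the size of the longest DMA-contiguous run starting at the
 * first mapped entry. For example, two 0x1000-byte chunks mapped at
 * 0x1000 and 0x2000 merge into one 0x2000-byte run, while a third chunk
 * at 0x4000 would stop the scan. vb2_dc_get_userptr() compares the
 * result against the requested buffer size.
 */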
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

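/*
 * Sync the buffer for device access (cache clean) before DMA starts.
 * MMAP buffers come from dma_alloc_coherent() and never set dma_sgt, so
 * for them this is a no-op; only USERPTR mappings need the explicit sync.
 */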
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

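/*
 * Sync the buffer back for CPU access (cache invalidate) once the device
 * is done. As with vb2_dc_prepare(), coherent MMAP buffers (dma_sgt ==
 * NULL) need no work here.
 */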
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}

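/*
 * Allocate a physically contiguous MMAP buffer. dma_alloc_coherent()
 * provides both the kernel mapping (vaddr) and the bus address that is
 * later handed to the hardware via the cookie op; the vmarea handler lets
 * the mmap path drop the final reference when userspace unmaps the buffer.
 */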
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	/* hold one reference on behalf of the allocating driver */
	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf) {
		pr_err("no buffer to map\n");
		return -EINVAL;
	}

	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
				  &vb2_common_vm_ops, &buf->handler);
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

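/*
 * Collect the struct page pointers backing the user range. For VM_IO /
 * VM_PFNMAP vmas the pfns are looked up directly (nothing is pinned);
 * otherwise the pages are pinned with get_user_pages() and must later be
 * released with put_page().
 */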
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

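/*
 * Tear down a USERPTR buffer: unmap the scatterlist, then release the
 * pinned pages, marking them dirty so data written by the device is not
 * lost. Pages of VM_IO/VM_PFNMAP vmas were never pinned and are left
 * alone.
 */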
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

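/*
 * USERPTR setup: pin (or look up) the pages covering [vaddr, vaddr + size),
 * build a scatterlist table from them, DMA-map it, and verify that the
 * mapping came out contiguous, since this allocator can only hand a single
 * dma_addr_t to the device.
 */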
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
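
/*
 * Typical usage (a minimal sketch, not taken from any particular driver;
 * "my_probe" and the surrounding vb2_queue setup are illustrative only):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct vb2_queue *q = ...;	// driver-specific
 *		void *alloc_ctx;
 *
 *		alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *		if (IS_ERR(alloc_ctx))
 *			return PTR_ERR(alloc_ctx);
 *
 *		q->mem_ops = &vb2_dma_contig_memops;
 *		return vb2_queue_init(q);
 *	}
 *
 * The pointer returned by vb2_dma_contig_init_ctx() is what the core later
 * passes back as alloc_ctx to vb2_dc_alloc() and vb2_dc_get_userptr(); it
 * must be released with vb2_dma_contig_cleanup_ctx() on driver removal.
 */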

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");