/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;

        /* USERPTR related */
        struct vm_area_struct           *vma;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

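/*
 * Walk every page backing a scatterlist and invoke @cb on each of them.
 * Used on teardown paths to dirty and/or release the pages that were
 * collected for a USERPTR buffer.
 */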
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}

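/*
 * Return the length of the leading run of contiguous DMA addresses in a
 * mapped scatterlist. Callers compare the result against the requested
 * buffer size to decide whether the whole buffer is usable as a single
 * contiguous chunk.
 */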
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

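/*
 * ->prepare and ->finish hand the buffer over to the device and back to
 * the CPU, respectively, by syncing the scatterlist. MMAP buffers are
 * allocated with dma_alloc_coherent() and have no dma_sgt, and DMABUF
 * imports are synced by their exporter, so both cases return early.
 */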
static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

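/*
 * Drop one reference to an MMAP buffer and free the coherent memory once
 * the last user (the vb2 queue or a userspace mapping) is gone.
 */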
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        kfree(buf);
}

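/*
 * Allocate a buffer from coherent DMA memory and set up the refcount
 * handler that vb2_common_vm_ops uses to keep the buffer alive while it
 * is mapped into userspace.
 */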
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        buf->dev = dev;
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

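/*
 * Map an MMAP buffer into userspace. The mapping is delegated to
 * dma_mmap_coherent(), so the architecture code picks the correct page
 * protection and pfn range for memory obtained from dma_alloc_coherent().
 */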
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
         * map the whole buffer.
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

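/*
 * Collect the pages backing @n_pages pages of the userspace range at
 * @start. For VM_IO/VM_PFNMAP vmas the pfns are resolved with
 * follow_pfn(); for ordinary vmas the pages are pinned with
 * get_user_pages().
 */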
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

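/*
 * Release a USERPTR buffer: unmap the scatterlist, mark the pages dirty
 * and release them (unless they came from a VM_IO/VM_PFNMAP mapping),
 * then drop the vma reference taken in vb2_dc_get_userptr().
 */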
static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

        sg_free_table(sgt);
        kfree(sgt);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

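/*
 * Acquire a USERPTR buffer: look up or pin the pages of the user range,
 * build an sg_table from them, map it for DMA and verify that the
 * resulting DMA addresses form one contiguous chunk covering @size bytes.
 */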
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

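/*
 * Pin an attached DMABUF for DMA: obtain the exporter's scatterlist via
 * dma_buf_map_attachment() and check that it is contiguous and large
 * enough for the buffer.
 */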
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

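/*
 * Attach a DMABUF to the device that will perform the DMA. Only the
 * attachment is created here; mapping the buffer is deferred to
 * vb2_dc_map_dmabuf().
 */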
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

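/*
 * Allocate an allocator context for @dev. Drivers pass the context back
 * through vb2's queue_setup() so that every buffer is allocated and
 * mapped against the right struct device.
 *
 * A minimal usage sketch (hypothetical driver fields, error handling
 * omitted; not taken from this file):
 *
 *      // probe time
 *      drv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *
 *      // queue initialization
 *      q->mem_ops = &vb2_dma_contig_memops;
 *
 *      // in the driver's queue_setup() callback
 *      alloc_ctxs[0] = drv->alloc_ctx;
 *
 *      // remove time
 *      vb2_dma_contig_cleanup_ctx(drv->alloc_ctx);
 */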
void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");