/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* USERPTR related */
        struct vm_area_struct           *vma;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
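
/*
 * Worked example for the walk above (illustrative values only): for a
 * mapped table whose DMA segments are {0x1000+0x1000, 0x2000+0x1000,
 * 0x8000+0x1000}, the function returns 0x2000. The first two segments
 * are adjacent in dma_addr_t space; the third starts at an address
 * other than the expected 0x3000 and so ends the contiguous run.
 */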

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
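
/*
 * Note on the two hooks above: only USERPTR buffers carry a dma_sgt
 * without a DMABUF attachment, so they are the only case actually
 * synced here. MMAP buffers come from dma_alloc_coherent() and need no
 * manual sync, and for imported DMABUFs the exporter is responsible.
 * A sketch of the order the vb2 core is assumed to follow per frame:
 *
 *   vb2_dc_prepare(buf);   hand the buffer to the device (sync for device)
 *   ...the device fills or consumes the buffer...
 *   vb2_dc_finish(buf);    hand it back to the CPU (sync for cpu)
 */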

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        /* align image size to PAGE_SIZE */
        size = PAGE_ALIGN(size);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want
         * to map the whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}
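
/*
 * For reference, the handler above backs a plain mmap() on the video
 * node. A minimal userspace sketch (assumed typical V4L2 usage, not
 * part of this file):
 *
 *   void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, video_fd, buf.m.offset);
 *
 * where buf is a struct v4l2_buffer previously filled in by
 * VIDIOC_QUERYBUF.
 */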

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the cached scatterlist, if any */
        if (attach->dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* steal the dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return the previously mapped sg table */
        if (attach->dir == dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous mapping */
        if (attach->dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
                attach->dir = DMA_NONE;
        }

        /* map the scatterlist for the client with the new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dir = dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
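
/*
 * A minimal sketch of the export path as seen from userspace (assumed
 * typical flow, not defined in this file): the application calls
 * VIDIOC_EXPBUF on an MMAP buffer, which reaches vb2_dc_get_dmabuf()
 * above and hands back a file descriptor:
 *
 *   struct v4l2_exportbuffer expbuf = { .type = ..., .index = i };
 *   ioctl(video_fd, VIDIOC_EXPBUF, &expbuf);
 *   ...expbuf.fd can now be imported by another device or API...
 */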

#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* a negative return means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

        sg_free_table(sgt);
        kfree(sgt);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

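/*
 * Pin a userspace buffer and check that, once mapped for DMA, it is
 * contiguous in dma_addr_t space. A descriptive summary of the steps
 * below: resolve the VMA, pin the pages (or translate PFNs for
 * VM_IO/VM_PFNMAP areas), build an sg_table from the page list,
 * dma_map_sg() it, and reject the buffer unless the mapping came out
 * as one contiguous run covering at least @size bytes.
 */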
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* check that the dmabuf is big enough to store a contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create an attachment for the dmabuf with the user's device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}
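
/*
 * The importer callbacks above are driven by the vb2 core (a
 * descriptive summary; the exact sequencing lives in videobuf2-core):
 * attach_dmabuf() runs once per buffer when userspace queues a DMABUF
 * fd, map_dmabuf()/unmap_dmabuf() bracket the periods when the device
 * actually uses the buffer so the exporter can remap or sync, and
 * detach_dmabuf() tears the attachment down when the buffer is freed.
 */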

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
        .get_dmabuf     = vb2_dc_get_dmabuf,
#endif
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
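
/*
 * A minimal sketch of how a driver wires these ops into its queue
 * (field names from struct vb2_queue; the surrounding driver code is
 * assumed, not part of this file):
 *
 *   q->mem_ops = &vb2_dma_contig_memops;
 *   q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *   ret = vb2_queue_init(q);
 */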

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
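
/*
 * The allocation context above just pins the struct device used for
 * the DMA API calls. A typical lifecycle (a sketch of assumed driver
 * code, not defined here):
 *
 *   probe:  drv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *           if (IS_ERR(drv->alloc_ctx)) ...bail out...
 *   remove: vb2_dma_contig_cleanup_ctx(drv->alloc_ctx);
 */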

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");