[media] v4l: vb2-dma-contig: add reference counting for a device from allocator context
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* USERPTR related */
        struct vm_area_struct           *vma;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};
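
/*
 * A single vb2_dc_buf describes a buffer of any of the three vb2 memory
 * types; the MMAP, USERPTR and DMABUF field groups above are only used
 * by buffers of the corresponding type.
 */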

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/


static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
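
/*
 * Illustration (not from the original source): for a mapped table with
 * DMA chunks {0x10000 + 0x1000 bytes, 0x11000 + 0x1000 bytes,
 * 0x20000 + 0x1000 bytes}, the walk above stops at the gap before
 * 0x20000 and vb2_dc_get_contiguous_size() returns 0x2000.
 */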

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}
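
/*
 * Note on lifetimes: vb2_dc_alloc() returns the buffer with refcount 1;
 * the matching atomic_dec_and_test() happens in vb2_dc_put(), which also
 * drops the device reference taken with get_device() above.  Exported
 * dmabufs and userspace mappings each take an extra reference, so the
 * underlying memory is only freed once the last user is gone.
 */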

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want
         * to map the whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}
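
/*
 * The explicit vm_ops->open() call above runs the vb2 vma open handler
 * for the initial mapping (the kernel only calls it for later forks and
 * splits); vb2_common_vm_ops then drops the reference through
 * buf->handler (i.e. vb2_dc_put()) when the last mapping goes away.
 */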

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dir == dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
                attach->dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dir = dir;

        mutex_unlock(lock);

        return sgt;
}
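
/*
 * The mapping is cached in the attachment: repeated map calls with the
 * same direction return the table mapped above, and a direction change
 * unmaps the old mapping first.  The unmap op below can therefore be a
 * no-op; the real unmapping happens on detach (or on remap).
 */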

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
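
/*
 * The reference taken above is dropped in vb2_dc_dmabuf_ops_release()
 * once the last user of the exported dmabuf is gone, so the exported
 * buffer may outlive the vb2 queue that created it.
 */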

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}
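
/*
 * Two cases are handled above: for VM_IO/VM_PFNMAP vmas the pfns are
 * looked up without taking page references (such memory may have no
 * struct page refcounting at all), while for ordinary mappings
 * get_user_pages() pins each page.  The teardown paths below check
 * vma_is_io() for the same reason: only pinned pages are released with
 * put_page().
 */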

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

        sg_free_table(sgt);
        kfree(sgt);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}
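
/*
 * The fail_* labels unwind in reverse order of setup: unmap the DMA
 * mapping, unpin the pages (only if they were pinned, see vma_is_io()),
 * free the sg table, release the vma copy and finally free the buffer.
 * Falling through from an earlier label to the later ones is what makes
 * each goto release exactly the state acquired so far.
 */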

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}
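
/*
 * For imported buffers the vb2 core pairs map_dmabuf/unmap_dmabuf calls
 * around actual DMA use, while attach_dmabuf/detach_dmabuf below bracket
 * the whole lifetime of the import: a buffer is attached once but may be
 * mapped and unmapped many times.
 */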

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
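
/*
 * Usage sketch (illustrative only; "my_dev" and "my_queue" are made-up
 * names from a hypothetical driver, not part of this file):
 *
 *      my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *      if (IS_ERR(my_dev->alloc_ctx))
 *              return PTR_ERR(my_dev->alloc_ctx);
 *
 *      my_queue->mem_ops = &vb2_dma_contig_memops;
 *      // in the driver's queue_setup callback:
 *      //      alloc_ctxs[0] = my_dev->alloc_ctx;
 *
 *      // on driver removal:
 *      vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx);
 */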