/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* USERPTR related */
        struct vm_area_struct           *vma;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
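
/*
 * Example: for a mapped table whose entries are {addr 0x1000, len
 * 0x1000}, {addr 0x2000, len 0x1000}, {addr 0x5000, len 0x1000}, the
 * walk above accepts the first two entries (0x2000 == 0x1000 + 0x1000)
 * and stops at the third (0x5000 != 0x3000), so the function reports
 * 0x2000 bytes of usable contiguous space.
 */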

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
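
/*
 * Note: prepare/finish implement the usual streaming-DMA ownership
 * handshake around each hardware transfer. A rough sketch of the
 * per-buffer sequence:
 *
 *      vb2_dc_prepare(buf);    // CPU -> device: dma_sync_sg_for_device()
 *      ... device performs DMA ...
 *      vb2_dc_finish(buf);     // device -> CPU: dma_sync_sg_for_cpu()
 *
 * MMAP buffers never set dma_sgt (their memory is coherent), and for
 * DMABUF buffers the exporter owns cache maintenance, so both calls
 * are no-ops in those cases.
 */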

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        /* align image size to PAGE_SIZE */
        size = PAGE_ALIGN(size);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}
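
/*
 * Rough usage sketch: a driver using this allocator typically fetches
 * the bus address of a queued buffer through the cookie op, e.g. from
 * its buf_queue handler (dev->regs and REG_DMA_ADDR are hypothetical):
 *
 *      dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *      writel(addr, dev->regs + REG_DMA_ADDR);
 *
 * vb2_dma_contig_plane_dma_addr() simply dereferences the pointer
 * returned by vb2_dc_cookie() above.
 */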

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
         * map whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}
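
/*
 * Note: the explicit vma->vm_ops->open(vma) call above takes the
 * initial reference on the buffer (vb2_common_vm_ops routes it to
 * buf->handler); the kernel only invokes .open automatically when an
 * existing VMA is duplicated, e.g. across fork(), never for the first
 * mapping, so without this call the refcount would stay unbalanced.
 */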

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /*
         * Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}
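
/*
 * Note: DMA_NONE serves as an "unmapped" marker for the per-attachment
 * table; vb2_dc_dmabuf_ops_map() below tests it to decide whether a
 * cached mapping can be reused or a stale one must first be torn down.
 * Keeping one sg_table copy per attachment is what allows several
 * importers to map the same exported buffer at the same time.
 */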

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dir == dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
                attach->dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dir = dir;

        mutex_unlock(lock);

        return sgt;
}
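
/*
 * Rough importer-side sketch (error handling omitted): the calls that
 * land in the attach/map ops above come from the generic dma-buf API,
 * roughly:
 *
 *      attach = dma_buf_attach(dbuf, dev);                     // -> ops_attach
 *      sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);  // -> ops_map
 *      ... DMA using sg_dma_address(sgt->sgl) ...
 *      dma_buf_unmap_attachment(attach, sgt, DMA_FROM_DEVICE);
 *      dma_buf_detach(dbuf, attach);                           // -> ops_detach
 */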

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}
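
/*
 * Note: dma_get_sgtable() asks the DMA mapping layer to describe the
 * coherent allocation as a scatterlist (a single entry for truly
 * contiguous memory). The table built here is cached in buf->sgt_base
 * and copied once per attachment by vb2_dc_dmabuf_ops_attach() above.
 */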

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
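
/*
 * Rough usage sketch: this op backs the VIDIOC_EXPBUF ioctl, with
 * which userspace turns an MMAP buffer into a dma-buf file descriptor,
 * roughly:
 *
 *      struct v4l2_exportbuffer expbuf = { .type = ..., .index = i };
 *      ioctl(video_fd, VIDIOC_EXPBUF, &expbuf);
 *      // expbuf.fd can now be imported by another device or API
 *
 * The reference taken above is dropped by vb2_dc_dmabuf_ops_release()
 * when the last fd reference goes away.
 */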

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}
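
/*
 * Note: the two branches above cover the two kinds of user mappings
 * vb2 accepts. VM_IO/VM_PFNMAP areas (e.g. frame buffers) have no
 * struct page backing that get_user_pages() could pin, so their PFNs
 * are resolved directly with follow_pfn() and nothing is refcounted;
 * ordinary anonymous or file-backed memory is pinned and must later be
 * released with put_page().
 */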

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

        sg_free_table(sgt);
        kfree(sgt);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}
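
/*
 * Note on the fail_* ladder above: each label undoes exactly the steps
 * that succeeded before the failure, in reverse order (unmap, unpin,
 * free the table, drop the vma, free the page array, free the buffer),
 * so a goto to any label leaves nothing behind. Pinned pages are only
 * released when the area was not VM_IO/VM_PFNMAP, matching the pinning
 * rules in vb2_dc_get_user_pages().
 */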

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu bytes\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}
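
/*
 * Note: unlike the vb2-dma-sg allocator, this one can only accept an
 * imported dma-buf whose mapping is contiguous in DMA address space,
 * which vb2_dc_get_contiguous_size() verifies. An exporter backed by
 * scattered system pages with no IOMMU to linearize them is therefore
 * rejected with -EFAULT even though the attach itself succeeded.
 */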

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
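
/*
 * Rough usage sketch (the driver-side names are hypothetical): a
 * driver typically wires the allocator up in probe() and in its
 * vb2_queue setup:
 *
 *      priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *      if (IS_ERR(priv->alloc_ctx))
 *              return PTR_ERR(priv->alloc_ctx);
 *
 *      q->mem_ops = &vb2_dma_contig_memops;
 *      // and from the driver's queue_setup op:
 *      alloc_ctxs[0] = priv->alloc_ctx;
 *
 * with a matching vb2_dma_contig_cleanup_ctx(priv->alloc_ctx) call in
 * remove().
 */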

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");