/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device			*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

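/*
 * Illustrative note (not from the original sources): if the mapped table
 * has a chunk at bus address 0x10000 of 0x1000 bytes followed by one at
 * 0x11000 of 0x1000 bytes, the walk above merges them and returns 0x2000;
 * had the second chunk started at 0x12000, the walk would stop at the gap
 * and report only 0x1000 of usable contiguous memory.
 */
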
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

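/*
 * Note: the vb2 core is expected to call .prepare shortly before the
 * buffer is handed to the hardware and .finish before it is handed back
 * to userspace, so on non-coherent systems the two syncs above bracket
 * every DMA transaction.
 */
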
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* align image size to PAGE_SIZE */
	size = PAGE_ALIGN(size);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

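/*
 * For reference, userspace reaches vb2_dc_mmap() by mmap()ing the video
 * device node at the offset reported by VIDIOC_QUERYBUF; a minimal
 * sketch (illustrative only):
 *
 *	struct v4l2_buffer b = { .type = ..., .index = 0,
 *				 .memory = V4L2_MEMORY_MMAP };
 *	ioctl(fd, VIDIOC_QUERYBUF, &b);
 *	void *p = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, b.m.offset);
 */
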
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

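/*
 * Note: this exporter path is typically reached when userspace exports
 * an MMAP buffer with VIDIOC_EXPBUF; the reference taken above keeps the
 * vb2 buffer alive until vb2_dc_dmabuf_ops_release() drops it.
 */
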
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc = vb2_dc_alloc,
	.put = vb2_dc_put,
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
	.get_dmabuf = vb2_dc_get_dmabuf,
#endif
	.cookie = vb2_dc_cookie,
	.vaddr = vb2_dc_vaddr,
	.mmap = vb2_dc_mmap,
	.get_userptr = vb2_dc_get_userptr,
	.put_userptr = vb2_dc_put_userptr,
	.prepare = vb2_dc_prepare,
	.finish = vb2_dc_finish,
	.map_dmabuf = vb2_dc_map_dmabuf,
	.unmap_dmabuf = vb2_dc_unmap_dmabuf,
	.attach_dmabuf = vb2_dc_attach_dmabuf,
	.detach_dmabuf = vb2_dc_detach_dmabuf,
	.num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

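/*
 * A minimal usage sketch for a driver (names such as 'priv' and 'q' are
 * hypothetical, shown only for illustration): create the context at
 * probe time, point the vb2 queue at these memops, and free the context
 * on removal.
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(priv->alloc_ctx))
 *		return PTR_ERR(priv->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	...
 *	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
 */
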
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");