diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index a5804cf12c7cb1ee3d0fef2bd9673801d2ac95ab..10beaee7f0ae592ae88803d7720d298a20beac0d 100644 (file)
@@ -36,6 +36,7 @@ struct vb2_dc_buf {
        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
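+       /* sg table of the buffer, created lazily for DMABUF export */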
+       struct sg_table                 *sgt_base;
 
        /* USERPTR related */
        struct vm_area_struct           *vma;
@@ -142,7 +143,12 @@ static void vb2_dc_put(void *buf_priv)
        if (!atomic_dec_and_test(&buf->refcount))
                return;
 
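+       /* release the sg table created for DMABUF export, if any */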
+       if (buf->sgt_base) {
+               sg_free_table(buf->sgt_base);
+               kfree(buf->sgt_base);
+       }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+       put_device(buf->dev);
        kfree(buf);
 }
 
@@ -156,6 +162,9 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
        if (!buf)
                return ERR_PTR(-ENOMEM);
 
+       /* align image size to PAGE_SIZE */
+       size = PAGE_ALIGN(size);
+
        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
@@ -163,7 +172,8 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
                return ERR_PTR(-ENOMEM);
        }
 
-       buf->dev = dev;
+       /* Prevent the device from being released while the buffer is used */
+       buf->dev = get_device(dev);
        buf->size = size;
 
        buf->handler.refcount = &buf->refcount;
@@ -178,14 +188,232 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
 static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
        struct vb2_dc_buf *buf = buf_priv;
+       int ret;
 
        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }
 
-       return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
-                                 &vb2_common_vm_ops, &buf->handler);
+       /*
+        * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
+        * map the whole buffer
+        */
+       vma->vm_pgoff = 0;
+
+       ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+               buf->dma_addr, buf->size);
+
+       if (ret) {
+               pr_err("Remapping memory failed, error: %d\n", ret);
+               return ret;
+       }
+
+       vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_private_data    = &buf->handler;
+       vma->vm_ops             = &vb2_common_vm_ops;
+
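+       /*
+        * mmap() does not call vm_ops->open() for the initial mapping, so
+        * take a reference on the buffer for this mapping explicitly
+        */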
+       vma->vm_ops->open(vma);
+
+       pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+               __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+               buf->size);
+
+       return 0;
+}
+
+/*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dc_attachment {
+       struct sg_table sgt;
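+       /* direction of the current mapping; DMA_NONE while unmapped */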
+       enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+       struct dma_buf_attachment *dbuf_attach)
+{
+       struct vb2_dc_attachment *attach;
+       unsigned int i;
+       struct scatterlist *rd, *wr;
+       struct sg_table *sgt;
+       struct vb2_dc_buf *buf = dbuf->priv;
+       int ret;
+
+       attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+       if (!attach)
+               return -ENOMEM;
+
+       sgt = &attach->sgt;
+       /*
+        * Copy the buf->sgt_base scatter list to the attachment, as we can't
+        * map the same scatter list to multiple attachments at the same time.
+        */
+       ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+       if (ret) {
+               kfree(attach);
+               return -ENOMEM;
+       }
+
+       rd = buf->sgt_base->sgl;
+       wr = sgt->sgl;
+       for (i = 0; i < sgt->orig_nents; ++i) {
+               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+               rd = sg_next(rd);
+               wr = sg_next(wr);
+       }
+
+       attach->dir = DMA_NONE;
+       dbuf_attach->priv = attach;
+
+       return 0;
+}
+
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+       struct dma_buf_attachment *db_attach)
+{
+       struct vb2_dc_attachment *attach = db_attach->priv;
+       struct sg_table *sgt;
+
+       if (!attach)
+               return;
+
+       sgt = &attach->sgt;
+
+       /* release the scatterlist cache */
+       if (attach->dir != DMA_NONE)
+               dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                       attach->dir);
+       sg_free_table(sgt);
+       kfree(attach);
+       db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+       struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+       struct vb2_dc_attachment *attach = db_attach->priv;
+       /* stealing dmabuf mutex to serialize map/unmap operations */
+       struct mutex *lock = &db_attach->dmabuf->lock;
+       struct sg_table *sgt;
+       int ret;
+
+       mutex_lock(lock);
+
+       sgt = &attach->sgt;
+       /* return previously mapped sg table */
+       if (attach->dir == dir) {
+               mutex_unlock(lock);
+               return sgt;
+       }
+
+       /* release any previous cache */
+       if (attach->dir != DMA_NONE) {
+               dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+                       attach->dir);
+               attach->dir = DMA_NONE;
+       }
+
+       /* map the scatter list to the client with the new direction */
+       ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+       if (ret <= 0) {
+               pr_err("failed to map scatterlist\n");
+               mutex_unlock(lock);
+               return ERR_PTR(-EIO);
+       }
+
+       attach->dir = dir;
+
+       mutex_unlock(lock);
+
+       return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+       struct sg_table *sgt, enum dma_data_direction dir)
+{
+       /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+       /* drop reference obtained in vb2_dc_get_dmabuf */
+       vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+       struct vb2_dc_buf *buf = dbuf->priv;
+
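+       /* the coherent buffer is virtually contiguous; index pages off vaddr */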
+       return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+       struct vb2_dc_buf *buf = dbuf->priv;
+
+       return buf->vaddr;
+}
+
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+       struct vm_area_struct *vma)
+{
+       return vb2_dc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+       .attach = vb2_dc_dmabuf_ops_attach,
+       .detach = vb2_dc_dmabuf_ops_detach,
+       .map_dma_buf = vb2_dc_dmabuf_ops_map,
+       .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+       .kmap = vb2_dc_dmabuf_ops_kmap,
+       .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+       .vmap = vb2_dc_dmabuf_ops_vmap,
+       .mmap = vb2_dc_dmabuf_ops_mmap,
+       .release = vb2_dc_dmabuf_ops_release,
+};
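+
+/*
+ * Illustrative only: a hypothetical importer would consume a buffer
+ * exported through these ops roughly as follows (the fd and dev below
+ * are assumptions, not part of this file):
+ *
+ *	struct dma_buf *dbuf = dma_buf_get(fd);
+ *	struct dma_buf_attachment *att = dma_buf_attach(dbuf, dev);
+ *	struct sg_table *sgt = dma_buf_map_attachment(att, DMA_FROM_DEVICE);
+ *	... DMA using sgt ...
+ *	dma_buf_unmap_attachment(att, sgt, DMA_FROM_DEVICE);
+ *	dma_buf_detach(dbuf, att);
+ *	dma_buf_put(dbuf);
+ */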
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+       int ret;
+       struct sg_table *sgt;
+
+       sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt) {
+               dev_err(buf->dev, "failed to alloc sg table\n");
+               return NULL;
+       }
+
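+       /* ask the DMA API to describe the coherent memory as a scatterlist */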
+       ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+               buf->size);
+       if (ret < 0) {
+               dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+               kfree(sgt);
+               return NULL;
+       }
+
+       return sgt;
+}
+
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
+{
+       struct vb2_dc_buf *buf = buf_priv;
+       struct dma_buf *dbuf;
+
+       if (!buf->sgt_base)
+               buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+       if (WARN_ON(!buf->sgt_base))
+               return NULL;
+
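+       /* the final argument is the file flags for the exported dma-buf */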
+       dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+       if (IS_ERR(dbuf))
+               return NULL;
+
+       /* dmabuf keeps reference to vb2 buffer */
+       atomic_inc(&buf->refcount);
+
+       return dbuf;
 }
 
 /*********************************************/
@@ -266,6 +494,18 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
+       unsigned long dma_align = dma_get_cache_alignment();
+
+       /* Only cache-aligned DMA transfers are reliable */
+       if (!IS_ALIGNED(vaddr | size, dma_align)) {
+               pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (!size) {
+               pr_debug("size is zero\n");
+               return ERR_PTR(-EINVAL);
+       }
 
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
@@ -495,6 +735,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
+       .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,