/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
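
/*
 * vb2_dc_sgt_foreach_page() - invoke @cb once for every page covered by
 * @sgt, walking the original (CPU-side) orig_nents entries rather than
 * the possibly coalesced DMA-mapped ones.
 */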

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}
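
/*
 * vb2_dc_get_contiguous_size() - return the length of the initial run of
 * contiguous bus addresses in a DMA-mapped scatterlist; the USERPTR and
 * DMABUF paths compare it against the requested buffer size to reject
 * fragmented memory.
 */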

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
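
/*
 * The cookie is a pointer to the buffer's bus address. Drivers normally
 * reach it through the vb2_dma_contig_plane_dma_addr() helper in
 * <media/videobuf2-dma-contig.h>, which boils down to roughly:
 *
 *	dma_addr_t addr = *(dma_addr_t *)vb2_plane_cookie(vb, plane_no);
 */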

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
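
/*
 * For USERPTR buffers this allocator owns a streaming DMA mapping, so
 * caches must be synchronized around each hardware operation: prepare
 * hands the buffer to the device, finish hands it back to the CPU. MMAP
 * buffers (no dma_sgt) are coherent, and for DMABUF the exporter does
 * its own cache maintenance, hence the early returns below.
 */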

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}
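
/*
 * MMAP buffers are backed by dma_alloc_coherent() and refcounted through
 * the vmarea handler, so the memory outlives the vb2 buffer for as long
 * as a userspace mapping still references it; the final vb2_dc_put()
 * releases it with dma_free_coherent().
 */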

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
				  &vb2_common_vm_ops, &buf->handler);
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
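
/*
 * VM_IO/VM_PFNMAP mappings have no struct page behind them, so the
 * USERPTR path below resolves them with follow_pfn() instead of
 * pinning them with get_user_pages().
 */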

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}
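
/*
 * Pages that the device may have written to must be marked dirty before
 * they are released, otherwise the data could be silently dropped on
 * reclaim.
 */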

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
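
/*
 * The USERPTR path: look up the vma backing the user buffer, pin (or
 * resolve) its pages, build an sg_table from them, map it for DMA and
 * verify that the resulting bus addresses form one contiguous chunk.
 * Failure paths unwind in reverse order through the fail_* labels.
 */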

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
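
/*
 * DMABUF import follows the standard dma-buf protocol: the buffer is
 * attached once (attach_dmabuf), pinned to a device address for each
 * streaming session (map_dmabuf/unmap_dmabuf), and detached when the
 * vb2 buffer is released (detach_dmabuf).
 */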

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough for the contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
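
/*
 * Typical driver wiring (a sketch with illustrative names; error
 * handling is trimmed and "dev"/"pdev"/"q" are assumed driver
 * structures):
 *
 *	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(dev->alloc_ctx))
 *		return PTR_ERR(dev->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 *
 * The context is then handed back to the vb2 core from the driver's
 * queue_setup() callback through the alloc_ctxs[] array, and released
 * with vb2_dma_contig_cleanup_ctx() on teardown.
 */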

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");