/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

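/*
 * Illustrative userspace-side usage (a sketch, not code in this driver): the
 * value returned above is the "fake" offset in the drm device's mmap address
 * space, which userspace passes straight to mmap() on the drm fd, e.g.:
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			drm_fd, offset);
 *
 * The fault handler above then fills in the real pages on demand.
 */
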
/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* undo the partial mapping on error: */
	da = iova;
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		// XXX ideally we would not map buffers writable when not needed...
		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

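/*
 * Illustrative caller pattern (a sketch; 'gpu' and the command-stream step
 * are assumptions, not code in this file): submit paths that already hold
 * struct_mutex pin a buffer into the GPU's iommu domain roughly like:
 *
 *	uint32_t iova;
 *	ret = msm_gem_get_iova_locked(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	// ... emit 'iova' into the command stream ...
 *
 * where gpu->id is assumed to index priv->iommus[] as used above.
 */
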
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	int ret;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

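/*
 * Worked example (illustrative, assuming align_pitch() in msm_drv.h pads the
 * width to 32 pixels): a 1024x768 XRGB8888 dumb buffer gives
 *
 *	pitch = 4 * ALIGN(1024, 32) = 4096 bytes
 *	size  = PAGE_ALIGN(4096 * 768) = 3145728 bytes (3 MiB, already
 *	        page aligned)
 *
 * The resulting handle can then be passed to msm_gem_dumb_map_offset() below
 * to obtain the fake mmap offset.
 */
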
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL)
		return -ENOENT;

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

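/*
 * Illustrative kernel-internal use (a sketch, not a new API): callers that
 * need CPU access to a buffer, e.g. fbdev emulation clearing its scanout
 * buffer, can do roughly:
 *
 *	void *ptr = msm_gem_vaddr(bo);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memset(ptr, 0, bo->size);
 *
 * The mapping is write-combined (see the vmap() above), so it is intended
 * for CPU writes; reads through it are uncached and slow.
 */
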
/* setup callback for when bo is no longer busy..
 * TODO probably want to differentiate read vs write..
 */
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

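/*
 * Illustrative caller pattern (a sketch; the INIT_FENCE_CB() helper is
 * assumed to be the msm_fence_cb initializer from msm_drv.h, and the
 * pageflip names are assumptions): a page-flip handler that must wait for
 * rendering to the new scanout buffer could do:
 *
 *	static void pageflip_cb(struct msm_fence_cb *cb)
 *	{
 *		// program the new scanout address, send the flip event ...
 *	}
 *
 *	INIT_FENCE_CB(&crtc->pageflip_cb, pageflip_cb);
 *	msm_gem_queue_inactive_cb(bo, &crtc->pageflip_cb);
 */
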
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);
	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;
	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	int ret, npages;

	size = PAGE_ALIGN(size);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);
	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;
	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}