drm/i915: Pin relocations for the duration of constructing the execbuffer
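In short, per the hunks below: the __EXEC_OBJECT_HAS_PIN / __EXEC_OBJECT_HAS_FENCE flags and the i915_gem_execbuffer_unreserve_vma() helper move ahead of the relocation code so that the pins taken while reserving objects can be held across relocation. The reserve loop no longer drops its pins at the end of every pass; it only unreserves the VMAs before evicting and retrying on -ENOSPC. The pins are instead released when the eb_vmas list is torn down, both in eb_destroy() and when falling back to the slow relocation path.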
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 78786c44fe526edce71d2524e61f9819c531fa3e..b7e787fb4649321cd67d7456aef11af861757aac 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
        struct list_head vmas;
        int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
        }
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+       struct drm_i915_gem_exec_object2 *entry;
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return;
+
+       entry = vma->exec_entry;
+
+       if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+               i915_gem_object_unpin_fence(obj);
+
+       if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+               i915_gem_object_unpin(obj);
+
+       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;
 
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
+               i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
        return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-       struct drm_i915_gem_exec_object2 *entry;
-       struct drm_i915_gem_object *obj = vma->obj;
-
-       if (!drm_mm_node_allocated(&vma->node))
-               return;
-
-       entry = vma->exec_entry;
-
-       if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-               i915_gem_object_unpin_fence(obj);
-
-       if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               i915_gem_object_unpin(obj);
-
-       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                                goto err;
                }
 
-err:           /* Decrement pin count for bound objects */
-               list_for_each_entry(vma, vmas, exec_list)
-                       i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
                if (ret != -ENOSPC || retry++)
                        return ret;
 
+               /* Decrement pin count for bound objects */
+               list_for_each_entry(vma, vmas, exec_list)
+                       i915_gem_execbuffer_unreserve_vma(vma);
+
                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
+               i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
 
@@ -1146,8 +1150,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
-        * hsw should have this fixed, but let's be paranoid and do it
-        * unconditionally for now. */
+        * hsw should have this fixed, but bdw mucks it up again. */
        if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);