drm/i915: Pin relocations for the duration of constructing the execbuffer
author		Chris Wilson <chris@chris-wilson.co.uk>
		Tue, 26 Nov 2013 11:23:15 +0000 (11:23 +0000)
committer	Daniel Vetter <daniel.vetter@ffwll.ch>
		Wed, 27 Nov 2013 08:04:36 +0000 (09:04 +0100)
As the execbuffer dispatch grows ever more complex and involves multiple
stages of moving objects into the aperture, we need to take greater care
that we do not evict our execbuffer objects prior to dispatch. This is
relatively simple, as we can just keep the objects pinned not just for
the relocation but until we are finished.

One such example is the possibility of the context switch causing an
eviction or hitting the shrinker in order to fit its object into the
aperture.
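
To make the change concrete, below is a minimal, self-contained sketch of
the pattern the patch adopts; the names (fake_obj, reserve_all,
release_all, do_dispatch) are invented for illustration and are not i915
code. The idea is that every object is pinned when it is reserved and
stays pinned until the whole execbuffer has been constructed and
dispatched, so no intermediate step such as a context switch or a
shrinker pass can evict it behind our back.

	/*
	 * Minimal model of the "pin for the whole operation" pattern.
	 * These types and helpers are stand-ins, not i915 driver code.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_obj {
		const char *name;
		bool pinned;		/* stands in for __EXEC_OBJECT_HAS_PIN */
	};

	static void reserve_all(struct fake_obj *objs, int n)
	{
		for (int i = 0; i < n; i++)
			objs[i].pinned = true;	/* pinned objects cannot be evicted */
	}

	static void release_all(struct fake_obj *objs, int n)
	{
		for (int i = 0; i < n; i++)
			objs[i].pinned = false;	/* unpin only once everything is done */
	}

	static void do_dispatch(const struct fake_obj *objs, int n)
	{
		/*
		 * Every intermediate step (relocation, context switch, ...)
		 * can rely on the objects still being resident, because
		 * nothing has been unpinned yet.
		 */
		for (int i = 0; i < n; i++)
			printf("dispatching %s (pinned=%d)\n",
			       objs[i].name, objs[i].pinned);
	}

	int main(void)
	{
		struct fake_obj objs[] = { { "batch", false }, { "context", false } };

		reserve_all(objs, 2);	/* pin while building the execbuffer */
		do_dispatch(objs, 2);	/* objects stay pinned throughout dispatch */
		release_all(objs, 2);	/* corresponds to the unreserve in eb_destroy() */
		return 0;
	}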

Link: http://lists.freedesktop.org/archives/intel-gfx/2013-November/036166.html
Reported-by: "Siluvery, Arun" <arun.siluvery@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: stable@vger.kernel.org
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Tested-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Add the additional explanations from Chris to the commit
message.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem_execbuffer.c

index 885d595e0e02255e30fbff181247b70df1af5363..b7e787fb4649321cd67d7456aef11af861757aac 100644
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
        struct list_head vmas;
        int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
        }
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+       struct drm_i915_gem_exec_object2 *entry;
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return;
+
+       entry = vma->exec_entry;
+
+       if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+               i915_gem_object_unpin_fence(obj);
+
+       if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+               i915_gem_object_unpin(obj);
+
+       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;
 
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
+               i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
        return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-       struct drm_i915_gem_exec_object2 *entry;
-       struct drm_i915_gem_object *obj = vma->obj;
-
-       if (!drm_mm_node_allocated(&vma->node))
-               return;
-
-       entry = vma->exec_entry;
-
-       if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-               i915_gem_object_unpin_fence(obj);
-
-       if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               i915_gem_object_unpin(obj);
-
-       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                                goto err;
                }
 
-err:           /* Decrement pin count for bound objects */
-               list_for_each_entry(vma, vmas, exec_list)
-                       i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
                if (ret != -ENOSPC || retry++)
                        return ret;
 
+               /* Decrement pin count for bound objects */
+               list_for_each_entry(vma, vmas, exec_list)
+                       i915_gem_execbuffer_unreserve_vma(vma);
+
                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
+               i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }