Merge tag 'drm-intel-next-2013-11-29' of git://people.freedesktop.org/~danvet/drm...

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02255e30fbff181247b70df1af5363..3c90dd1a3bbdb81a0604216924ae86cb578e8ed4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
        struct list_head vmas;
        int and;
@@ -43,7 +46,7 @@ struct eb_vmas {
 };
 
 static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
 {
        struct eb_vmas *eb = NULL;
 
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
        }
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+       struct drm_i915_gem_exec_object2 *entry;
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return;
+
+       entry = vma->exec_entry;
+
+       if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+               i915_gem_object_unpin_fence(obj);
+
+       if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+               i915_gem_object_unpin(obj);
+
+       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;
 
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
+               i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
@@ -307,7 +332,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;
 
-       target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+       target_offset = target_vma->node.start;
 
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
@@ -454,8 +479,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_vmas *eb,
-                            struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 {
        struct i915_vma *vma;
        int ret = 0;
@@ -478,9 +502,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
        return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +573,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-       struct drm_i915_gem_exec_object2 *entry;
-       struct drm_i915_gem_object *obj = vma->obj;
-
-       if (!drm_mm_node_allocated(&vma->node))
-               return;
-
-       entry = vma->exec_entry;
-
-       if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-               i915_gem_object_unpin_fence(obj);
-
-       if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               i915_gem_object_unpin(obj);
-
-       entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
@@ -670,13 +671,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                                goto err;
                }
 
-err:           /* Decrement pin count for bound objects */
-               list_for_each_entry(vma, vmas, exec_list)
-                       i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
                if (ret != -ENOSPC || retry++)
                        return ret;
 
+               /* Decrement pin count for bound objects */
+               list_for_each_entry(vma, vmas, exec_list)
+                       i915_gem_execbuffer_unreserve_vma(vma);
+
                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
@@ -708,6 +710,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
+               i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
 
@@ -1102,7 +1105,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       eb = eb_create(args, vm);
+       eb = eb_create(args);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
@@ -1125,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
-               ret = i915_gem_execbuffer_relocate(eb, vm);
+               ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,