drm/i915: Update rules for writing through the LLC with the cpu
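
The hunks below replace the scattered "obj->cache_level == I915_CACHE_NONE"
checks with two helpers, cpu_cache_is_coherent() and cpu_write_needs_clflush():
a CPU write only needs a clflush when the platform has no LLC and the object is
uncached, or when the object is pinned for the display engine, which does not
snoop the LLC.  As a rough, non-authoritative illustration, here is a standalone
restatement of that rule; "has_llc" stands in for HAS_LLC(dev) and "example_obj"
for struct drm_i915_gem_object, neither of which appears in the patch in this
form.

    #include <stdbool.h>

    enum i915_cache_level { I915_CACHE_NONE, I915_CACHE_LLC, I915_CACHE_LLC_MLC };

    struct example_obj {
            enum i915_cache_level cache_level;
            bool pin_display;               /* pinned for scanout */
    };

    /* CPU caches are coherent with the GPU if the GPU shares the LLC, or if
     * the object is mapped with snooped (cached) PTEs. */
    static bool cpu_cache_is_coherent(bool has_llc, enum i915_cache_level level)
    {
            return has_llc || level != I915_CACHE_NONE;
    }

    /* A CPU write still needs a clflush when the caches are not coherent,
     * or when the object is being scanned out, since the display engine
     * reads from memory without snooping the LLC. */
    static bool cpu_write_needs_clflush(bool has_llc, const struct example_obj *obj)
    {
            return !cpu_cache_is_coherent(has_llc, obj->cache_level) ||
                   obj->pin_display;
    }
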
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8830856bf3f97c112002e7473dba8b4ec812aa42..54d76e9392d8825672f232c323682a7cd803b74f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
 #include <linux/dma-buf.h>
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-                                                   unsigned alignment,
-                                                   bool map_and_fenceable,
-                                                   bool nonblocking);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+                                                  bool force);
+static __must_check int
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+                          struct i915_address_space *vm,
+                          unsigned alignment,
+                          bool map_and_fenceable,
+                          bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
@@ -59,6 +62,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+                                 enum i915_cache_level level)
+{
+       return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+       if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+               return true;
+
+       return obj->pin_display;
+}
+
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 {
        if (obj->tiling_mode)
@@ -75,15 +92,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
 {
+       spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
+       spin_unlock(&dev_priv->mm.object_stat_lock);
 }
 
 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
 {
+       spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
+       spin_unlock(&dev_priv->mm.object_stat_lock);
 }
 
 static int
@@ -135,7 +156,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_ggtt_bound(obj) && !obj->active;
+       return i915_gem_obj_bound_any(obj) && !obj->active;
 }
 
 int
@@ -219,16 +240,10 @@ i915_gem_create(struct drm_file *file,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
-       if (ret) {
-               drm_gem_object_release(&obj->base);
-               i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-               i915_gem_object_free(obj);
-               return ret;
-       }
-
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference(&obj->base);
-       trace_i915_gem_object_create(obj);
+       drm_gem_object_unreference_unlocked(&obj->base);
+       if (ret)
+               return ret;
 
        *handle_p = handle;
        return 0;
@@ -420,9 +435,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
-               if (obj->cache_level == I915_CACHE_NONE)
-                       needs_clflush = 1;
-               if (i915_gem_obj_ggtt_bound(obj)) {
+               needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
+               if (i915_gem_obj_bound_any(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                        if (ret)
                                return ret;
@@ -465,7 +479,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (!prefaulted) {
+               if (likely(!i915_prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -594,7 +608,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        char __user *user_data;
        int page_offset, page_length, ret;
 
-       ret = i915_gem_object_pin(obj, 0, true, true);
+       ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
        if (ret)
                goto out;
 
@@ -737,19 +751,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
-               if (obj->cache_level == I915_CACHE_NONE)
-                       needs_clflush_after = 1;
-               if (i915_gem_obj_ggtt_bound(obj)) {
+               needs_clflush_after = cpu_write_needs_clflush(obj);
+               if (i915_gem_obj_bound_any(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, true);
                        if (ret)
                                return ret;
                }
        }
-       /* Same trick applies for invalidate partially written cachelines before
-        * writing.  */
-       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
-           && obj->cache_level == I915_CACHE_NONE)
-               needs_clflush_before = 1;
+       /* Same trick applies to invalidate partially written cachelines read
+        * before writing. */
+       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+               needs_clflush_before =
+                       !cpu_cache_is_coherent(dev, obj->cache_level);
 
        ret = i915_gem_object_get_pages(obj);
        if (ret)
@@ -828,7 +841,7 @@ out:
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-                       i915_gem_clflush_object(obj);
+                       i915_gem_clflush_object(obj, obj->pin_display);
                        i915_gem_chipset_flush(dev);
                }
        }
@@ -860,10 +873,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
-                                          args->size);
-       if (ret)
-               return -EFAULT;
+       if (likely(!i915_prefault_disable)) {
+               ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+                                                  args->size);
+               if (ret)
+                       return -EFAULT;
+       }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -904,9 +919,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       if (obj->cache_level == I915_CACHE_NONE &&
-           obj->tiling_mode == I915_TILING_NONE &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+       if (obj->tiling_mode == I915_TILING_NONE &&
+           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+           cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
@@ -1255,8 +1270,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }
 
        /* Pinned buffers may be scanout, so flush the cache */
-       if (obj->pin_count)
-               i915_gem_object_flush_cpu_write_domain(obj);
+       if (obj->pin_display)
+               i915_gem_object_flush_cpu_write_domain(obj, true);
 
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -1346,7 +1361,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Now bind it into the GTT if needed */
-       ret = i915_gem_object_pin(obj, 0, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
        if (ret)
                goto unlock;
 
@@ -1633,7 +1648,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                 * hope for the best.
                 */
                WARN_ON(ret != -EIO);
-               i915_gem_clflush_object(obj);
+               i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -1668,11 +1683,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages == NULL)
                return 0;
 
-       BUG_ON(i915_gem_obj_ggtt_bound(obj));
-
        if (obj->pages_pin_count)
                return -EBUSY;
 
+       BUG_ON(i915_gem_obj_bound_any(obj));
+
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
@@ -1692,7 +1707,6 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                  bool purgeable_only)
 {
        struct drm_i915_gem_object *obj, *next;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
        long count = 0;
 
        list_for_each_entry_safe(obj, next,
@@ -1706,10 +1720,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                }
        }
 
-       list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
-               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
-                   i915_gem_object_unbind(obj) == 0 &&
-                   i915_gem_object_put_pages(obj) == 0) {
+       list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+                                global_list) {
+               struct i915_vma *vma, *v;
+
+               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                       continue;
+
+               list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       if (i915_vma_unbind(vma))
+                               break;
+
+               if (!i915_gem_object_put_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
                                return count;
@@ -1877,10 +1899,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
+       if (obj->ring != ring && obj->last_write_seqno) {
+               /* Keep the seqno relative to the current ring */
+               obj->last_write_seqno = seqno;
+       }
        obj->ring = ring;
 
        /* Add a reference if we're newly entering the active list. */
@@ -1889,8 +1914,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                obj->active = 1;
        }
 
-       /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj->mm_list, &vm->active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
@@ -1912,14 +1935,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+       struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&obj->mm_list, &vm->inactive_list);
+       list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -2115,10 +2138,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
+                                   struct i915_address_space *vm)
 {
-       if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
-           acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+       if (acthd >= i915_gem_obj_offset(obj, vm) &&
+           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
                return true;
 
        return false;
@@ -2141,6 +2165,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
        return false;
 }
 
+static struct i915_address_space *
+request_to_vm(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
+       struct i915_address_space *vm;
+
+       vm = &dev_priv->gtt.base;
+
+       return vm;
+}
+
 static bool i915_request_guilty(struct drm_i915_gem_request *request,
                                const u32 acthd, bool *inside)
 {
@@ -2148,9 +2183,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
         * pointing inside the ring, matches the batch_obj address range.
         * However this is extremely unlikely.
         */
-
        if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj)) {
+               if (i915_head_inside_object(acthd, request->batch_obj,
+                                           request_to_vm(request))) {
                        *inside = true;
                        return true;
                }
@@ -2170,17 +2205,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 {
        struct i915_ctx_hang_stats *hs = NULL;
        bool inside, guilty;
+       unsigned long offset = 0;
 
        /* Innocent until proven guilty */
        guilty = false;
 
+       if (request->batch_obj)
+               offset = i915_gem_obj_offset(request->batch_obj,
+                                            request_to_vm(request));
+
        if (ring->hangcheck.action != wait &&
            i915_request_guilty(request, acthd, &inside)) {
                DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
-                         request->batch_obj ?
-                         i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
+                         offset,
                          request->ctx ? request->ctx->id : 0,
                          acthd);
 
@@ -2247,7 +2286,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
        }
 }
 
-static void i915_gem_reset_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
@@ -2255,38 +2294,29 @@ static void i915_gem_reset_fences(struct drm_device *dev)
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
-               if (reg->obj)
-                       i915_gem_object_fence_lost(reg->obj);
-
-               i915_gem_write_fence(dev, i, NULL);
-
-               reg->pin_count = 0;
-               reg->obj = NULL;
-               INIT_LIST_HEAD(&reg->lru_list);
+               /*
+                * Commit delayed tiling changes if we have an object still
+                * attached to the fence, otherwise just clear the fence.
+                */
+               if (reg->obj) {
+                       i915_gem_object_update_fence(reg->obj, reg,
+                                                    reg->obj->tiling_mode);
+               } else {
+                       i915_gem_write_fence(dev, i, NULL);
+               }
        }
-
-       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
-       struct drm_i915_gem_object *obj;
        struct intel_ring_buffer *ring;
        int i;
 
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_lists(dev_priv, ring);
 
-       /* Move everything out of the GPU domains to ensure we do any
-        * necessary invalidation upon reuse.
-        */
-       list_for_each_entry(obj, &vm->inactive_list, mm_list)
-               obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-
-       /* The fence registers are invalidated so clear them out */
-       i915_gem_reset_fences(dev);
+       i915_gem_restore_fences(dev);
 }
 
 /**
@@ -2580,16 +2610,13 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
                                            old_write_domain);
 }
 
-/**
- * Unbinds an object from the GTT aperture.
- */
-int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_vma_unbind(struct i915_vma *vma)
 {
+       struct drm_i915_gem_object *obj = vma->obj;
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       if (!i915_gem_obj_ggtt_bound(obj))
+       if (list_empty(&vma->vma_link))
                return 0;
 
        if (obj->pin_count)
@@ -2612,7 +2639,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (ret)
                return ret;
 
-       trace_i915_gem_object_unbind(obj);
+       trace_i915_vma_unbind(vma);
 
        if (obj->has_global_gtt_mapping)
                i915_gem_gtt_unbind_object(obj);
@@ -2623,16 +2650,44 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        i915_gem_gtt_finish_object(obj);
        i915_gem_object_unpin_pages(obj);
 
-       list_del(&obj->mm_list);
-       list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+       list_del(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
-       obj->map_and_fenceable = true;
+       if (i915_is_ggtt(vma->vm))
+               obj->map_and_fenceable = true;
 
-       drm_mm_remove_node(&obj->gtt_space);
+       drm_mm_remove_node(&vma->node);
+       i915_gem_vma_destroy(vma);
+
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        * NB: Until we have real VMAs there will only ever be one */
+       WARN_ON(!list_empty(&obj->vma_list));
+       if (list_empty(&obj->vma_list))
+               list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
        return 0;
 }
 
+/**
+ * Unbinds an object from the global GTT aperture.
+ */
+int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_address_space *ggtt = &dev_priv->gtt.base;
+
+       if (!i915_gem_obj_ggtt_bound(obj))
+               return 0;
+
+       if (obj->pin_count)
+               return -EBUSY;
+
+       BUG_ON(obj->pages == NULL);
+
+       return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
+}
+
 int i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2659,7 +2714,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
        drm_i915_private_t *dev_priv = dev->dev_private;
        int fence_reg;
        int fence_pitch_shift;
-       uint64_t val;
 
        if (INTEL_INFO(dev)->gen >= 6) {
                fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2669,8 +2723,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }
 
+       fence_reg += reg * 8;
+
+       /* To w/a incoherency with non-atomic 64-bit register updates,
+        * we split the 64-bit update into two 32-bit writes. In order
+        * for a partial fence not to be evaluated between writes, we
+        * precede the update with write to turn off the fence register,
+        * and only enable the fence as the last step.
+        *
+        * For extra levels of paranoia, we make sure each step lands
+        * before applying the next step.
+        */
+       I915_WRITE(fence_reg, 0);
+       POSTING_READ(fence_reg);
+
        if (obj) {
                u32 size = i915_gem_obj_ggtt_size(obj);
+               uint64_t val;
 
                val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
@@ -2679,12 +2748,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
                val |= I965_FENCE_REG_VALID;
-       } else
-               val = 0;
 
-       fence_reg += reg * 8;
-       I915_WRITE64(fence_reg, val);
-       POSTING_READ(fence_reg);
+               I915_WRITE(fence_reg + 4, val >> 32);
+               POSTING_READ(fence_reg + 4);
+
+               I915_WRITE(fence_reg + 0, val);
+               POSTING_READ(fence_reg);
+       } else {
+               I915_WRITE(fence_reg + 4, 0);
+               POSTING_READ(fence_reg + 4);
+       }
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2779,6 +2852,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
        if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
                mb();
 
+       WARN(obj && (!obj->stride || !obj->tiling_mode),
+            "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+            obj->stride, obj->tiling_mode);
+
        switch (INTEL_INFO(dev)->gen) {
        case 7:
        case 6:
@@ -2802,56 +2879,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
        return fence - dev_priv->fence_regs;
 }
 
-struct write_fence {
-       struct drm_device *dev;
-       struct drm_i915_gem_object *obj;
-       int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
-       struct write_fence *args = data;
-
-       /* Required for SNB+ with LLC */
-       wbinvd();
-
-       /* Required for VLV */
-       i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct write_fence args = {
-               .dev = obj->base.dev,
-               .fence = fence_number(dev_priv, fence),
-               .obj = enable ? obj : NULL,
-       };
-
-       /* In order to fully serialize access to the fenced region and
-        * the update to the fence register we need to take extreme
-        * measures on SNB+. In theory, the write to the fence register
-        * flushes all memory transactions before, and coupled with the
-        * mb() placed around the register write we serialise all memory
-        * operations with respect to the changes in the tiler. Yet, on
-        * SNB+ we need to take a step further and emit an explicit wbinvd()
-        * on each processor in order to manually flush all memory
-        * transactions before updating the fence register.
-        *
-        * However, Valleyview complicates matter. There the wbinvd is
-        * insufficient and unlike SNB/IVB requires the serialising
-        * register write. (Note that that register write by itself is
-        * conversely not sufficient for SNB+.) To compromise, we do both.
-        */
-       if (INTEL_INFO(args.dev)->gen >= 6)
-               on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
-       else
-               i915_gem_write_fence(args.dev, args.fence, args.obj);
+       int reg = fence_number(dev_priv, fence);
+
+       i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
        if (enable) {
-               obj->fence_reg = args.fence;
+               obj->fence_reg = reg;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
@@ -2859,6 +2897,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                fence->obj = NULL;
                list_del_init(&fence->lru_list);
        }
+       obj->fence_dirty = false;
 }
 
 static int
@@ -2988,7 +3027,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                return 0;
 
        i915_gem_object_update_fence(obj, reg, enable);
-       obj->fence_dirty = false;
 
        return 0;
 }
@@ -3067,20 +3105,24 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
-i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-                           unsigned alignment,
-                           bool map_and_fenceable,
-                           bool nonblocking)
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+                          struct i915_address_space *vm,
+                          unsigned alignment,
+                          bool map_and_fenceable,
+                          bool nonblocking)
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
-       size_t gtt_max = map_and_fenceable ?
-               dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+       size_t gtt_max =
+               map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+       struct i915_vma *vma;
        int ret;
 
+       if (WARN_ON(!list_empty(&obj->vma_list)))
+               return -EBUSY;
+
        fence_size = i915_gem_get_gtt_size(dev,
                                           obj->base.size,
                                           obj->tiling_mode);
@@ -3119,55 +3161,72 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
+       /* FIXME: For now we only ever use 1 VMA per object */
+       BUG_ON(!i915_is_ggtt(vm));
+       WARN_ON(!list_empty(&obj->vma_list));
+
+       vma = i915_gem_vma_create(obj, vm);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unpin;
+       }
+
 search_free:
-       ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
-                                                 &obj->gtt_space,
+       ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
                                                  obj->cache_level, 0, gtt_max);
        if (ret) {
-               ret = i915_gem_evict_something(dev, size, alignment,
+               ret = i915_gem_evict_something(dev, vm, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
                if (ret == 0)
                        goto search_free;
 
-               i915_gem_object_unpin_pages(obj);
-               return ret;
+               goto err_free_vma;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
                                              obj->cache_level))) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_remove_node(&obj->gtt_space);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_remove_node;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
-       if (ret) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_remove_node(&obj->gtt_space);
-               return ret;
-       }
+       if (ret)
+               goto err_remove_node;
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &vm->inactive_list);
+       list_add_tail(&vma->mm_list, &vm->inactive_list);
 
        fenceable =
+               i915_is_ggtt(vm) &&
                i915_gem_obj_ggtt_size(obj) == fence_size &&
                (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
 
-       mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
-               dev_priv->gtt.mappable_end;
+       mappable =
+               i915_is_ggtt(vm) &&
+               vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
 
-       obj->map_and_fenceable = mappable && fenceable;
+       /* Map and fenceable only changes if the VM is the global GGTT */
+       if (i915_is_ggtt(vm))
+               obj->map_and_fenceable = mappable && fenceable;
 
-       trace_i915_gem_object_bind(obj, map_and_fenceable);
+       trace_i915_vma_bind(vma, map_and_fenceable);
        i915_gem_verify_gtt(dev);
        return 0;
+
+err_remove_node:
+       drm_mm_remove_node(&vma->node);
+err_free_vma:
+       i915_gem_vma_destroy(vma);
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
+       return ret;
 }
 
 void
-i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+                       bool force)
 {
        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
@@ -3191,7 +3250,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
-       if (obj->cache_level != I915_CACHE_NONE)
+       if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return;
 
        trace_i915_gem_object_clflush(obj);
@@ -3228,14 +3287,15 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+                                      bool force)
 {
        uint32_t old_write_domain;
 
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                return;
 
-       i915_gem_clflush_object(obj);
+       i915_gem_clflush_object(obj, force);
        i915_gem_chipset_flush(obj->base.dev);
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
@@ -3259,7 +3319,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (!i915_gem_obj_ggtt_bound(obj))
+       if (!i915_gem_obj_bound_any(obj))
                return -EINVAL;
 
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3269,7 +3329,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_flush_cpu_write_domain(obj);
+       i915_gem_object_flush_cpu_write_domain(obj, false);
 
        /* Serialise direct access to this object with the barriers for
         * coherent writes from the GPU, by effectively invalidating the
@@ -3297,9 +3357,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list,
-                              &dev_priv->gtt.base.inactive_list);
+       if (i915_gem_object_is_inactive(obj)) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
+               if (vma)
+                       list_move_tail(&vma->mm_list,
+                                      &dev_priv->gtt.base.inactive_list);
+
+       }
 
        return 0;
 }
@@ -3309,6 +3374,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_vma *vma;
        int ret;
 
        if (obj->cache_level == cache_level)
@@ -3319,13 +3385,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                return -EBUSY;
        }
 
-       if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
-               ret = i915_gem_object_unbind(obj);
-               if (ret)
-                       return ret;
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+                       ret = i915_vma_unbind(vma);
+                       if (ret)
+                               return ret;
+
+                       break;
+               }
        }
 
-       if (i915_gem_obj_ggtt_bound(obj)) {
+       if (i915_gem_obj_bound_any(obj)) {
                ret = i915_gem_object_finish_gpu(obj);
                if (ret)
                        return ret;
@@ -3347,11 +3417,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                if (obj->has_aliasing_ppgtt_mapping)
                        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                               obj, cache_level);
-
-               i915_gem_obj_ggtt_set_color(obj, cache_level);
        }
 
-       if (cache_level == I915_CACHE_NONE) {
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               vma->node.color = cache_level;
+       obj->cache_level = cache_level;
+
+       if (cpu_write_needs_clflush(obj)) {
                u32 old_read_domains, old_write_domain;
 
                /* If we're coming from LLC cached, then we haven't
@@ -3374,7 +3446,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                                    old_write_domain);
        }
 
-       obj->cache_level = cache_level;
        i915_gem_verify_gtt(dev);
        return 0;
 }
@@ -3441,6 +3512,22 @@ unlock:
        return ret;
 }
 
+static bool is_pin_display(struct drm_i915_gem_object *obj)
+{
+       /* There are 3 sources that pin objects:
+        *   1. The display engine (scanouts, sprites, cursors);
+        *   2. Reservations for execbuffer;
+        *   3. The user.
+        *
+        * We can ignore reservations as we hold the struct_mutex and
+        * are only called outside of the reservation path.  The user
+        * can only increment pin_count once, and so if after
+        * subtracting the potential reference by the user, any pin_count
+        * remains, it must be due to another use by the display engine.
+        */
+       return obj->pin_count - !!obj->user_pin_count;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3460,6 +3547,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                        return ret;
        }
 
+       /* Mark the pin_display early so that we account for the
+        * display coherency whilst setting up the cache domains.
+        */
+       obj->pin_display = true;
+
        /* The display engine is not coherent with the LLC cache on gen6.  As
         * a result, we make sure that the pinning that is about to occur is
         * done with uncached PTEs. This is lowest common denominator for all
@@ -3471,17 +3563,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         */
        ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
        if (ret)
-               return ret;
+               goto err_unpin_display;
 
        /* As the user may map the buffer once pinned in the display plane
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
-       ret = i915_gem_object_pin(obj, alignment, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
        if (ret)
-               return ret;
+               goto err_unpin_display;
 
-       i915_gem_object_flush_cpu_write_domain(obj);
+       i915_gem_object_flush_cpu_write_domain(obj, true);
 
        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;
@@ -3497,6 +3589,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                            old_write_domain);
 
        return 0;
+
+err_unpin_display:
+       obj->pin_display = is_pin_display(obj);
+       return ret;
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin(obj);
+       obj->pin_display = is_pin_display(obj);
 }
 
 int
@@ -3542,7 +3645,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
-               i915_gem_clflush_object(obj);
+               i915_gem_clflush_object(obj, false);
 
                obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
        }
@@ -3620,37 +3723,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
                    uint32_t alignment,
                    bool map_and_fenceable,
                    bool nonblocking)
 {
+       struct i915_vma *vma;
        int ret;
 
        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                return -EBUSY;
 
-       if (i915_gem_obj_ggtt_bound(obj)) {
-               if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
+       WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+
+       vma = i915_gem_obj_to_vma(obj, vm);
+
+       if (vma) {
+               if ((alignment &&
+                    vma->node.start & (alignment - 1)) ||
                    (map_and_fenceable && !obj->map_and_fenceable)) {
                        WARN(obj->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            i915_gem_obj_ggtt_offset(obj), alignment,
+                            i915_gem_obj_offset(obj, vm), alignment,
                             map_and_fenceable,
                             obj->map_and_fenceable);
-                       ret = i915_gem_object_unbind(obj);
+                       ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
                }
        }
 
-       if (!i915_gem_obj_ggtt_bound(obj)) {
+       if (!i915_gem_obj_bound(obj, vm)) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
-               ret = i915_gem_object_bind_to_gtt(obj, alignment,
-                                                 map_and_fenceable,
-                                                 nonblocking);
+               ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
+                                                map_and_fenceable,
+                                                nonblocking);
                if (ret)
                        return ret;
 
@@ -3671,7 +3781,7 @@ void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pin_count == 0);
-       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+       BUG_ON(!i915_gem_obj_bound_any(obj));
 
        if (--obj->pin_count == 0)
                obj->pin_mappable = false;
@@ -3709,7 +3819,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->user_pin_count == 0) {
-               ret = i915_gem_object_pin(obj, args->alignment, true, false);
+               ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
                if (ret)
                        goto out;
        }
@@ -3717,10 +3827,6 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        obj->user_pin_count++;
        obj->pin_filp = file;
 
-       /* XXX - flush the CPU caches for pinned objects
-        * as the X server doesn't manage domains yet
-        */
-       i915_gem_object_flush_cpu_write_domain(obj);
        args->offset = i915_gem_obj_ggtt_offset(obj);
 out:
        drm_gem_object_unreference(&obj->base);
@@ -3860,10 +3966,10 @@ unlock:
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
-       INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
+       INIT_LIST_HEAD(&obj->vma_list);
 
        obj->ops = ops;
 
@@ -3928,6 +4034,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        } else
                obj->cache_level = I915_CACHE_NONE;
 
+       trace_i915_gem_object_create(obj);
+
        return obj;
 }
 
@@ -3943,6 +4051,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_vma *vma, *next;
 
        trace_i915_gem_object_destroy(obj);
 
@@ -3950,15 +4059,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                i915_gem_detach_phys_object(dev, obj);
 
        obj->pin_count = 0;
-       if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
-               bool was_interruptible;
+       /* NB: 0 or 1 elements */
+       WARN_ON(!list_empty(&obj->vma_list) &&
+               !list_is_singular(&obj->vma_list));
+       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+               int ret = i915_vma_unbind(vma);
+               if (WARN_ON(ret == -ERESTARTSYS)) {
+                       bool was_interruptible;
 
-               was_interruptible = dev_priv->mm.interruptible;
-               dev_priv->mm.interruptible = false;
+                       was_interruptible = dev_priv->mm.interruptible;
+                       dev_priv->mm.interruptible = false;
 
-               WARN_ON(i915_gem_object_unbind(obj));
+                       WARN_ON(i915_vma_unbind(vma));
 
-               dev_priv->mm.interruptible = was_interruptible;
+                       dev_priv->mm.interruptible = was_interruptible;
+               }
        }
 
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -3984,6 +4099,34 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_object_free(obj);
 }
 
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm)
+{
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
+       vma->vm = vm;
+       vma->obj = obj;
+
+       /* Keep GGTT vmas first to make debug easier */
+       if (i915_is_ggtt(vm))
+               list_add(&vma->vma_link, &obj->vma_list);
+       else
+               list_add_tail(&vma->vma_link, &obj->vma_list);
+
+       return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+       WARN_ON(vma->node.allocated);
+       list_del(&vma->vma_link);
+       kfree(vma);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4006,8 +4149,6 @@ i915_gem_idle(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
 
-       i915_gem_reset_fences(dev);
-
        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
        i915_kernel_lost_context(dev);
@@ -4308,6 +4449,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
        INIT_LIST_HEAD(&ring->request_list);
 }
 
+static void i915_init_vm(struct drm_i915_private *dev_priv,
+                        struct i915_address_space *vm)
+{
+       vm->dev = dev_priv->dev;
+       INIT_LIST_HEAD(&vm->active_list);
+       INIT_LIST_HEAD(&vm->inactive_list);
+       INIT_LIST_HEAD(&vm->global_link);
+       list_add(&vm->global_link, &dev_priv->vm_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4320,8 +4471,9 @@ i915_gem_load(struct drm_device *dev)
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
 
-       INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
-       INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->vm_list);
+       i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4353,7 +4505,8 @@ i915_gem_load(struct drm_device *dev)
                dev_priv->num_fence_regs = 8;
 
        /* Initialize fence registers to zero */
-       i915_gem_reset_fences(dev);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       i915_gem_restore_fences(dev);
 
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4591,7 +4744,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
-       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        int nr_to_scan = sc->nr_to_scan;
        bool unlock = true;
@@ -4620,11 +4772,88 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &vm->inactive_list, global_list)
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if (obj->active)
+                       continue;
+
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
+       }
 
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
        return cnt;
 }
+
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                                 struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_vma *vma;
+
+       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+               vm = &dev_priv->gtt.base;
+
+       BUG_ON(list_empty(&o->vma_list));
+       list_for_each_entry(vma, &o->vma_list, vma_link) {
+               if (vma->vm == vm)
+                       return vma->node.start;
+
+       }
+       return -1;
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+                       struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+
+       list_for_each_entry(vma, &o->vma_list, vma_link)
+               if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+                       return true;
+
+       return false;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_address_space *vm;
+
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               if (i915_gem_obj_bound(o, vm))
+                       return true;
+
+       return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+                               struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_vma *vma;
+
+       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+               vm = &dev_priv->gtt.base;
+
+       BUG_ON(list_empty(&o->vma_list));
+
+       list_for_each_entry(vma, &o->vma_list, vma_link)
+               if (vma->vm == vm)
+                       return vma->node.size;
+
+       return 0;
+}
+
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == vm)
+                       return vma;
+
+       return NULL;
+}
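
A closing usage note on the last hunk: object queries are now made against an
explicit struct i915_address_space rather than assuming the global GTT, so a
caller looks up the VMA, offset and size per (object, vm) pair.  A minimal
sketch, assuming the helpers added above and the usual i915 driver headers;
dump_obj_binding() is a hypothetical caller invented for illustration, not part
of the patch.

    static void dump_obj_binding(struct drm_i915_gem_object *obj,
                                 struct i915_address_space *vm)
    {
            struct i915_vma *vma;

            if (!i915_gem_obj_bound(obj, vm)) {
                    DRM_DEBUG("object %p not bound in this address space\n", obj);
                    return;
            }

            /* Offset and size are properties of the (object, vm) pairing,
             * not of the object alone. */
            vma = i915_gem_obj_to_vma(obj, vm);
            DRM_DEBUG("object %p bound at 0x%lx (size 0x%lx, vma %p)\n",
                      obj, i915_gem_obj_offset(obj, vm),
                      i915_gem_obj_size(obj, vm), vma);
    }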