Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm...
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 51282b2c49430f24bb3d70d9d7fc20a1693ec3c7..e6cc020ea32c704bf3cf751f0d13322325227e72 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,8 +163,8 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
                return -ENODEV;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_init_global_gtt(dev, args->gtt_start,
-                                args->gtt_end, args->gtt_end);
+       i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+                                 args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -1520,9 +1520,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        if (obj->base.map_list.map)
                return 0;
 
+       dev_priv->mm.shrinker_no_lock_stealing = true;
+
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
@@ -1534,10 +1536,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        i915_gem_shrink_all(dev_priv);
-       return drm_gem_create_mmap_offset(&obj->base);
+       ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+       dev_priv->mm.shrinker_no_lock_stealing = false;
+
+       return ret;
 }
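Note on the shrinker_no_lock_stealing guard introduced above: drm_gem_create_mmap_offset() allocates memory, and under pressure that allocation can recurse into the i915 shrinker, which would otherwise "steal" the struct_mutex this path already holds and start evicting objects underneath it (the matching check is added to i915_gem_inactive_shrink() at the end of this diff). The goto out statements exist so that every exit path clears the flag again. A rough summary of the resulting control flow, written as a comment rather than additional driver code:

/* shrinker_no_lock_stealing = true
 * drm_gem_create_mmap_offset()        -- anything but -ENOSPC: goto out
 * i915_gem_purge(); retry             -- anything but -ENOSPC: goto out
 * i915_gem_shrink_all(); final retry
 * out: shrinker_no_lock_stealing = false; return ret
 */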
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1699,10 +1705,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages_pin_count)
                return -EBUSY;
 
+       /* ->put_pages might need to allocate memory for the bit17 swizzle
+        * array, hence protect them from being reaped by removing them from gtt
+        * lists early. */
+       list_del(&obj->gtt_list);
+
        ops->put_pages(obj);
        obj->pages = NULL;
 
-       list_del(&obj->gtt_list);
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
@@ -1788,7 +1798,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         */
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        gfp = mapping_gfp_mask(mapping);
-       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
        gfp &= ~(__GFP_IO | __GFP_WAIT);
        for_each_sg(st->sgl, sg, page_count, i) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1801,7 +1811,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         */
-                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
                        gfp |= __GFP_IO | __GFP_WAIT;
 
                        i915_gem_shrink_all(dev_priv);
@@ -1809,7 +1819,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        if (IS_ERR(page))
                                goto err_pages;
 
-                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+                       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
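The __GFP_NO_KSWAPD additions keep these opportunistic shmem allocations from waking kswapd: the driver would rather reclaim its own buffers first (i915_gem_shrink_all() in the fallback branch) than push background reclaim onto the rest of the system. Only once the GEM caches have been purged are the flags relaxed for one last, fully blocking attempt. Summarised as a comment (flag names as in this kernel generation; only the pairing of the two modes, visible in the hunks above, is assumed):

/* Two allocation modes in i915_gem_object_get_pages_gtt():
 *   opportunistic: __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD set,
 *                  __GFP_IO | __GFP_WAIT cleared
 *                  -> fail fast and quietly, do not kick kswapd; on failure the
 *                     driver first frees its own caches via i915_gem_shrink_all()
 *   last resort:   NORETRY/NOWARN/NO_KSWAPD cleared, __GFP_IO | __GFP_WAIT set
 *                  -> let the core VM work (and OOM) as it normally would
 */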
 
@@ -1926,24 +1936,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 }
 
 static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret, i, j;
 
-       /* The hardware uses various monotonic 32-bit counters, if we
-        * detect that they will wraparound we need to idle the GPU
-        * and reset those counters.
-        */
-       ret = 0;
-       for_each_ring(ring, dev_priv, i) {
-               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-                       ret |= ring->sync_seqno[j] != 0;
-       }
-       if (ret == 0)
-               return ret;
-
        /* Carefully retire all requests without writing to the rings */
        for_each_ring(ring, dev_priv, i) {
                ret = intel_ring_idle(ring);
@@ -1954,7 +1952,7 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
 
        /* Finally reset hw state */
        for_each_ring(ring, dev_priv, i) {
-               intel_ring_init_seqno(ring, 0);
+               intel_ring_init_seqno(ring, seqno);
 
                for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
                        ring->sync_seqno[j] = 0;
@@ -1963,6 +1961,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
        return 0;
 }
 
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       if (seqno == 0)
+               return -EINVAL;
+
+       /* HWS page needs to be set less than what we
+        * will inject to ring
+        */
+       ret = i915_gem_init_seqno(dev, seqno - 1);
+       if (ret)
+               return ret;
+
+       /* Carefully set the last_seqno value so that wrap
+        * detection still works
+        */
+       dev_priv->next_seqno = seqno;
+       dev_priv->last_seqno = seqno - 1;
+       if (dev_priv->last_seqno == 0)
+               dev_priv->last_seqno--;
+
+       return 0;
+}
+
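A short worked example of the arithmetic in i915_gem_set_seqno(), assuming the seqno fields are 32-bit as elsewhere in dev_priv: for seqno = 0x100 the rings are idled and their status pages are initialised to 0xff (seqno - 1), so the first request subsequently emitted with 0x100 is seen as newer. The extra decrement only matters in the corner case seqno == 1:

/* i915_gem_set_seqno(dev, 1):
 *   i915_gem_init_seqno(dev, 0)   -- rings/HWS pages start at 0
 *   next_seqno = 1                -- first request will use 1
 *   last_seqno = 0                -- but 0 is reserved for "no seqno"...
 *   last_seqno--                  -- ...so it wraps to 0xffffffff, keeping the
 *                                    wrap detection described above consistent
 */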
 int
 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
@@ -1970,7 +1994,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_handle_seqno_wrap(dev);
+               int ret = i915_gem_init_seqno(dev, 0);
                if (ret)
                        return ret;
 
@@ -2895,7 +2919,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *free_space;
+       struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
@@ -2941,66 +2965,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (node == NULL) {
+               i915_gem_object_unpin_pages(obj);
+               return -ENOMEM;
+       }
+
  search_free:
        if (map_and_fenceable)
-               free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
+               ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                         size, alignment, obj->cache_level,
+                                                         0, dev_priv->mm.gtt_mappable_end);
        else
-               free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                                                     size, alignment, obj->cache_level,
-                                                     false);
-
-       if (free_space != NULL) {
-               if (map_and_fenceable)
-                       free_space =
-                               drm_mm_get_block_range_generic(free_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
-               else
-                       free_space =
-                               drm_mm_get_block_generic(free_space,
-                                                        size, alignment, obj->cache_level,
-                                                        false);
-       }
-       if (free_space == NULL) {
+               ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                                                size, alignment, obj->cache_level);
+       if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
-               if (ret) {
-                       i915_gem_object_unpin_pages(obj);
-                       return ret;
-               }
+               if (ret == 0)
+                       goto search_free;
 
-               goto search_free;
+               i915_gem_object_unpin_pages(obj);
+               kfree(node);
+               return ret;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                             free_space,
-                                             obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return -EINVAL;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return ret;
        }
 
        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       obj->gtt_space = free_space;
-       obj->gtt_offset = free_space->start;
+       obj->gtt_space = node;
+       obj->gtt_offset = node->start;
 
        fenceable =
-               free_space->size == fence_size &&
-               (free_space->start & (fence_alignment - 1)) == 0;
+               node->size == fence_size &&
+               (node->start & (fence_alignment - 1)) == 0;
 
        mappable =
                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
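The binding path now uses the single-call drm_mm_insert_node_in_range_generic()/drm_mm_insert_node_generic() API on a caller-allocated struct drm_mm_node instead of the old two-step search_free/get_block dance. The practical consequence is ownership: until the node is attached to the object, this function must free it itself, which is why the eviction-failure path gained an explicit kfree(node). A minimal sketch of the pattern, with the error handling trimmed to the essentials (identifiers as in the hunk above):

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return -ENOMEM;

	ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
					 size, alignment, obj->cache_level);
	if (ret) {		/* no suitable hole: the caller still owns the node */
		kfree(node);
		return ret;
	}

	obj->gtt_space = node;	/* success: the object takes ownership */
	obj->gtt_offset = node->start;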
@@ -3967,58 +3979,13 @@ cleanup_render_ring:
        return ret;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
-#endif
-
-       return true;
-}
-
 int i915_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long gtt_size, mappable_size;
        int ret;
 
-       gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-       mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
        mutex_lock(&dev->struct_mutex);
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                * aperture accordingly when using aliasing ppgtt. */
-               gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-               i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
-       } else {
-               /* Let GEM Manage all of the aperture.
-                *
-                * However, leave one page at the end still bound to the scratch
-                * page.  There are a number of places where the hardware
-                * apparently prefetches past the end of the object, and we've
-                * seen multiple hangs with the GPU head pointer stuck in a
-                * batchbuffer bound at the last page of the aperture.  One page
-                * should be enough to keep any prefetching inside of the
-                * aperture.
-                */
-               i915_gem_init_global_gtt(dev, 0, mappable_size,
-                                        gtt_size);
-       }
-
+       i915_gem_init_global_gtt(dev);
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
@@ -4406,6 +4373,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;
 
+               if (dev_priv->mm.shrinker_no_lock_stealing)
+                       return 0;
+
                unlock = false;
        }
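For context on the check added above: i915_gem_inactive_shrink() normally proceeds even when mutex_trylock() fails, as long as mutex_is_locked_by() shows struct_mutex is held by the current task, effectively stealing the lock from itself. The new flag suppresses that while i915_gem_object_create_mmap_offset() is allocating, since reclaiming there could free the very state the caller is still working on. Roughly the recursion being guarded against (call chain simplified and partly assumed):

/* i915_gem_object_create_mmap_offset()     -- caller holds struct_mutex
 *   drm_gem_create_mmap_offset()           -- allocates memory
 *     direct reclaim -> shrinker core
 *       i915_gem_inactive_shrink()
 *         mutex_trylock() fails, mutex_is_locked_by(..., current) is true
 *         -> would normally run anyway and evict/purge objects
 *         -> with shrinker_no_lock_stealing set it now returns 0 instead
 */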