drm/ttm: remove lru_lock around ttm_bo_reserve
author     Maarten Lankhorst <maarten.lankhorst@canonical.com>
           Tue, 15 Jan 2013 13:56:37 +0000 (14:56 +0100)
committer  Maarten Lankhorst <maarten.lankhorst@canonical.com>
           Tue, 15 Jan 2013 13:56:37 +0000 (14:56 +0100)
There should no longer be assumptions that reserve will always succeed
with the lru lock held, so we can safely break the whole atomic
reserve/lru thing. As a bonus this fixes most lockdep annotations for
reservations.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
include/drm/ttm/ttm_bo_driver.h
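
The change that makes this safe is in the first hunk below: reserve now
uses atomic_xchg() instead of a separate atomic_read()/atomic_set()
pair, so testing and claiming the flag is one atomic step and no longer
needs the lru spinlock to serialize it. A minimal userspace analogy
with C11 atomics (illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int reserved;     /* stand-in for bo->reserved */

    /* Old scheme: only correct while every caller holds the same
     * spinlock, because the test and the claim are two operations. */
    static bool try_reserve_old(void)
    {
            if (atomic_load(&reserved) != 0)
                    return false;
            atomic_store(&reserved, 1); /* a racer may have won in between */
            return true;
    }

    /* New scheme: the exchange tests and claims in one atomic step. */
    static bool try_reserve_new(void)
    {
            return atomic_exchange(&reserved, 1) == 0;
    }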

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 33d20be87db5f4e6070a5c20fe114816b88e7efe..e8e4814b12957b26bf5e08f3ad51814d3c192fa7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
        return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
 {
-       struct ttm_bo_global *glob = bo->glob;
        int ret;
 
-       while (unlikely(atomic_read(&bo->reserved) != 0)) {
+       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
                /**
                 * Deadlock avoidance for multi-bo reserving.
                 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                if (no_wait)
                        return -EBUSY;
 
-               spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
-               spin_lock(&glob->lru_lock);
 
                if (unlikely(ret))
                        return ret;
        }
 
-       atomic_set(&bo->reserved, 1);
        if (use_sequence) {
+               bool wake_up = false;
                /**
                 * Wake up waiters that may need to recheck for deadlock,
                 * if we decreased the sequence number.
                 */
                if (unlikely((bo->val_seq - sequence < (1 << 31))
                             || !bo->seq_valid))
-                       wake_up_all(&bo->event_queue);
+                       wake_up = true;
 
+               /*
+                * In the worst case with memory ordering these values can be
+                * seen in the wrong order. However since we call wake_up_all
+                * in that case, this will hopefully not pose a problem,
+                * and the worst case would only cause someone to accidentally
+                * hit -EAGAIN in ttm_bo_reserve when they see old value of
+                * val_seq. However this would only happen if seq_valid was
+                * written before val_seq was, and just means some slightly
+                * increased cpu usage
+                */
                bo->val_seq = sequence;
                bo->seq_valid = true;
+               if (wake_up)
+                       wake_up_all(&bo->event_queue);
        } else {
                bo->seq_valid = false;
        }
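
The hunk above reorders the wake-up to follow the writes it publishes:
the wake condition is computed from the old values, val_seq and
seq_valid are written, and only then is event_queue woken. Sketched as
a standalone C function (hypothetical types and waker, mirroring the
hunk, not the TTM API):

    #include <stdbool.h>
    #include <stdint.h>

    struct bo_seq {
            uint32_t val_seq;
            bool     seq_valid;
    };

    static void wake_all_waiters(void) { /* wake_up_all() stand-in */ }

    /* Publish-then-wake: decide from the old state whether waiters
     * must recheck, write the new state, then wake.  A waiter that
     * still sees stale values merely retries (the spurious -EAGAIN
     * the comment in the hunk describes); it cannot miss a wakeup
     * for state it has already observed. */
    static void publish_sequence(struct bo_seq *bo, uint32_t sequence)
    {
            bool wake = (bo->val_seq - sequence < (1u << 31)) ||
                        !bo->seq_valid;

            bo->val_seq = sequence;
            bo->seq_valid = true;
            if (wake)
                    wake_all_waiters();
    }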
@@ -289,14 +298,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
        int put_count = 0;
        int ret;
 
-       spin_lock(&glob->lru_lock);
-       ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-                                   sequence);
-       if (likely(ret == 0))
+       ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+                                  sequence);
+       if (likely(ret == 0)) {
+               spin_lock(&glob->lru_lock);
                put_count = ttm_bo_del_from_lru(bo);
-       spin_unlock(&glob->lru_lock);
-
-       ttm_bo_list_ref_sub(bo, put_count, true);
+               spin_unlock(&glob->lru_lock);
+               ttm_bo_list_ref_sub(bo, put_count, true);
+       }
 
        return ret;
 }
@@ -510,7 +519,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
        int ret;
 
        spin_lock(&glob->lru_lock);
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+       ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
@@ -603,7 +612,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                        return ret;
 
                spin_lock(&glob->lru_lock);
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
                /*
                 * We raced, and lost, someone else holds the reservation now,
@@ -667,7 +676,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        kref_get(&nentry->list_kref);
                }
 
-               ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+               ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+               if (remove_all && ret) {
+                       spin_unlock(&glob->lru_lock);
+                       ret = ttm_bo_reserve_nolru(entry, false, false,
+                                                  false, 0);
+                       spin_lock(&glob->lru_lock);
+               }
+
                if (!ret)
                        ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
                                                             !remove_all);
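
The delayed-delete hunk above encodes a classic rule: never block on
one lock while holding a spinlock. It try-reserves under lru_lock, and
only if remove_all insists on success does it drop the spinlock, block,
and retake it. The generic shape, as a userspace sketch with pthreads
(illustrative, not the kernel code):

    #include <pthread.h>

    /* Caller holds *outer (the lru_lock analogue).  Try the inner
     * lock first; if we must have it, drop the outer lock before
     * blocking, then retake it.  Anything read under *outer before
     * the drop must be revalidated afterwards. */
    static int lock_inner_with_fallback(pthread_mutex_t *outer,
                                        pthread_mutex_t *inner,
                                        int must_succeed)
    {
            int ret = pthread_mutex_trylock(inner);

            if (must_succeed && ret != 0) {
                    pthread_mutex_unlock(outer);  /* never sleep under outer */
                    ret = pthread_mutex_lock(inner);
                    pthread_mutex_lock(outer);    /* state may have changed */
            }
            return ret;
    }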
@@ -815,7 +831,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                if (!ret)
                        break;
        }
@@ -1796,7 +1812,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &glob->swap_lru, swap) {
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                if (!ret)
                        break;
        }
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc561efaf0208689a90a4790b4dab986..bd37b5cb8553d80102d12d04a3ba5828848fdec3 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -153,7 +153,7 @@ retry:
                struct ttm_buffer_object *bo = entry->bo;
 
 retry_this_bo:
-               ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+               ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
                switch (ret) {
                case 0:
                        break;
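
The execbuf hunk above passes val_seq for every buffer in the batch;
the kerneldoc in the next file explains why: concurrent reservers are
ordered by their sequence number, and the younger batch backs off with
-EAGAIN rather than deadlock. A toy model of that rule (hypothetical
struct and heavy simplification, mirroring the checks in
ttm_bo_reserve_nolru):

    #include <errno.h>
    #include <stdint.h>

    struct toy_bo {
            int      reserved;
            int      seq_valid;
            uint32_t val_seq;
    };

    /* One step of the back-off rule: if the holder reserved under an
     * *older* sequence, we are the younger batch and must return
     * -EAGAIN, release everything we hold, and retry from scratch.
     * Hitting a bo we already reserved under our own sequence is a
     * self-deadlock, reported as -EDEADLK. */
    static int toy_reserve(struct toy_bo *bo, uint32_t my_seq)
    {
            if (bo->reserved && bo->seq_valid) {
                    if (bo->val_seq == my_seq)
                            return -EDEADLK;
                    if (my_seq - bo->val_seq < (1u << 31))
                            return -EAGAIN;   /* holder is older: back off */
            }
            if (bo->reserved)
                    return -EBUSY;            /* unordered holder: wait */
            bo->reserved = 1;
            bo->val_seq = my_seq;
            bo->seq_valid = 1;
            return 0;
    }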
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c51754ae01c46511b0ed56ddf2fc0..6fff43222e20d76f8794d6f8a1bfe00af872b36a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -832,7 +823,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +831,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
  * it to become unreserved if @sequence < (@bo)->sequence.
  *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
  *
  * Returns:
@@ -855,7 +844,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * -EDEADLK: Bo already reserved using @sequence. This error code will only
  * be returned if @use_sequence is set to true.
  */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
                                 bool interruptible,
                                 bool no_wait, bool use_sequence,
                                 uint32_t sequence);
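
One consequence worth spelling out, visible in the ttm_bo.c hunks
above: since ttm_bo_reserve_nolru() no longer drops the lru spinlock to
sleep, a caller holding lru_lock may only use it in no_wait mode; the
blocking form is for callers holding no spinlock (as in the
delayed-delete fallback). Schematically, with the names from this
patch:

    /* Under the spinlock: trylock only. */
    spin_lock(&glob->lru_lock);
    ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);  /* no_wait */
    spin_unlock(&glob->lru_lock);

    /* Blocking: only with no spinlock held. */
    ret = ttm_bo_reserve_nolru(bo, interruptible, false, false, 0);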