Pileus Git - ~andy/linux/commitdiff
dm thin: add mappings to end of prepared_* lists
author: Mike Snitzer <snitzer@redhat.com>
Wed, 11 Dec 2013 19:01:20 +0000 (14:01 -0500)
committer: Mike Snitzer <snitzer@redhat.com>
Tue, 7 Jan 2014 15:14:25 +0000 (10:14 -0500)
Mappings could be processed in descending logical block order,
particularly if buffered IO is used.  This could adversely affect the
latency of IO processing.  Fix this by adding mappings to the end of the
'prepared_mappings' and 'prepared_discards' lists.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
drivers/md/dm-thin.c

index 1988019df5c915132112773770f464d35b1c4e51..efa3d42ac70ad258ed37721e6d18556e8a268945 100644 (file)
@@ -535,7 +535,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
        struct pool *pool = m->tc->pool;
 
        if (m->quiesced && m->prepared) {
-               list_add(&m->list, &pool->prepared_mappings);
+               list_add_tail(&m->list, &pool->prepared_mappings);
                wake_worker(pool);
        }
 }
@@ -1058,7 +1058,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
                        if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
                                spin_lock_irqsave(&pool->lock, flags);
-                               list_add(&m->list, &pool->prepared_discards);
+                               list_add_tail(&m->list, &pool->prepared_discards);
                                spin_unlock_irqrestore(&pool->lock, flags);
                                wake_worker(pool);
                        }
@@ -2919,7 +2919,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
                if (!list_empty(&work)) {
                        spin_lock_irqsave(&pool->lock, flags);
                        list_for_each_entry_safe(m, tmp, &work, list)
-                               list_add(&m->list, &pool->prepared_discards);
+                               list_add_tail(&m->list, &pool->prepared_discards);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_worker(pool);
                }