diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 87a2d0bdedd1187a695a4d7f25a6d2e5bc2164fe..0689173fd9f568583708c53396631ac8e1838c55 100644
@@ -393,6 +393,8 @@ static int calc_degraded(struct r5conf *conf)
        degraded = 0;
        for (i = 0; i < conf->previous_raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
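+               /* A Faulty device may have an active replacement;
+                * judge this slot by the replacement in that case.
+                */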
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = rcu_dereference(conf->disks[i].replacement);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded++;
                else if (test_bit(In_sync, &rdev->flags))
@@ -417,6 +419,8 @@ static int calc_degraded(struct r5conf *conf)
        degraded2 = 0;
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = rcu_dereference(conf->disks[i].replacement);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded2++;
                else if (test_bit(In_sync, &rdev->flags))
@@ -484,7 +488,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
                } else {
                        if (atomic_read(&sh->count)) {
                                BUG_ON(!list_empty(&sh->lru)
-                                   && !test_bit(STRIPE_EXPANDING, &sh->state));
+                                   && !test_bit(STRIPE_EXPANDING, &sh->state)
+                                   && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
                        } else {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
@@ -1586,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                #ifdef CONFIG_MULTICORE_RAID456
                init_waitqueue_head(&nsh->ops.wait_for_ops);
                #endif
+               spin_lock_init(&nsh->stripe_lock);
 
                list_add(&nsh->lru, &newstripes);
        }
@@ -4010,6 +4016,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
        return sh;
 }
 
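+/*
+ * Per-task plug state: stripes released while a blk_plug is active are
+ * collected on cb->list and released in one batch when the plug is
+ * flushed, so conf->device_lock is taken once per batch rather than
+ * once per stripe.
+ */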
+struct raid5_plug_cb {
+       struct blk_plug_cb      cb;
+       struct list_head        list;
+};
+
+static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
+{
+       struct raid5_plug_cb *cb = container_of(
+               blk_cb, struct raid5_plug_cb, cb);
+       struct stripe_head *sh;
+       struct mddev *mddev = cb->cb.data;
+       struct r5conf *conf = mddev->private;
+
+       if (cb->list.next && !list_empty(&cb->list)) {
+               spin_lock_irq(&conf->device_lock);
+               while (!list_empty(&cb->list)) {
+                       sh = list_first_entry(&cb->list, struct stripe_head, lru);
+                       list_del_init(&sh->lru);
+                       /*
+                        * avoid a race where release_stripe_plug()
+                        * sees STRIPE_ON_UNPLUG_LIST clear while the
+                        * stripe is still on our list
+                        */
+                       smp_mb__before_clear_bit();
+                       clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+                       __release_stripe(conf, sh);
+               }
+               spin_unlock_irq(&conf->device_lock);
+       }
+       kfree(cb);
+}
+
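+/*
+ * Defer the release of a stripe onto the current task's plug list if
+ * blk_check_plugged() finds (or allocates) a plug callback; otherwise
+ * release it immediately.
+ */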
+static void release_stripe_plug(struct mddev *mddev,
+                               struct stripe_head *sh)
+{
+       struct blk_plug_cb *blk_cb = blk_check_plugged(
+               raid5_unplug, mddev,
+               sizeof(struct raid5_plug_cb));
+       struct raid5_plug_cb *cb;
+
+       if (!blk_cb) {
+               release_stripe(sh);
+               return;
+       }
+
+       cb = container_of(blk_cb, struct raid5_plug_cb, cb);
+
+       if (cb->list.next == NULL)
+               INIT_LIST_HEAD(&cb->list);
+
+       if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
+               list_add_tail(&sh->lru, &cb->list);
+       else
+               release_stripe(sh);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bi)
 {
        struct r5conf *conf = mddev->private;
@@ -4135,11 +4197,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
-                       if ((bi->bi_rw & REQ_NOIDLE) &&
+                       if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
-                       mddev_check_plugged(mddev);
-                       release_stripe(sh);
+                       release_stripe_plug(mddev, sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -4537,6 +4598,30 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        return handled;
 }
 
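+/*
+ * Handle up to MAX_STRIPE_BATCH stripes in one go: pull them off the
+ * priority lists, drop conf->device_lock while calling handle_stripe()
+ * on each, then retake the lock to release the whole batch.  Called
+ * (and returns) with device_lock held.
+ */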
+#define MAX_STRIPE_BATCH 8
+static int handle_active_stripes(struct r5conf *conf)
+{
+       struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
+       int i, batch_size = 0;
+
+       while (batch_size < MAX_STRIPE_BATCH &&
+                       (sh = __get_priority_stripe(conf)) != NULL)
+               batch[batch_size++] = sh;
+
+       if (batch_size == 0)
+               return batch_size;
+       spin_unlock_irq(&conf->device_lock);
+
+       for (i = 0; i < batch_size; i++)
+               handle_stripe(batch[i]);
+
+       cond_resched();
+
+       spin_lock_irq(&conf->device_lock);
+       for (i = 0; i < batch_size; i++)
+               __release_stripe(conf, batch[i]);
+       return batch_size;
+}
 
 /*
  * This is our raid5 kernel thread.
@@ -4547,7 +4632,6 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
  */
 static void raid5d(struct mddev *mddev)
 {
-       struct stripe_head *sh;
        struct r5conf *conf = mddev->private;
        int handled;
        struct blk_plug plug;
@@ -4561,6 +4645,7 @@ static void raid5d(struct mddev *mddev)
        spin_lock_irq(&conf->device_lock);
        while (1) {
                struct bio *bio;
+               int batch_size;
 
                if (
                    !list_empty(&conf->bitmap_list)) {
@@ -4584,21 +4669,16 @@ static void raid5d(struct mddev *mddev)
                        handled++;
                }
 
-               sh = __get_priority_stripe(conf);
-
-               if (!sh)
+               batch_size = handle_active_stripes(conf);
+               if (!batch_size)
                        break;
-               spin_unlock_irq(&conf->device_lock);
-               
-               handled++;
-               handle_stripe(sh);
-               release_stripe(sh);
-               cond_resched();
+               handled += batch_size;
 
-               if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+               if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+                       spin_unlock_irq(&conf->device_lock);
                        md_check_recovery(mddev);
-
-               spin_lock_irq(&conf->device_lock);
+                       spin_lock_irq(&conf->device_lock);
+               }
        }
        pr_debug("%d stripes handled\n", handled);