Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab87d9e73eb4fed4ede052c95fa5d891f09..67ca9c3d2939c5e4468d51f0ea0454dfdceac731 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
        int sectors = bio_sectors(bio);
-       if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+       if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
                return NULL;
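
The rename above is the heart of this merge's block-layer rework: a bio's current position no longer lives in bare bi_sector/bi_size/bi_idx fields but in an embedded iterator, bio->bi_iter. Roughly (field comments paraphrased from the new API):

	struct bvec_iter {
		sector_t	bi_sector;	/* device address, in 512-byte sectors */
		unsigned int	bi_size;	/* residual I/O count, in bytes */
		unsigned int	bi_idx;		/* current index into bi_io_vec */
		unsigned int	bi_bvec_done;	/* bytes completed in the current bvec */
	};

Every old bio->bi_sector or bio->bi_size access in raid5.c therefore becomes bio->bi_iter.bi_sector or bio->bi_iter.bi_size, which is what most of the hunks below do.
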
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
-               bi->bi_size = 0;
+               bi->bi_iter.bi_size = 0;
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
                bio_endio(bi, 0);
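
For context, this hunk sits in return_io()'s completion walk: finished bios are threaded through bi_next, and each one is unlinked, has its residual byte count cleared, and is then ended. A minimal sketch of that walk under the new field layout (function name illustrative, tracing omitted):

	static void complete_bio_chain(struct bio *return_bi)
	{
		while (return_bi) {
			struct bio *bi = return_bi;

			return_bi = bi->bi_next;
			bi->bi_next = NULL;
			bi->bi_iter.bi_size = 0;	/* nothing left outstanding */
			bio_endio(bi, 0);
		}
	}
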
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->new_data_offset);
                        else
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->data_offset);
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                                bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
-                       bi->bi_size = STRIPE_SIZE;
+                       bi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                rbi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->new_data_offset);
                        else
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->data_offset);
                        rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
-                       rbi->bi_size = STRIPE_SIZE;
+                       rbi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
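
Both of the hunks above hand-build a one-segment bio per stripe device: a single bio_vec covering the stripe page, with the start sector and byte count now kept in bi_iter. A condensed sketch of that setup (helper name is illustrative; STRIPE_SIZE is raid5's one-page stripe unit):

	static void init_stripe_bio(struct bio *bi, struct page *page, sector_t sector)
	{
		bi->bi_iter.bi_sector = sector;		/* start, in 512-byte sectors */
		bi->bi_vcnt = 1;			/* single segment */
		bi->bi_io_vec[0].bv_page = page;
		bi->bi_io_vec[0].bv_offset = 0;
		bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
		bi->bi_iter.bi_size = STRIPE_SIZE;	/* bytes; must match the bv_len sum */
	}
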
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page *page,
        sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-       struct bio_vec *bvl;
+       struct bio_vec bvl;
+       struct bvec_iter iter;
        struct page *bio_page;
-       int i;
        int page_offset;
        struct async_submit_ctl submit;
        enum async_tx_flags flags = 0;
 
-       if (bio->bi_sector >= sector)
-               page_offset = (signed)(bio->bi_sector - sector) * 512;
+       if (bio->bi_iter.bi_sector >= sector)
+               page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
        else
-               page_offset = (signed)(sector - bio->bi_sector) * -512;
+               page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
        if (frombio)
                flags |= ASYNC_TX_FENCE;
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-       bio_for_each_segment(bvl, bio, i) {
-               int len = bvl->bv_len;
+       bio_for_each_segment(bvl, bio, iter) {
+               int len = bvl.bv_len;
                int clen;
                int b_offset = 0;
 
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        clen = len;
 
                if (clen > 0) {
-                       b_offset += bvl->bv_offset;
-                       bio_page = bvl->bv_page;
+                       b_offset += bvl.bv_offset;
+                       bio_page = bvl.bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
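
This is the only loop in the file that walks a bio's segments, so it also picks up the new bio_for_each_segment() signature: the loop variable is a struct bio_vec handed out by value and the cursor is a private struct bvec_iter, rather than a bio_vec pointer plus an int index, so iteration never modifies the bio itself. A small self-contained sketch of the new idiom (function name illustrative):

	#include <linux/bio.h>

	/* Sum a bio's payload by walking its segments; for a freshly
	 * submitted bio this equals bio->bi_iter.bi_size. */
	static unsigned int bio_payload_bytes(struct bio *bio)
	{
		struct bio_vec bv;	/* copied out by value each iteration */
		struct bvec_iter iter;	/* private cursor over bi_io_vec */
		unsigned int bytes = 0;

		bio_for_each_segment(bv, bio, iter)
			bytes += bv.bv_len;

		return bytes;
	}
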
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
                        spin_unlock_irq(&sh->stripe_lock);
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
                                        dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
                        wbi = dev->written = chosen;
                        spin_unlock_irq(&sh->stripe_lock);
 
-                       while (wbi && wbi->bi_sector <
+                       while (wbi && wbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                if (wbi->bi_rw & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        int firstwrite=0;
 
        pr_debug("adding bi b#%llu to stripe s#%llu\n",
-               (unsigned long long)bi->bi_sector,
+               (unsigned long long)bi->bi_iter.bi_sector,
                (unsigned long long)sh->sector);
 
        /*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
-       while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-               if (bio_end_sector(*bip) > bi->bi_sector)
+       while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+               if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
                        goto overlap;
                bip = & (*bip)->bi_next;
        }
-       if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+       if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
                goto overlap;
 
        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
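
The two rewritten checks above are a sorted insert with overlap rejection: the per-device toread/towrite lists stay ordered by start sector through bi_next, and a bio that intersects its would-be predecessor or successor is bounced so the caller can retry. Stripped of the raid5 bookkeeping, the logic is roughly (function name illustrative):

	/* Link bi into the list at *head, kept sorted by start sector.
	 * Returns 1 on success, 0 if bi overlaps an existing bio. */
	static int insert_sorted_nonoverlapping(struct bio **head, struct bio *bi)
	{
		struct bio **bip = head;

		while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
			if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
				return 0;	/* overlaps a predecessor */
			bip = &(*bip)->bi_next;
		}
		if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
			return 0;		/* overlaps the successor */

		bi->bi_next = *bip;
		*bip = bi;
		return 1;
	}
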
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                sector_t sector = sh->dev[dd_idx].sector;
                for (bi=sh->dev[dd_idx].towrite;
                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-                            bi && bi->bi_sector <= sector;
+                            bi && bi->bi_iter.bi_sector <= sector;
                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
                        if (bio_end_sector(bi) >= sector)
                                sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        }
 
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-               (unsigned long long)(*bip)->bi_sector,
+               (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
        spin_unlock_irq(&sh->stripe_lock);
 
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                        wake_up(&conf->wait_for_overlap);
 
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
                if (bi) bitmap_end = 1;
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
-                       while (bi && bi->bi_sector <
+                       while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                        clear_bit(R5_UPTODATE, &dev->flags);
                                wbi = dev->written;
                                dev->written = NULL;
-                               while (wbi && wbi->bi_sector <
+                               while (wbi && wbi->bi_iter.bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
                                        if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-       sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+       sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
        /*
         *      compute position
         */
-       align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
-                                                   0,
-                                                   &dd_idx, NULL);
+       align_bi->bi_iter.bi_sector =
+               raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+                                    0, &dd_idx, NULL);
 
        end_sector = bio_end_sector(align_bi);
        rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
                if (!bio_fits_rdev(align_bi) ||
-                   is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+                   is_badblock(rdev, align_bi->bi_iter.bi_sector,
+                               bio_sectors(align_bi),
                                &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                }
 
                /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
+               align_bi->bi_iter.bi_sector += rdev->data_offset;
 
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                if (mddev->gendisk)
                        trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
                                              align_bi, disk_devt(mddev->gendisk),
-                                             raid_bio->bi_sector);
+                                             raid_bio->bi_iter.bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                /* Skip discard while reshape is happening */
                return;
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-       last_sector = bi->bi_sector + (bi->bi_size>>9);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
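
A note on units in the hunk above: bi_iter.bi_sector counts 512-byte sectors while bi_iter.bi_size counts bytes, hence the >> 9 when computing the end of the discard range. The stock helpers capture the same arithmetic (paraphrased):

	#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
	#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors(bio))

so last_sector here is just bio_end_sector(bi), the form make_request() uses in the next hunk.
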
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                return;
        }
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
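
make_request(), make_discard_request() above it, and retry_aligned_read() further down all round the bio's start sector down to a stripe boundary with the same mask. STRIPE_SECTORS is a power of two, so clearing its low bits is a cheap round-down; with 4 KiB stripes (STRIPE_SECTORS == 8), for example:

	/* A bio starting at sector 21 belongs to the stripe starting at 16:
	 * 21 & ~7 == 16, and the offset inside the stripe is 21 & 7 == 5. */
	sector_t logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS - 1);
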
@@ -5053,7 +5054,8 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        int remaining;
        int handled = 0;
 
-       logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = raid_bio->bi_iter.bi_sector &
+               ~((sector_t)STRIPE_SECTORS-1);
        sector = raid5_compute_sector(conf, logical_sector,
                                      0, &dd_idx, NULL);
        last_sector = bio_end_sector(raid_bio);