Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 30 Jan 2014 19:19:05 +0000 (11:19 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 30 Jan 2014 19:19:05 +0000 (11:19 -0800)
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_ve series from Kent, the
  rest is fairly minor.  It was supposed to go in last round, but
  various issues pushed it to this release instead.  The pull request
  contains:

   - Various smaller blk-mq fixes from different folks.  Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code
     from Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet.  This
     enables some nice future work on making arbitrarily sized bios
     possible, and splitting more efficient.  Related fixes to immutable
     bio_vecs:

        - dm-cache immutable fixup from Mike Snitzer.
        - btrfs immutable fixup from Muthu Kumar.

  - bio-integrity fix from Nic Bellinger, which is also going to stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...

139 files changed:
Documentation/block/biodoc.txt
Documentation/block/biovecs.txt [new file with mode: 0644]
arch/m68k/emu/nfblock.c
arch/powerpc/sysdev/axonram.c
arch/xtensa/platforms/iss/simdisk.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-integrity.c
block/blk-lib.c
block/blk-map.c
block/blk-merge.c
block/blk-mq-cpu.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/blk-throttle.c
block/cmdline-parser.c
block/elevator.c
block/scsi_ioctl.c
drivers/block/aoe/aoe.h
drivers/block/aoe/aoecmd.c
drivers/block/brd.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/block/nvme-core.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rsxx/dev.c
drivers/block/rsxx/dma.c
drivers/block/umem.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/debug.c
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/util.c
drivers/md/bcache/writeback.c
drivers/md/bcache/writeback.h
drivers/md/dm-bio-record.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-switch.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/dm.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/message/fusion/mptsas.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk_cluster.c
drivers/s390/block/xpram.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/sd.c
drivers/scsi/sd_dif.c
drivers/staging/lustre/lustre/llite/lloop.c
drivers/staging/zram/zram_drv.c
drivers/target/target_core_iblock.c
fs/bio-integrity.c
fs/bio.c
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/buffer.c
fs/direct-io.c
fs/ext4/page-io.c
fs/f2fs/data.c
fs/gfs2/lops.c
fs/gfs2/ops_fstype.c
fs/hfsplus/wrapper.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/logfs/dev_bdev.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nilfs2/segbuf.c
fs/ocfs2/cluster/heartbeat.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/ceph/messenger.h
include/linux/cmdline-parser.h
include/linux/dm-io.h
include/trace/events/bcache.h
include/trace/events/block.h
include/trace/events/f2fs.h
kernel/power/block_io.c
kernel/trace/blktrace.c
mm/bounce.c
mm/page_io.c
net/ceph/messenger.c

diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8df5e8e6dceba06846042d0c6155fd4e986addd8..2101e718670d0248110caa4320e51e83c715fad2 100644
@@ -447,14 +447,13 @@ struct bio_vec {
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-       sector_t            bi_sector;
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;   /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
        unsigned long       bi_rw;       /* low bits: r/w, high: priority */
 
        unsigned int    bi_vcnt;     /* how many bio_vec's */
-       unsigned int    bi_idx;         /* current index into bio_vec array */
+       struct bvec_iter        bi_iter;        /* current index into bio_vec array */
 
        unsigned int    bi_size;     /* total size in bytes */
        unsigned short  bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment.  This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g. a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
   [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.
 
 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
new file mode 100644
index 0000000..74a32ad
--- /dev/null
+++ b/Documentation/block/biovecs.txt
@@ -0,0 +1,111 @@
+
+Immutable biovecs and biovec iterators:
+=======================================
+
+Kent Overstreet <kmo@daterainc.com>
+
+As of 3.13, biovecs should never be modified after a bio has been submitted.
+Instead, we have a new struct bvec_iter which represents a range of a biovec -
+the iterator will be modified as the bio is completed, not the biovec.
+
+More specifically, old code that needed to partially complete a bio would
+update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
+ended up partway through a biovec, it would increment bv_offset and decrement
+bv_len by the number of bytes completed in that biovec.
+
+In the new scheme of things, everything that must be mutated in order to
+partially complete a bio is segregated into struct bvec_iter: bi_sector,
+bi_size and bi_idx have been moved there; and instead of modifying bv_offset
+and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
+bytes completed in the current bvec.
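+
+For reference, the iterator itself is small; it looks roughly like this
+(paraphrased from include/linux/bio.h as of this series):
+
+	struct bvec_iter {
+		sector_t	bi_sector;	/* device address in 512 byte
+						   sectors */
+		unsigned int	bi_size;	/* residual I/O count */
+		unsigned int	bi_idx;		/* current index into bvl_vec */
+		unsigned int	bi_bvec_done;	/* bytes completed in current
+						   bvec */
+	};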
+
+There are a bunch of new helper macros for hiding the gory details - in
+particular, presenting the illusion of partially completed biovecs so that
+normal code doesn't have to deal with bi_bvec_done.
+
+ * Driver code should no longer refer to biovecs directly; we now have
+   bio_iovec() and bio_iter_iovec() macros that return literal struct biovecs,
+   constructed from the raw biovecs but taking into account bi_bvec_done and
+   bi_size.
+
+   bio_for_each_segment() has been updated to take a bvec_iter argument
+   instead of an integer (that corresponded to bi_idx); for a lot of code the
+   conversion just required changing the types of the arguments to
+   bio_for_each_segment().
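+
+   As an illustration, a typical driver conversion (a sketch; the
+   do_something() helper is a made-up placeholder) goes from:
+
+	struct bio_vec *bvec;
+	int i;
+
+	bio_for_each_segment(bvec, bio, i)
+		do_something(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+
+   to:
+
+	struct bio_vec bvec;		/* note: a struct, not a pointer */
+	struct bvec_iter iter;
+
+	bio_for_each_segment(bvec, bio, iter)
+		do_something(bvec.bv_page, bvec.bv_offset, bvec.bv_len);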
+
+ * Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
+   wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
+   advances the bio integrity's iter if present.
+
+   There is a lower level advance function - bvec_iter_advance() - which takes
+   a pointer to a biovec, not a bio; this is used by the bio integrity code.
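+
+   For example, a driver that has finished the first 512 bytes of a bio and
+   wants the bio to reflect that might do (sketch):
+
+	bio_advance(bio, 512);	/* advances bio->bi_iter; biovec untouched */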
+
+What does all this get us?
+==========================
+
+Having a real iterator, and making biovecs immutable, has a number of
+advantages:
+
+ * Before, iterating over bios was very awkward when you weren't processing
+   exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+   which copies the contents of one bio into another. Because the biovecs
+   wouldn't necessarily be the same size, the old code was tricky and
+   convoluted - it had to walk two different bios at the same time, keeping
+   both bi_idx and an offset into the current biovec for each.
+
+   The new code is much more straightforward - have a look. This sort of
+   pattern comes up in a lot of places; a lot of drivers were essentially open
+   coding bvec iterators before, and having a common implementation considerably
+   simplifies a lot of code.
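+
+   The core of the new approach looks roughly like this (a simplified sketch
+   of the bio_copy_data() idea, copying a bio "src" into "dst"; the real
+   function maps highmem pages with kmap_atomic() rather than using
+   page_address()):
+
+	struct bvec_iter src_iter = src->bi_iter;
+	struct bvec_iter dst_iter = dst->bi_iter;
+	struct bio_vec src_bv, dst_bv;
+	unsigned bytes;
+
+	while (src_iter.bi_size && dst_iter.bi_size) {
+		src_bv = bio_iter_iovec(src, src_iter);
+		dst_bv = bio_iter_iovec(dst, dst_iter);
+		bytes = min(src_bv.bv_len, dst_bv.bv_len);
+
+		memcpy(page_address(dst_bv.bv_page) + dst_bv.bv_offset,
+		       page_address(src_bv.bv_page) + src_bv.bv_offset,
+		       bytes);
+
+		bio_advance_iter(src, &src_iter, bytes);
+		bio_advance_iter(dst, &dst_iter, bytes);
+	}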
+
+ * Before, any code that might need to use the biovec after the bio had been
+   completed (perhaps to copy the data somewhere else, or perhaps to resubmit
+   it somewhere else if there was an error) had to save the entire bvec array
+   - again, this was being done in a fair number of places.
+
+ * Biovecs can be shared between multiple bios - a bvec iter can represent an
+   arbitrary range of an existing biovec, both starting and ending midway
+   through biovecs. This is what enables efficient splitting of arbitrary
+   bios. Note that this means we _only_ use bi_size to determine when we've
+   reached the end of a bio, not bi_vcnt - and the bio_iovec() macro takes
+   bi_size into account when constructing biovecs.
+
+ * Splitting bios is now much simpler. The old bio_split() didn't even work on
+   bios with more than a single bvec! Now, we can efficiently split arbitrary
+   size bios - because the new bio can share the old bio's biovec.
+
+   Care must be taken, though, to ensure the biovec isn't freed while the
+   split bio is still using it, in case the original bio completes first.
+   Using bio_chain() when splitting bios helps with this.
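+
+   The resulting usage pattern looks roughly like this (a sketch; error
+   handling and bio_set setup omitted):
+
+	struct bio *split;
+
+	/* carve the first @sectors off of @bio; both bios share one biovec */
+	split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+	bio_chain(split, bio);	/* @bio won't complete before @split does */
+
+	generic_make_request(split);
+	generic_make_request(bio);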
+
+ * Submitting partially completed bios is now perfectly fine - this comes up
+   occasionally in stacking block drivers, and various code (e.g. md and
+   bcache) had some ugly workarounds for this.
+
+   It used to be the case that submitting a partially completed bio would work
+   fine on _most_ devices, but since accessing the raw bvec array was the
+   norm, not all drivers would respect bi_idx and those would break. Now,
+   since all drivers _must_ go through the bvec iterator - and have been
+   audited to make sure they are - submitting partially completed bios is
+   perfectly fine.
+
+Other implications:
+===================
+
+ * Almost all usage of bi_idx is now incorrect and has been removed; instead,
+   where previously you would have used bi_idx you'd now use a bvec_iter,
+   probably passing it to one of the helper macros.
+
+   I.e. instead of using bio_iovec_idx() (or bio->bi_io_vec[bio->bi_idx]), you
+   now use bio_iter_iovec(), which takes a bvec_iter and returns a
+   literal struct bio_vec - constructed on the fly from the raw biovec but
+   taking into account bi_bvec_done (and bi_size).
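+
+   For instance (sketch):
+
+	struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+	/* bv is a bio_vec synthesized on the fly, clamped by bi_bvec_done
+	   and bi_size */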
+
+ * bi_vcnt can't be trusted or relied upon by driver code - i.e. anything that
+   doesn't actually own the bio. The reason is twofold: firstly, it's not
+   actually needed for iterating over the bio anymore - we only use bi_size.
+   Secondly, when cloning a bio and reusing (a portion of) the original bio's
+   biovec, in order to calculate bi_vcnt for the new bio we'd have to iterate
+   over all the biovecs in the new bio - which is silly as it's not needed.
+
+   So, don't use bi_vcnt anymore.
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 0721858fbd1ef6618b288bcbea33369995e35653..2d75ae246167a37f07d71a9bb743702fff686092 100644
@@ -62,17 +62,18 @@ struct nfhd_device {
 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct nfhd_device *dev = queue->queuedata;
-       struct bio_vec *bvec;
-       int i, dir, len, shift;
-       sector_t sec = bio->bi_sector;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       int dir, len, shift;
+       sector_t sec = bio->bi_iter.bi_sector;
 
        dir = bio_data_dir(bio);
        shift = dev->bshift;
-       bio_for_each_segment(bvec, bio, i) {
-               len = bvec->bv_len;
+       bio_for_each_segment(bvec, bio, iter) {
+               len = bvec.bv_len;
                len >>= 9;
                nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-                               bvec_to_phys(bvec));
+                               bvec_to_phys(&bvec));
                sec += len;
        }
        bio_endio(bio, 0);
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141c031c9e2d2512b0d308a8456c15fd1ae1..47b6b9f81d4305537b7d0e6290e178efb4253f4c 100644
@@ -109,27 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
        struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
        unsigned long phys_mem, phys_end;
        void *user_mem;
-       struct bio_vec *vec;
+       struct bio_vec vec;
        unsigned int transfered;
-       unsigned short idx;
+       struct bvec_iter iter;
 
-       phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+       phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+                                   AXON_RAM_SECTOR_SHIFT);
        phys_end = bank->io_addr + bank->size;
        transfered = 0;
-       bio_for_each_segment(vec, bio, idx) {
-               if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+       bio_for_each_segment(vec, bio, iter) {
+               if (unlikely(phys_mem + vec.bv_len > phys_end)) {
                        bio_io_error(bio);
                        return;
                }
 
-               user_mem = page_address(vec->bv_page) + vec->bv_offset;
+               user_mem = page_address(vec.bv_page) + vec.bv_offset;
                if (bio_data_dir(bio) == READ)
-                       memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+                       memcpy(user_mem, (void *) phys_mem, vec.bv_len);
                else
-                       memcpy((void *) phys_mem, user_mem, vec->bv_len);
+                       memcpy((void *) phys_mem, user_mem, vec.bv_len);
 
-               phys_mem += vec->bv_len;
-               transfered += vec->bv_len;
+               phys_mem += vec.bv_len;
+               transfered += vec.bv_len;
        }
        bio_endio(bio, 0);
 }
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 8c6e819cd8edcc26049348a9ca2e4a6d59c677b3..48eebacdf5fe089c0ff8591ddbaf07dc576a2729 100644
@@ -103,18 +103,18 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 
 static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
 {
-       int i;
-       struct bio_vec *bvec;
-       sector_t sector = bio->bi_sector;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       sector_t sector = bio->bi_iter.bi_sector;
 
-       bio_for_each_segment(bvec, bio, i) {
-               char *buffer = __bio_kmap_atomic(bio, i);
-               unsigned len = bvec->bv_len >> SECTOR_SHIFT;
+       bio_for_each_segment(bvec, bio, iter) {
+               char *buffer = __bio_kmap_atomic(bio, iter);
+               unsigned len = bvec.bv_len >> SECTOR_SHIFT;
 
                simdisk_transfer(dev, sector, len, buffer,
                                bio_data_dir(bio) == WRITE);
                sector += len;
-               __bio_kunmap_atomic(bio);
+               __bio_kunmap_atomic(buffer);
        }
        return 0;
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 8bdd0121212a51a1dba3c568c5a6a3e070447318..c00e0bdeab4ab4724c42b379717200d405d9c584 100644
@@ -38,6 +38,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -130,7 +131,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+       if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
                bio_endio(bio, error);
 }
 
@@ -245,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
-       cancel_delayed_work_sync(&q->delay_work);
+
+       if (q->mq_ops) {
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->delayed_work);
+       } else {
+               cancel_delayed_work_sync(&q->delay_work);
+       }
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -497,8 +507,13 @@ void blk_cleanup_queue(struct request_queue *q)
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       spin_lock_irq(lock);
-       __blk_drain_queue(q, true);
+       if (q->mq_ops) {
+               blk_mq_drain_queue(q);
+               spin_lock_irq(lock);
+       } else {
+               spin_lock_irq(lock);
+               __blk_drain_queue(q, true);
+       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
@@ -1326,7 +1341,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = len;
 
-       bio->bi_size = len;
+       bio->bi_iter.bi_size = len;
        bio->bi_vcnt = 1;
        bio->bi_phys_segments = 1;
 
@@ -1351,7 +1366,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 
        req->biotail->bi_next = bio;
        req->biotail = bio;
-       req->__data_len += bio->bi_size;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
        blk_account_io_start(req, false);
@@ -1380,8 +1395,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
         * not touch req->buffer either...
         */
        req->buffer = bio_data(bio);
-       req->__sector = bio->bi_sector;
-       req->__data_len += bio->bi_size;
+       req->__sector = bio->bi_iter.bi_sector;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
        blk_account_io_start(req, false);
@@ -1459,7 +1474,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_FAILFAST_MASK;
 
        req->errors = 0;
-       req->__sector = bio->bi_sector;
+       req->__sector = bio->bi_iter.bi_sector;
        req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1598,12 @@ static inline void blk_partition_remap(struct bio *bio)
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
 
-               bio->bi_sector += p->start_sect;
+               bio->bi_iter.bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
 
                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                                      bdev->bd_dev,
-                                     bio->bi_sector - p->start_sect);
+                                     bio->bi_iter.bi_sector - p->start_sect);
        }
 }
 
@@ -1654,7 +1669,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
        /* Test device or partition size, when known. */
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
 
                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
@@ -1690,7 +1705,7 @@ generic_make_request_checks(struct bio *bio)
                       "generic_make_request: Trying to access "
                        "nonexistent block-device %s (%Lu)\n",
                        bdevname(bio->bi_bdev, b),
-                       (long long) bio->bi_sector);
+                       (long long) bio->bi_iter.bi_sector);
                goto end_io;
        }
 
@@ -1704,9 +1719,9 @@ generic_make_request_checks(struct bio *bio)
        }
 
        part = bio->bi_bdev->bd_part;
-       if (should_fail_request(part, bio->bi_size) ||
+       if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
-                               bio->bi_size))
+                               bio->bi_iter.bi_size))
                goto end_io;
 
        /*
@@ -1865,7 +1880,7 @@ void submit_bio(int rw, struct bio *bio)
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
-                       task_io_account_read(bio->bi_size);
+                       task_io_account_read(bio->bi_iter.bi_size);
                        count_vm_events(PGPGIN, count);
                }
 
@@ -1874,7 +1889,7 @@ void submit_bio(int rw, struct bio *bio)
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
-                               (unsigned long long)bio->bi_sector,
+                               (unsigned long long)bio->bi_iter.bi_sector,
                                bdevname(bio->bi_bdev, b),
                                count);
                }
@@ -2007,7 +2022,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_rw & ff) != ff)
                        break;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
        }
 
        /* this could lead to infinite loop */
@@ -2378,9 +2393,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
-               unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+               unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_size)
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
 
                req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2743,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
-       rq->__data_len = bio->bi_size;
+       rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
 
        if (bio->bi_bdev)
@@ -2746,10 +2761,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 void rq_flush_dcache_pages(struct request *rq)
 {
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
 
        rq_for_each_segment(bvec, rq, iter)
-               flush_dcache_page(bvec->bv_page);
+               flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
 #endif
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c3edf9dff566f47883f3fac946b72e1e208c5286..bbfc072a79c2b5d0921ee84d322e4391d85c529b 100644
@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        rq->rq_disk = bd_disk;
        rq->end_io = done;
 
+       /*
+        * don't check the dying flag for MQ because the request won't
+        * be reused after the dying flag is set
+        */
        if (q->mq_ops) {
                blk_mq_insert_request(q, rq, true);
                return;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fb6f3c0ffa494f4f2adcce6fc35c95ecf383c9a8..9288aaf35c21fc8c0f579fa001f316e697a4dfbb 100644
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
-               *error_sector = bio->bi_sector;
+               *error_sector = bio->bi_iter.bi_sector;
 
        bio_put(bio);
        return ret;
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 03cf7179e8ef1aac2f1698eae57377e65a94f275..7fbab84399e6c9c602c52c8157596535a2ce2e05 100644
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
  */
 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 {
-       struct bio_vec *iv, *ivprv = NULL;
+       struct bio_vec iv, ivprv = { NULL };
        unsigned int segments = 0;
        unsigned int seg_size = 0;
-       unsigned int i = 0;
+       struct bvec_iter iter;
+       int prev = 0;
 
-       bio_for_each_integrity_vec(iv, bio, i) {
+       bio_for_each_integrity_vec(iv, bio, iter) {
 
-               if (ivprv) {
-                       if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+               if (prev) {
+                       if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
                                goto new_segment;
 
-                       if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+                       if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
                                goto new_segment;
 
-                       if (seg_size + iv->bv_len > queue_max_segment_size(q))
+                       if (seg_size + iv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
 
-                       seg_size += iv->bv_len;
+                       seg_size += iv.bv_len;
                } else {
 new_segment:
                        segments++;
-                       seg_size = iv->bv_len;
+                       seg_size = iv.bv_len;
                }
 
+               prev = 1;
                ivprv = iv;
        }
 
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
                            struct scatterlist *sglist)
 {
-       struct bio_vec *iv, *ivprv = NULL;
+       struct bio_vec iv, ivprv = { NULL };
        struct scatterlist *sg = NULL;
        unsigned int segments = 0;
-       unsigned int i = 0;
+       struct bvec_iter iter;
+       int prev = 0;
 
-       bio_for_each_integrity_vec(iv, bio, i) {
+       bio_for_each_integrity_vec(iv, bio, iter) {
 
-               if (ivprv) {
-                       if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+               if (prev) {
+                       if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
                                goto new_segment;
 
-                       if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+                       if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
                                goto new_segment;
 
-                       if (sg->length + iv->bv_len > queue_max_segment_size(q))
+                       if (sg->length + iv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
 
-                       sg->length += iv->bv_len;
+                       sg->length += iv.bv_len;
                } else {
 new_segment:
                        if (!sg)
@@ -114,10 +117,11 @@ new_segment:
                                sg = sg_next(sg);
                        }
 
-                       sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+                       sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
                        segments++;
                }
 
+               prev = 1;
                ivprv = iv;
        }
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9b5b561cb92812fba2562b3bdb6c5bd8be012905..2da76c999ef3f37bd965f9d91b48dac43196a208 100644
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                        req_sects = end_sect - sector;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
 
-               bio->bi_size = req_sects << 9;
+               bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;
 
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
 
                if (nr_sects > max_write_same_sectors) {
-                       bio->bi_size = max_write_same_sectors << 9;
+                       bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
-                       bio->bi_size = nr_sects << 9;
+                       bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
 
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        break;
                }
 
-               bio->bi_sector = sector;
+               bio->bi_iter.bi_sector = sector;
                bio->bi_bdev   = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd4cffe997e71fbb54577bd42f220af64b3..ae4ae1047fd99575473a8b251dff562a151f0719 100644
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
 
-               rq->__data_len += bio->bi_size;
+               rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
 }
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 
        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 
        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       if (bio->bi_size != len) {
+       if (bio->bi_iter.bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc58977835ff2e581c97faa35ee85ebc9a5095..8f8adaa95466ccc8335cde7e313b551b375ed9b9 100644
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
-       struct bio_vec *bv, *bvprv = NULL;
-       int cluster, i, high, highprv = 1;
+       struct bio_vec bv, bvprv = { NULL };
+       int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
+       struct bvec_iter iter;
 
        if (!bio)
                return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
-               bio_for_each_segment(bv, bio, i) {
+               bio_for_each_segment(bv, bio, iter) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
-                       high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-                       if (high || highprv)
-                               goto new_segment;
-                       if (cluster) {
-                               if (seg_size + bv->bv_len
+                       high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+                       if (!high && !highprv && cluster) {
+                               if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
-                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                               if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
-                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                               if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;
 
-                               seg_size += bv->bv_len;
+                               seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
@@ -54,7 +53,7 @@ new_segment:
 
                        nr_phys_segs++;
                        bvprv = bv;
-                       seg_size = bv->bv_len;
+                       seg_size = bv.bv_len;
                        highprv = high;
                }
                bbio = bio;
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
+       struct bio_vec end_bv = { NULL }, nxt_bv;
+       struct bvec_iter iter;
+
        if (!blk_queue_cluster(q))
                return 0;
 
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
        if (!bio_has_data(bio))
                return 1;
 
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+       bio_for_each_segment(end_bv, bio, iter)
+               if (end_bv.bv_len == iter.bi_size)
+                       break;
+
+       nxt_bv = bio_iovec(nxt);
+
+       if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;
 
        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
-       if (BIO_SEG_BOUNDARY(q, bio, nxt))
+       if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;
 
        return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-                    struct scatterlist *sglist, struct bio_vec **bvprv,
+                    struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
        int nbytes = bvec->bv_len;
 
-       if (*bvprv && *cluster) {
+       if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;
 
-               if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
-               if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;
 
                (*sg)->length += nbytes;
@@ -150,7 +158,7 @@ new_segment:
                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
-       *bvprv = bvec;
+       *bvprv = *bvec;
 }
 
 /*
@@ -160,7 +168,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
 {
-       struct bio_vec *bvec, *bvprv;
+       struct bio_vec bvec, bvprv = { NULL };
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        /*
         * for each bio in rq
         */
-       bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
-               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+               __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in rq */
 
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
 {
-       struct bio_vec *bvec, *bvprv;
+       struct bio_vec bvec, bvprv = { NULL };
        struct scatterlist *sg;
        int nsegs, cluster;
-       unsigned long i;
+       struct bvec_iter iter;
 
        nsegs = 0;
        cluster = blk_queue_cluster(q);
 
-       bvprv = NULL;
        sg = NULL;
-       bio_for_each_segment(bvec, bio, i) {
-               __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+       bio_for_each_segment(bvec, bio, iter) {
+               __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in bio */
 
@@ -543,9 +549,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+       if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
-       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+       else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
 }
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 0045ace9bdf0301f724463c2e37ae8b62e9902cb..3146befb56aaac7b925428d0afee89c9e083094a 100644
@@ -28,36 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static void blk_mq_cpu_notify(void *data, unsigned long action,
-                             unsigned int cpu)
-{
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               /*
-                * If the CPU goes away, ensure that we run any pending
-                * completions.
-                */
-               struct llist_node *node;
-               struct request *rq;
-
-               local_irq_disable();
-
-               node = llist_del_all(&per_cpu(ipi_lists, cpu));
-               while (node) {
-                       struct llist_node *next = node->next;
-
-                       rq = llist_entry(node, struct request, ll_list);
-                       __blk_mq_end_io(rq, rq->errors);
-                       node = next;
-               }
-
-               local_irq_enable();
-       }
-}
-
-static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
-       .notifier_call  = blk_mq_main_cpu_notify,
-};
-
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
        BUG_ON(!notifier->notify);
@@ -82,12 +52,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
        notifier->data = data;
 }
 
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
-       .notify = blk_mq_cpu_notify,
-};
-
 void __init blk_mq_cpu_init(void)
 {
-       register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
-       blk_mq_register_cpu_notifier(&cpu_notifier);
+       hotcpu_notifier(blk_mq_main_cpu_notify, 0);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c79126e110308e8b1ea4b322506a425ceeb3085c..57039fcd9c93e7c3e014842fbbcaf2fc6550edd1 100644
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
 {
@@ -106,10 +104,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
        ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-               !blk_queue_bypass(q), *q->queue_lock);
+               !blk_queue_bypass(q) || blk_queue_dying(q),
+               *q->queue_lock);
        /* inc usage with lock hold to avoid freeze_queue runs here */
-       if (!ret)
+       if (!ret && !blk_queue_dying(q))
                __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+       else if (blk_queue_dying(q))
+               ret = -ENODEV;
        spin_unlock_irq(q->queue_lock);
 
        return ret;
@@ -120,6 +121,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+       while (true) {
+               s64 count;
+
+               spin_lock_irq(q->queue_lock);
+               count = percpu_counter_sum(&q->mq_usage_counter);
+               spin_unlock_irq(q->queue_lock);
+
+               if (count == 0)
+                       break;
+               blk_mq_run_queues(q, false);
+               msleep(10);
+       }
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +150,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);
 
-       if (!drain)
-               return;
-
-       while (true) {
-               s64 count;
-
-               spin_lock_irq(q->queue_lock);
-               count = percpu_counter_sum(&q->mq_usage_counter);
-               spin_unlock_irq(q->queue_lock);
+       if (drain)
+               __blk_mq_drain_queue(q);
+}
 
-               if (count == 0)
-                       break;
-               blk_mq_run_queues(q, false);
-               msleep(10);
-       }
+void blk_mq_drain_queue(struct request_queue *q)
+{
+       __blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -179,6 +188,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 
        rq->mq_ctx = ctx;
        rq->cmd_flags = rw_flags;
+       rq->start_time = jiffies;
+       set_start_time_ns(rq);
        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
@@ -305,7 +316,7 @@ void blk_mq_complete_request(struct request *rq, int error)
                struct bio *next = bio->bi_next;
 
                bio->bi_next = NULL;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
                blk_mq_bio_endio(rq, bio, error);
                bio = next;
        }
@@ -326,55 +337,12 @@ void __blk_mq_end_io(struct request *rq, int error)
                blk_mq_complete_request(rq, error);
 }
 
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
-{
-       struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-       struct llist_node *entry, *next;
-       struct request *rq;
-
-       entry = llist_del_all(list);
-
-       while (entry) {
-               next = entry->next;
-               rq = llist_entry(entry, struct request, ll_list);
-               __blk_mq_end_io(rq, rq->errors);
-               entry = next;
-       }
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-                         struct request *rq, const int error)
+static void blk_mq_end_io_remote(void *data)
 {
-       struct call_single_data *data = &rq->csd;
-
-       rq->errors = error;
-       rq->ll_list.next = NULL;
+       struct request *rq = data;
 
-       /*
-        * If the list is non-empty, an existing IPI must already
-        * be "in flight". If that is the case, we need not schedule
-        * a new one.
-        */
-       if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-               data->func = ipi_end_io;
-               data->flags = 0;
-               __smp_call_function_single(ctx->cpu, data, 0);
-       }
-
-       return true;
-}
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-                         struct request *rq, const int error)
-{
-       return false;
+       __blk_mq_end_io(rq, rq->errors);
 }
-#endif
 
 /*
  * End IO on this request on a multiqueue enabled driver. We'll either do
@@ -390,11 +358,15 @@ void blk_mq_end_io(struct request *rq, int error)
                return __blk_mq_end_io(rq, error);
 
        cpu = get_cpu();
-
-       if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-           !ipi_remote_cpu(ctx, cpu, rq, error))
+       if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+               rq->errors = error;
+               rq->csd.func = blk_mq_end_io_remote;
+               rq->csd.info = rq;
+               rq->csd.flags = 0;
+               __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+       } else {
                __blk_mq_end_io(rq, error);
-
+       }
        put_cpu();
 }
 EXPORT_SYMBOL(blk_mq_end_io);
@@ -1091,8 +1063,8 @@ static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
        struct page *page;
 
        while (!list_empty(&hctx->page_list)) {
-               page = list_first_entry(&hctx->page_list, struct page, list);
-               list_del_init(&page->list);
+               page = list_first_entry(&hctx->page_list, struct page, lru);
+               list_del_init(&page->lru);
                __free_pages(page, page->private);
        }
 
@@ -1156,7 +1128,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
                        break;
 
                page->private = this_order;
-               list_add_tail(&page->list, &hctx->page_list);
+               list_add_tail(&page->lru, &hctx->page_list);
 
                p = page_address(page);
                entries_per_page = order_to_size(this_order) / rq_size;
@@ -1429,7 +1401,6 @@ void blk_mq_free_queue(struct request_queue *q)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               cancel_delayed_work_sync(&hctx->delayed_work);
                kfree(hctx->ctx_map);
                kfree(hctx->ctxs);
                blk_mq_free_rq_map(hctx);
@@ -1451,7 +1422,6 @@ void blk_mq_free_queue(struct request_queue *q)
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);
 }
-EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
@@ -1495,11 +1465,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 
 static int __init blk_mq_init(void)
 {
-       unsigned int i;
-
-       for_each_possible_cpu(i)
-               init_llist_head(&per_cpu(ipi_lists, i));
-
        blk_mq_cpu_init();
 
        /* Must be called after percpu_counter_hotcpu_callback() */
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 52bf1f96a2c239195e564fb5bdb19164709770f4..5c3917984b005f13ea35254074744ec91f2e5bd3 100644
@@ -27,6 +27,8 @@ void blk_mq_complete_request(struct request *rq, int error);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
+void blk_mq_free_queue(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
@@ -38,7 +40,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
 
 /*
  * CPU -> queue mappings
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 97779522472f8356d5b09e91a33b1b310293d230..8095c4a21fc0f53e6e46ff191de283500dcc97de 100644
@@ -11,6 +11,7 @@
 
 #include "blk.h"
 #include "blk-cgroup.h"
+#include "blk-mq.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a760857e6b62609dde239ad74aebe2b5ac2ebaac..1474c3ab7e72cb85698ffe8bb3687df66729281b 100644
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        do_div(tmp, HZ);
        bytes_allowed = tmp;
 
-       if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+       if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return 1;
        }
 
        /* Calc approx time to dispatch */
-       extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+       extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 
        if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
        bool rw = bio_data_dir(bio);
 
        /* Charge the bio to the group */
-       tg->bytes_disp[rw] += bio->bi_size;
+       tg->bytes_disp[rw] += bio->bi_iter.bi_size;
        tg->io_disp[rw]++;
 
        /*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         */
        if (!(bio->bi_rw & REQ_THROTTLED)) {
                bio->bi_rw |= REQ_THROTTLED;
-               throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-                                            bio->bi_rw);
+               throtl_update_dispatch_stats(tg_to_blkg(tg),
+                                            bio->bi_iter.bi_size, bio->bi_rw);
        }
 }
 
@@ -1503,7 +1503,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        if (tg) {
                if (!tg->has_rules[rw]) {
                        throtl_update_dispatch_stats(tg_to_blkg(tg),
-                                                    bio->bi_size, bio->bi_rw);
+                                       bio->bi_iter.bi_size, bio->bi_rw);
                        goto out_unlock_rcu;
                }
        }
@@ -1559,7 +1559,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
        /* out-of-limit, queue to @tg */
        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
                   rw == READ ? 'R' : 'W',
-                  tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+                  tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
                   tg->io_disp[rw], tg->iops[rw],
                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
index cc2637f8674ed61df149fb3bf51da4cc3a04f7cf..9dbc67e42a993193fb56d169cab00ecf4b825ef0 100644
@@ -4,8 +4,7 @@
  * Written by Cai Zhiyong <caizhiyong@huawei.com>
  *
  */
-#include <linux/buffer_head.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/cmdline-parser.h>
 
 static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
@@ -159,6 +158,7 @@ void cmdline_parts_free(struct cmdline_parts **parts)
                *parts = next_parts;
        }
 }
+EXPORT_SYMBOL(cmdline_parts_free);
 
 int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
 {
@@ -206,6 +206,7 @@ fail:
        cmdline_parts_free(parts);
        goto done;
 }
+EXPORT_SYMBOL(cmdline_parts_parse);
 
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                         const char *bdev)
@@ -214,17 +215,17 @@ struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                parts = parts->next_parts;
        return parts;
 }
+EXPORT_SYMBOL(cmdline_parts_find);
 
 /*
  *  add_part()
  *    0 success.
  *    1 can not add so many partitions.
  */
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param)
-
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param)
 {
        sector_t from = 0;
        struct cmdline_subpart *subpart;
@@ -247,4 +248,7 @@ void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
                if (add_part(slot, subpart, param))
                        break;
        }
+
+       return slot;
 }
+EXPORT_SYMBOL(cmdline_parts_set);
diff --git a/block/elevator.c b/block/elevator.c
index b7ff2861b6bdc0bd8e57528ac776fc9b923b5c70..42c45a7d67144a5598f5d7b2242a63eb9d58e292 100644
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
        /*
         * See if our hash lookup can find a potential backmerge.
         */
-       __rq = elv_rqhash_find(q, bio->bi_sector);
+       __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 625e3e471d65f55495bd639b0418e8ad85d53d62..26487972ac549ba899a723201125e5b3c59934ff 100644
@@ -323,12 +323,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
        if (hdr->iovec_count) {
                size_t iov_data_len;
-               struct iovec *iov;
+               struct iovec *iov = NULL;
 
                ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
                                            0, NULL, &iov);
-               if (ret < 0)
+               if (ret < 0) {
+                       kfree(iov);
                        goto out;
+               }
 
                iov_data_len = ret;
                ret = 0;
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318b99fe764108420b143159c2bca32..9220f8e833d08228297373d116c308176c8d44a8 100644
@@ -100,11 +100,8 @@ enum {
 
 struct buf {
        ulong nframesout;
-       ulong resid;
-       ulong bv_resid;
-       sector_t sector;
        struct bio *bio;
-       struct bio_vec *bv;
+       struct bvec_iter iter;
        struct request *rq;
 };
 
@@ -120,13 +117,10 @@ struct frame {
        ulong waited;
        ulong waited_total;
        struct aoetgt *t;               /* parent target I belong to */
-       sector_t lba;
        struct sk_buff *skb;            /* command skb freed on module exit */
        struct sk_buff *r_skb;          /* response skb for async processing */
        struct buf *buf;
-       struct bio_vec *bv;
-       ulong bcnt;
-       ulong bv_off;
+       struct bvec_iter iter;
        char flags;
 };
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f2f87215558ec703f5a625ab8ac80..8184451b57c04999bd23c286080e14ca7f069031 100644
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
 
        t = f->t;
        f->buf = NULL;
-       f->lba = 0;
-       f->bv = NULL;
+       memset(&f->iter, 0, sizeof(f->iter));
        f->r_skb = NULL;
        f->flags = 0;
        list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
 }
 
 static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
 {
        int frag = 0;
-       ulong fcnt;
-loop:
-       fcnt = bv->bv_len - (off - bv->bv_offset);
-       if (fcnt > cnt)
-               fcnt = cnt;
-       skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
-       cnt -= fcnt;
-       if (cnt <= 0)
-               return;
-       bv++;
-       off = bv->bv_offset;
-       goto loop;
+       struct bio_vec bv;
+
+       __bio_for_each_segment(bv, bio, iter, iter)
+               skb_fill_page_desc(skb, frag++, bv.bv_page,
+                                  bv.bv_offset, bv.bv_len);
 }
 
 static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
        t->nout++;
        f->waited = 0;
        f->waited_total = 0;
-       if (f->buf)
-               f->lba = f->buf->sector;
 
        /* set up ata header */
-       ah->scnt = f->bcnt >> 9;
-       put_lba(ah, f->lba);
+       ah->scnt = f->iter.bi_size >> 9;
+       put_lba(ah, f->iter.bi_sector);
        if (t->d->flags & DEVFL_EXT) {
                ah->aflags |= AOEAFL_EXT;
        } else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
                ah->lba3 |= 0xe0;       /* LBA bit + obsolete 0xa0 */
        }
        if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
-               skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+               skb_fillup(skb, f->buf->bio, f->iter);
                ah->aflags |= AOEAFL_WRITE;
-               skb->len += f->bcnt;
-               skb->data_len = f->bcnt;
-               skb->truesize += f->bcnt;
+               skb->len += f->iter.bi_size;
+               skb->data_len = f->iter.bi_size;
+               skb->truesize += f->iter.bi_size;
                t->wpkts++;
        } else {
                t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
        struct buf *buf;
        struct sk_buff *skb;
        struct sk_buff_head queue;
-       ulong bcnt, fbcnt;
 
        buf = nextbuf(d);
        if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
        f = newframe(d);
        if (f == NULL)
                return 0;
-       bcnt = d->maxbcnt;
-       if (bcnt == 0)
-               bcnt = DEFAULTBCNT;
-       if (bcnt > buf->resid)
-               bcnt = buf->resid;
-       fbcnt = bcnt;
-       f->bv = buf->bv;
-       f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
-       do {
-               if (fbcnt < buf->bv_resid) {
-                       buf->bv_resid -= fbcnt;
-                       buf->resid -= fbcnt;
-                       break;
-               }
-               fbcnt -= buf->bv_resid;
-               buf->resid -= buf->bv_resid;
-               if (buf->resid == 0) {
-                       d->ip.buf = NULL;
-                       break;
-               }
-               buf->bv++;
-               buf->bv_resid = buf->bv->bv_len;
-               WARN_ON(buf->bv_resid == 0);
-       } while (fbcnt);
 
        /* initialize the headers & frame */
        f->buf = buf;
-       f->bcnt = bcnt;
-       ata_rw_frameinit(f);
+       f->iter = buf->iter;
+       f->iter.bi_size = min_t(unsigned long,
+                               d->maxbcnt ?: DEFAULTBCNT,
+                               f->iter.bi_size);
+       bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+       if (!buf->iter.bi_size)
+               d->ip.buf = NULL;
 
        /* mark all tracking fields and load out */
        buf->nframesout += 1;
-       buf->sector += bcnt >> 9;
+
+       ata_rw_frameinit(f);
 
        skb = skb_clone(f->skb, GFP_ATOMIC);
        if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
        skb = nf->skb;
        nf->skb = f->skb;
        nf->buf = f->buf;
-       nf->bcnt = f->bcnt;
-       nf->lba = f->lba;
-       nf->bv = f->bv;
-       nf->bv_off = f->bv_off;
+       nf->iter = f->iter;
        nf->waited = 0;
        nf->waited_total = f->waited_total;
        nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
        }
        f->flags |= FFL_PROBE;
        ifrotate(t);
-       f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+       f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
        ata_rw_frameinit(f);
        skb = f->skb;
-       for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+       for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
                if (n < PAGE_SIZE)
                        m = n;
                else
                        m = PAGE_SIZE;
                skb_fill_page_desc(skb, frag, empty_page, 0, m);
        }
-       skb->len += f->bcnt;
-       skb->data_len = f->bcnt;
-       skb->truesize += f->bcnt;
+       skb->len += f->iter.bi_size;
+       skb->data_len = f->iter.bi_size;
+       skb->truesize += f->iter.bi_size;
 
        skb = skb_clone(f->skb, GFP_ATOMIC);
        if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
 static void
 bio_pageinc(struct bio *bio)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
        struct page *page;
-       int i;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
+       bio_for_each_segment(bv, bio, iter) {
                /* Non-zero page count for non-head members of
                 * compound pages is no longer allowed by the kernel.
                 */
-               page = compound_trans_head(bv->bv_page);
+               page = compound_trans_head(bv.bv_page);
                atomic_inc(&page->_count);
        }
 }
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
 static void
 bio_pagedec(struct bio *bio)
 {
-       struct bio_vec *bv;
        struct page *page;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
-               page = compound_trans_head(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               page = compound_trans_head(bv.bv_page);
                atomic_dec(&page->_count);
        }
 }
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
        memset(buf, 0, sizeof(*buf));
        buf->rq = rq;
        buf->bio = bio;
-       buf->resid = bio->bi_size;
-       buf->sector = bio->bi_sector;
+       buf->iter = bio->bi_iter;
        bio_pageinc(bio);
-       buf->bv = bio_iovec(bio);
-       buf->bv_resid = buf->bv->bv_len;
-       WARN_ON(buf->bv_resid == 0);
 }
 
 static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
 }
 
 static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
 {
-       ulong fcnt;
-       char *p;
        int soff = 0;
-loop:
-       fcnt = bv->bv_len - (off - bv->bv_offset);
-       if (fcnt > cnt)
-               fcnt = cnt;
-       p = page_address(bv->bv_page) + off;
-       skb_copy_bits(skb, soff, p, fcnt);
-       soff += fcnt;
-       cnt -= fcnt;
-       if (cnt <= 0)
-               return;
-       bv++;
-       off = bv->bv_offset;
-       goto loop;
+       struct bio_vec bv;
+
+       iter.bi_size = cnt;
+
+       __bio_for_each_segment(bv, bio, iter, iter) {
+               char *p = page_address(bv.bv_page) + bv.bv_offset;
+               skb_copy_bits(skb, soff, p, bv.bv_len);
+               soff += bv.bv_len;
+       }
 }
 
 void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
        do {
                bio = rq->bio;
                bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb:           if (buf)
                        clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
                        break;
                }
-               bvcpy(f->bv, f->bv_off, skb, n);
+               if (n > f->iter.bi_size) {
+                       pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
+                               "aoe: too-large data size in read from",
+                               (long) d->aoemajor, d->aoeminor,
+                               n, f->iter.bi_size);
+                       clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+                       break;
+               }
+               bvcpy(skb, f->buf->bio, f->iter, n);
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
                spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
 
        aoe_freetframe(f);
 
-       if (buf && --buf->nframesout == 0 && buf->resid == 0)
+       if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
                aoe_end_buf(d, buf);
 
        spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 {
        if (buf == NULL)
                return;
-       buf->resid = 0;
+       buf->iter.bi_size = 0;
        clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
        if (buf->nframesout == 0)
                aoe_end_buf(d, buf);
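
Note: aoecmd_ata_rw() above shows the general recipe for carving a bio into fixed-size frames without mutating the bio itself: take a private copy of the iterator, clamp its bi_size, then advance the original past the piece just claimed. Distilled into a hedged sketch — issue_chunk() and the surrounding driver context are illustrative:

        /* carve the next max_chunk bytes off *pos and hand them out */
        static void carve(struct bio *bio, struct bvec_iter *pos,
                          unsigned int max_chunk)
        {
                struct bvec_iter chunk = *pos;          /* private copy */

                chunk.bi_size = min(chunk.bi_size, max_chunk);
                bio_advance_iter(bio, pos, chunk.bi_size);

                /* walk just this piece with __bio_for_each_segment() */
                issue_chunk(bio, chunk);
        }
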
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e8617f56c019bfb6389bb79f71fa8ad2..e73b85cf0756876adbf4ad9b8220fc466bcdb412 100644
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        struct block_device *bdev = bio->bi_bdev;
        struct brd_device *brd = bdev->bd_disk->private_data;
        int rw;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
        sector_t sector;
-       int i;
+       struct bvec_iter iter;
        int err = -EIO;
 
-       sector = bio->bi_sector;
+       sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
                goto out;
 
        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                err = 0;
-               discard_from_brd(brd, sector, bio->bi_size);
+               discard_from_brd(brd, sector, bio->bi_iter.bi_size);
                goto out;
        }
 
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
        if (rw == READA)
                rw = READ;
 
-       bio_for_each_segment(bvec, bio, i) {
-               unsigned int len = bvec->bv_len;
-               err = brd_do_bvec(brd, bvec->bv_page, len,
-                                       bvec->bv_offset, rw, sector);
+       bio_for_each_segment(bvec, bio, iter) {
+               unsigned int len = bvec.bv_len;
+               err = brd_do_bvec(brd, bvec.bv_page, len,
+                                       bvec.bv_offset, rw, sector);
                if (err)
                        break;
                sector += len >> SECTOR_SHIFT;
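
Note: bvec is declared by value in all of these loops because the iterator synthesizes each bio_vec on the fly — a segment may be the tail half of a vector entry that an earlier split already consumed, so there is no stable struct bio_vec in the bio to point at. Simplified from this series' include/linux/bio.h, the loop macro is roughly:

        #define bio_for_each_segment(bvl, bio, iter)                    \
                for (iter = (bio)->bi_iter;                             \
                     (iter).bi_size &&                                  \
                        ((bvl = bio_iter_iovec((bio), (iter))), 1);     \
                     bio_advance_iter((bio), &(iter), (bvl).bv_len))
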
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8f7f9b1741492785fa2f18e042d8c..a9b13f2cc420b055b8089fde687d601a23b8e635 100644
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
        bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        err = -EIO;
        if (bio_add_page(bio, page, size, 0) != size)
                goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd21e405fe75276d656e3519c8ad873..597f111df67b3597987eb816f8ff2ca2ec17c285 100644
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
        } else
                page = b->bm_pages[page_nr];
        bio->bi_bdev = mdev->ldev->md_bdev;
-       bio->bi_sector = on_disk_sector;
+       bio->bi_iter.bi_sector = on_disk_sector;
        /* bio_add_page of a single page to an empty bio will always succeed,
         * according to api.  Do we want to assert that? */
        bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc8321e5883a1ef1b3dfe9542e7ea619..929468e1512a687d44bb310b8e3cc94a6b15161d 100644
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+
        /* hint all but last page with MSG_MORE */
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int err;
 
-               err = _drbd_no_send_page(mdev, bvec->bv_page,
-                                        bvec->bv_offset, bvec->bv_len,
-                                        i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+               err = _drbd_no_send_page(mdev, bvec.bv_page,
+                                        bvec.bv_offset, bvec.bv_len,
+                                        bio_iter_last(bvec, iter)
+                                        ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+
        /* hint all but last page with MSG_MORE */
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int err;
 
-               err = _drbd_send_page(mdev, bvec->bv_page,
-                                     bvec->bv_offset, bvec->bv_len,
-                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+               err = _drbd_send_page(mdev, bvec.bv_page,
+                                     bvec.bv_offset, bvec.bv_len,
+                                     bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
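
Note: the old `i == bio->bi_vcnt - 1` test for the last segment is no longer valid — with immutable biovecs a bio may begin or end mid-vector, so vector indices do not line up with what the iterator will actually visit. bio_iter_last() asks the iterator instead; in this series it is approximately:

        /* true when bvec is the final segment the iterator will produce */
        #define bio_iter_last(bvec, iter)       ((iter).bi_size == (bvec).bv_len)
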
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b396765b58142e8e8abcdc4beaae05..d073305ffd5e76e17a7d0804ad92be1ad82bba67 100644
@@ -1333,7 +1333,7 @@ next_bio:
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
-                                       len, (unsigned long long)bio->bi_sector);
+                                       len, (uint64_t)bio->bi_iter.bi_sector);
                                err = -ENOSPC;
                                goto fail;
                        }
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                           sector_t sector, int data_size)
 {
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct bio *bio;
-       int dgs, err, i, expect;
+       int dgs, err, expect;
        void *dig_in = mdev->tconn->int_dig_in;
        void *dig_vv = mdev->tconn->int_dig_vv;
 
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        mdev->recv_cnt += data_size>>9;
 
        bio = req->master_bio;
-       D_ASSERT(sector == bio->bi_sector);
+       D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-       bio_for_each_segment(bvec, bio, i) {
-               void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-               expect = min_t(int, data_size, bvec->bv_len);
+       bio_for_each_segment(bvec, bio, iter) {
+               void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+               expect = min_t(int, data_size, bvec.bv_len);
                err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-               kunmap(bvec->bv_page);
+               kunmap(bvec.bv_page);
                if (err)
                        return err;
                data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994cf8b76e595f69b5e34b42cdaf230..104a040f24de74141274b364ee625aa49eefbb94 100644
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
        req->epoch       = 0;
 
        drbd_clear_interval(&req->i);
-       req->i.sector     = bio_src->bi_sector;
-       req->i.size      = bio_src->bi_size;
+       req->i.sector     = bio_src->bi_iter.bi_sector;
+       req->i.size      = bio_src->bi_iter.bi_size;
        req->i.local = true;
        req->i.waiting = false;
 
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        /*
         * what we "blindly" assume:
         */
-       D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+       D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
        inc_ap_bio(mdev);
        __drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98845fb8ca49838cfb5ec2478170f7..28e15d91197af1b234e43136740709adc090bcde 100644
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
 
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
        struct bio *bio;
        int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292c84998f7357b82e3a80cdc0f6484..84d3175d493aaef91edabd97297238a717f6973c 100644
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 {
        struct hash_desc desc;
        struct scatterlist sg;
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        desc.tfm = tfm;
        desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
        sg_init_table(&sg, 1);
        crypto_hash_init(&desc);
 
-       bio_for_each_segment(bvec, bio, i) {
-               sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+       bio_for_each_segment(bvec, bio, iter) {
+               sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                crypto_hash_update(&desc, &sg, sg.length);
        }
        crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c60f06d5fac31a2a4889cddd22b15..6b29c4422828a4dc5442bf19b1a2c719d5d2377c 100644
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
        int size;
        struct req_iterator iter;
        char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
        size = 0;
 
        rq_for_each_segment(bv, current_req, iter) {
-               if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+               if (page_address(bv.bv_page) + bv.bv_offset != base + size)
                        break;
 
-               size += bv->bv_len;
+               size += bv.bv_len;
        }
 
        return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
        int remaining;          /* number of transferred 512-byte sectors */
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *buffer;
        char *dma_buffer;
        int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
                if (!remaining)
                        break;
 
-               size = bv->bv_len;
+               size = bv.bv_len;
                SUPBOUND(size, remaining);
 
-               buffer = page_address(bv->bv_page) + bv->bv_offset;
+               buffer = page_address(bv.bv_page) + bv.bv_offset;
                if (dma_buffer + size >
                    floppy_track_buffer + (max_buffer_sectors << 10) ||
                    dma_buffer < floppy_track_buffer) {
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
        bio_vec.bv_len = size;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = size;
+       bio.bi_iter.bi_size = size;
        bio.bi_bdev = bdev;
-       bio.bi_sector = 0;
+       bio.bi_iter.bi_sector = 0;
        bio.bi_flags = (1 << BIO_QUIET);
        init_completion(&complete);
        bio.bi_private = &complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac730524408f63e78cb9acdae8294aaf8dafa..33fde3a3975954c793d0d2e9feb846e5e6a2bdc2 100644
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 {
        int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
                        struct page *page);
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct page *page = NULL;
-       int i, ret = 0;
+       int ret = 0;
 
        if (lo->transfer != transfer_none) {
                page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
                do_lo_send = do_lo_send_direct_write;
        }
 
-       bio_for_each_segment(bvec, bio, i) {
-               ret = do_lo_send(lo, bvec, pos, page);
+       bio_for_each_segment(bvec, bio, iter) {
+               ret = do_lo_send(lo, &bvec, pos, page);
                if (ret < 0)
                        break;
-               pos += bvec->bv_len;
+               pos += bvec.bv_len;
        }
        if (page) {
                kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        ssize_t s;
-       int i;
 
-       bio_for_each_segment(bvec, bio, i) {
-               s = do_lo_receive(lo, bvec, bsize, pos);
+       bio_for_each_segment(bvec, bio, iter) {
+               s = do_lo_receive(lo, &bvec, bsize, pos);
                if (s < 0)
                        return s;
 
-               if (s != bvec->bv_len) {
+               if (s != bvec.bv_len) {
                        zero_fill_bio(bio);
                        break;
                }
-               pos += bvec->bv_len;
+               pos += bvec.bv_len;
        }
        return 0;
 }
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        loff_t pos;
        int ret;
 
-       pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
                struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
                                goto out;
                        }
                        ret = file->f_op->fallocate(file, mode, pos,
-                                                   bio->bi_size);
+                                                   bio->bi_iter.bi_size);
                        if (unlikely(ret && ret != -EINVAL &&
                                     ret != -EOPNOTSUPP))
                                ret = -EIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f146340281992e341ab86c93d79c33d..52b2f2a714708a756b4adcee140e8ae2a91f5006 100644
@@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct driver_data *dd = queue->queuedata;
        struct scatterlist *sg;
-       struct bio_vec *bvec;
-       int i, nents = 0;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       int nents = 0;
        int tag = 0, unaligned = 0;
 
        if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +3994,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
        }
 
        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-               bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+               bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
                                                bio_sectors(bio)));
                return;
        }
@@ -4006,7 +4007,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
        if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
                                                        dd->unal_qdepth) {
-               if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+               if (bio->bi_iter.bi_sector % 8 != 0)
+                       /* Unaligned on 4k boundaries */
                        unaligned = 1;
                else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
                        unaligned = 1;
@@ -4025,17 +4027,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
                }
 
                /* Create the scatter list for this bio. */
-               bio_for_each_segment(bvec, bio, i) {
+               bio_for_each_segment(bvec, bio, iter) {
                        sg_set_page(&sg[nents],
-                                       bvec->bv_page,
-                                       bvec->bv_len,
-                                       bvec->bv_offset);
+                                       bvec.bv_page,
+                                       bvec.bv_len,
+                                       bvec.bv_offset);
                        nents++;
                }
 
                /* Issue the read/write. */
                mtip_hw_submit_io(dd,
-                               bio->bi_sector,
+                               bio->bi_iter.bi_sector,
                                bio_sectors(bio),
                                nents,
                                tag,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d82b42cfab720464e17bd963223f8..55298db36b2d61a113f25c22905fffb0f22ddd32 100644
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
-               struct bio_vec *bvec;
+               struct bio_vec bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
-                       if (!rq_iter_last(req, iter))
+                       if (!rq_iter_last(bvec, iter))
                                flags = MSG_MORE;
                        dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-                                       nbd->disk->disk_name, req, bvec->bv_len);
-                       result = sock_send_bvec(nbd, bvec, flags);
+                                       nbd->disk->disk_name, req, bvec.bv_len);
+                       result = sock_send_bvec(nbd, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                        nbd->disk->disk_name, req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
-               struct bio_vec *bvec;
+               struct bio_vec bvec;
 
                rq_for_each_segment(bvec, req, iter) {
-                       result = sock_recv_bvec(nbd, bvec);
+                       result = sock_recv_bvec(nbd, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
                                return req;
                        }
                        dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-                               nbd->disk->disk_name, req, bvec->bv_len);
+                               nbd->disk->disk_name, req, bvec.bv_len);
                }
        }
        return req;
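
Note: rq_iter_last() changes shape for the same reason — the old form indexed into the bio, the new one consults the iterator. Approximately, from this series' include/linux/blkdev.h:

        #define rq_iter_last(bvec, _iter)                       \
                (_iter.bio->bi_next == NULL &&                  \
                 bio_iter_last(bvec, _iter.iter))
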
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26696d9e004b3983a580d409d3d006..1f14ac4039450e84137b4eab0dacf043aa46d16e 100644
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
        return total_len;
 }
 
-struct nvme_bio_pair {
-       struct bio b1, b2, *parent;
-       struct bio_vec *bv1, *bv2;
-       int err;
-       atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
-       struct nvme_bio_pair *bp = bio->bi_private;
-
-       if (err)
-               bp->err = err;
-
-       if (atomic_dec_and_test(&bp->cnt)) {
-               bio_endio(bp->parent, bp->err);
-               kfree(bp->bv1);
-               kfree(bp->bv2);
-               kfree(bp);
-       }
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
-                                                       int len, int offset)
-{
-       struct nvme_bio_pair *bp;
-
-       BUG_ON(len > bio->bi_size);
-       BUG_ON(idx > bio->bi_vcnt);
-
-       bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
-       if (!bp)
-               return NULL;
-       bp->err = 0;
-
-       bp->b1 = *bio;
-       bp->b2 = *bio;
-
-       bp->b1.bi_size = len;
-       bp->b2.bi_size -= len;
-       bp->b1.bi_vcnt = idx;
-       bp->b2.bi_idx = idx;
-       bp->b2.bi_sector += len >> 9;
-
-       if (offset) {
-               bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-                                                               GFP_ATOMIC);
-               if (!bp->bv1)
-                       goto split_fail_1;
-
-               bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-                                                               GFP_ATOMIC);
-               if (!bp->bv2)
-                       goto split_fail_2;
-
-               memcpy(bp->bv1, bio->bi_io_vec,
-                       bio->bi_max_vecs * sizeof(struct bio_vec));
-               memcpy(bp->bv2, bio->bi_io_vec,
-                       bio->bi_max_vecs * sizeof(struct bio_vec));
-
-               bp->b1.bi_io_vec = bp->bv1;
-               bp->b2.bi_io_vec = bp->bv2;
-               bp->b2.bi_io_vec[idx].bv_offset += offset;
-               bp->b2.bi_io_vec[idx].bv_len -= offset;
-               bp->b1.bi_io_vec[idx].bv_len = offset;
-               bp->b1.bi_vcnt++;
-       } else
-               bp->bv1 = bp->bv2 = NULL;
-
-       bp->b1.bi_private = bp;
-       bp->b2.bi_private = bp;
-
-       bp->b1.bi_end_io = nvme_bio_pair_endio;
-       bp->b2.bi_end_io = nvme_bio_pair_endio;
-
-       bp->parent = bio;
-       atomic_set(&bp->cnt, 2);
-
-       return bp;
-
- split_fail_2:
-       kfree(bp->bv1);
- split_fail_1:
-       kfree(bp);
-       return NULL;
-}
-
 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
-                                               int idx, int len, int offset)
+                                int len)
 {
-       struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
-       if (!bp)
+       struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+       if (!split)
                return -ENOMEM;
 
+       bio_chain(split, bio);
+
        if (bio_list_empty(&nvmeq->sq_cong))
                add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-       bio_list_add(&nvmeq->sq_cong, &bp->b1);
-       bio_list_add(&nvmeq->sq_cong, &bp->b2);
+       bio_list_add(&nvmeq->sq_cong, split);
+       bio_list_add(&nvmeq->sq_cong, bio);
 
        return 0;
 }
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-       struct bio_vec *bvec, *bvprv = NULL;
+       struct bio_vec bvec, bvprv;
+       struct bvec_iter iter;
        struct scatterlist *sg = NULL;
-       int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+       int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+       int first = 1;
 
        if (nvmeq->dev->stripe_size)
                split_len = nvmeq->dev->stripe_size -
-                       ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+                       ((bio->bi_iter.bi_sector << 9) &
+                        (nvmeq->dev->stripe_size - 1));
 
        sg_init_table(iod->sg, psegs);
-       bio_for_each_segment(bvec, bio, i) {
-               if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-                       sg->length += bvec->bv_len;
+       bio_for_each_segment(bvec, bio, iter) {
+               if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+                       sg->length += bvec.bv_len;
                } else {
-                       if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-                               return nvme_split_and_submit(bio, nvmeq, i,
-                                                               length, 0);
+                       if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+                               return nvme_split_and_submit(bio, nvmeq,
+                                                            length);
 
                        sg = sg ? sg + 1 : iod->sg;
-                       sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-                                                       bvec->bv_offset);
+                       sg_set_page(sg, bvec.bv_page,
+                                   bvec.bv_len, bvec.bv_offset);
                        nsegs++;
                }
 
-               if (split_len - length < bvec->bv_len)
-                       return nvme_split_and_submit(bio, nvmeq, i, split_len,
-                                                       split_len - length);
-               length += bvec->bv_len;
+               if (split_len - length < bvec.bv_len)
+                       return nvme_split_and_submit(bio, nvmeq, split_len);
+               length += bvec.bv_len;
                bvprv = bvec;
+               first = 0;
        }
        iod->nents = nsegs;
        sg_mark_end(sg);
        if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
                return -ENOMEM;
 
-       BUG_ON(length != bio->bi_size);
+       BUG_ON(length != bio->bi_iter.bi_size);
        return length;
 }
 
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        iod->npages = 0;
 
        range->cattr = cpu_to_le32(0);
-       range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-       range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+       range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+       range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        }
 
        result = -ENOMEM;
-       iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
        if (!iod)
                goto nomem;
        iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
        length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
                                                                GFP_ATOMIC);
-       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
        cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
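
Note: the ~90 lines of nvme_bio_pair machinery collapse because bio_split() now returns an ordinary bio for the front portion and bio_chain() forwards its completion to the parent. The chaining mechanism, approximately as added by this series in fs/bio.c — which is also why any code that saves and later restores bi_end_io must bump bi_remaining:

        static void bio_chain_endio(struct bio *bio, int error)
        {
                bio_endio(bio->bi_private, error);      /* completes the parent */
                bio_put(bio);
        }

        void bio_chain(struct bio *bio, struct bio *parent)
        {
                BUG_ON(bio->bi_private || bio->bi_end_io);

                bio->bi_private = parent;
                bio->bi_end_io  = bio_chain_endio;
                /* parent's bio_endio() only runs once this drops to zero */
                atomic_inc(&parent->bi_remaining);
        }
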
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb10eebc1a02736d306cce1f43ad7ff..3dda09a5ec4172bc7e57f6e18dff272ecd8deaba 100644
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 
        for (;;) {
                tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-               if (s <= tmp->bio->bi_sector)
+               if (s <= tmp->bio->bi_iter.bi_sector)
                        next = n->rb_left;
                else
                        next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
                n = next;
        }
 
-       if (s > tmp->bio->bi_sector) {
+       if (s > tmp->bio->bi_iter.bi_sector) {
                tmp = pkt_rbtree_next(tmp);
                if (!tmp)
                        return NULL;
        }
-       BUG_ON(s > tmp->bio->bi_sector);
+       BUG_ON(s > tmp->bio->bi_iter.bi_sector);
        return tmp;
 }
 
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
        struct rb_node **p = &pd->bio_queue.rb_node;
        struct rb_node *parent = NULL;
-       sector_t s = node->bio->bi_sector;
+       sector_t s = node->bio->bi_iter.bi_sector;
        struct pkt_rb_node *tmp;
 
        while (*p) {
                parent = *p;
                tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-               if (s < tmp->bio->bi_sector)
+               if (s < tmp->bio->bi_iter.bi_sector)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                        spin_lock(&pd->iosched.lock);
                        bio = bio_list_peek(&pd->iosched.write_queue);
                        spin_unlock(&pd->iosched.lock);
-                       if (bio && (bio->bi_sector == pd->iosched.last_write))
+                       if (bio && (bio->bi_iter.bi_sector ==
+                                   pd->iosched.last_write))
                                need_write_seek = 0;
                        if (need_write_seek && reads_queued) {
                                if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                        continue;
 
                if (bio_data_dir(bio) == READ)
-                       pd->iosched.successive_reads += bio->bi_size >> 10;
+                       pd->iosched.successive_reads +=
+                               bio->bi_iter.bi_size >> 10;
                else {
                        pd->iosched.successive_reads = 0;
                        pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
 
        pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
                bio, (unsigned long long)pkt->sector,
-               (unsigned long long)bio->bi_sector, err);
+               (unsigned long long)bio->bi_iter.bi_sector, err);
 
        if (err)
                atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
        memset(written, 0, sizeof(written));
        spin_lock(&pkt->lock);
        bio_list_for_each(bio, &pkt->orig_bios) {
-               int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-               int num_frames = bio->bi_size / CD_FRAMESIZE;
+               int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+                       (CD_FRAMESIZE >> 9);
+               int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
                pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
                BUG_ON(first_frame < 0);
                BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
                bio = pkt->r_bios[f];
                bio_reset(bio);
-               bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+               bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                bio->bi_bdev = pd->bdev;
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
        bio_reset(pkt->bio);
        pkt->bio->bi_bdev = pd->bdev;
        pkt->bio->bi_rw = REQ_WRITE;
-       pkt->bio->bi_sector = new_sector;
-       pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+       pkt->bio->bi_iter.bi_sector = new_sector;
+       pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
        pkt->bio->bi_vcnt = pkt->frames;
 
        pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
        node = first_node;
        while (node) {
                bio = node->bio;
-               zone = get_zone(bio->bi_sector, pd);
+               zone = get_zone(bio->bi_iter.bi_sector, pd);
                list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
                        if (p->sector == zone) {
                                bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
        pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
        while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
                bio = node->bio;
-               pkt_dbg(2, pd, "found zone=%llx\n",
-                       (unsigned long long)get_zone(bio->bi_sector, pd));
-               if (get_zone(bio->bi_sector, pd) != zone)
+               pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+                       get_zone(bio->bi_iter.bi_sector, pd));
+               if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
                        break;
                pkt_rbtree_erase(pd, node);
                spin_lock(&pkt->lock);
                bio_list_add(&pkt->orig_bios, bio);
-               pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+               pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
                spin_unlock(&pkt->lock);
        }
        /* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
        bio_reset(pkt->w_bio);
-       pkt->w_bio->bi_sector = pkt->sector;
+       pkt->w_bio->bi_iter.bi_sector = pkt->sector;
        pkt->w_bio->bi_bdev = pd->bdev;
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2338,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
        pkt_bio_finished(pd);
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-       struct pktcdvd_device *pd;
-       char b[BDEVNAME_SIZE];
+       struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+       struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+       psd->pd = pd;
+       psd->bio = bio;
+       cloned_bio->bi_bdev = pd->bdev;
+       cloned_bio->bi_private = psd;
+       cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+       pd->stats.secs_r += bio_sectors(bio);
+       pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd = q->queuedata;
        sector_t zone;
        struct packet_data *pkt;
        int was_empty, blocked_bio;
        struct pkt_rb_node *node;
 
-       pd = q->queuedata;
-       if (!pd) {
-               pr_err("%s incorrect request queue\n",
-                      bdevname(bio->bi_bdev, b));
-               goto end_io;
-       }
-
-       /*
-        * Clone READ bios so we can have our own bi_end_io callback.
-        */
-       if (bio_data_dir(bio) == READ) {
-               struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
-               struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
-               psd->pd = pd;
-               psd->bio = bio;
-               cloned_bio->bi_bdev = pd->bdev;
-               cloned_bio->bi_private = psd;
-               cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-               pd->stats.secs_r += bio_sectors(bio);
-               pkt_queue_bio(pd, cloned_bio);
-               return;
-       }
-
-       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-               pkt_notice(pd, "WRITE for ro device (%llu)\n",
-                          (unsigned long long)bio->bi_sector);
-               goto end_io;
-       }
-
-       if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-               pkt_err(pd, "wrong bio size\n");
-               goto end_io;
-       }
-
-       blk_queue_bounce(q, &bio);
-
-       zone = get_zone(bio->bi_sector, pd);
-       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-               (unsigned long long)bio->bi_sector,
-               (unsigned long long)bio_end_sector(bio));
-
-       /* Check if we have to split the bio */
-       {
-               struct bio_pair *bp;
-               sector_t last_zone;
-               int first_sectors;
-
-               last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-               if (last_zone != zone) {
-                       BUG_ON(last_zone != zone + pd->settings.size);
-                       first_sectors = last_zone - bio->bi_sector;
-                       bp = bio_split(bio, first_sectors);
-                       BUG_ON(!bp);
-                       pkt_make_request(q, &bp->bio1);
-                       pkt_make_request(q, &bp->bio2);
-                       bio_pair_release(bp);
-                       return;
-               }
-       }
+       zone = get_zone(bio->bi_iter.bi_sector, pd);
 
        /*
         * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2374,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                        if ((pkt->state == PACKET_WAITING_STATE) ||
                            (pkt->state == PACKET_READ_WAIT_STATE)) {
                                bio_list_add(&pkt->orig_bios, bio);
-                               pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+                               pkt->write_size +=
+                                       bio->bi_iter.bi_size / CD_FRAMESIZE;
                                if ((pkt->write_size >= pkt->frames) &&
                                    (pkt->state == PACKET_WAITING_STATE)) {
                                        atomic_inc(&pkt->run_sm);
@@ -2476,6 +2434,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                 */
                wake_up(&pd->wqueue);
        }
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd;
+       char b[BDEVNAME_SIZE];
+       struct bio *split;
+
+       pd = q->queuedata;
+       if (!pd) {
+               pr_err("%s incorrect request queue\n",
+                      bdevname(bio->bi_bdev, b));
+               goto end_io;
+       }
+
+       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+               (unsigned long long)bio->bi_iter.bi_sector,
+               (unsigned long long)bio_end_sector(bio));
+
+       /*
+        * Clone READ bios so we can have our own bi_end_io callback.
+        */
+       if (bio_data_dir(bio) == READ) {
+               pkt_make_request_read(pd, bio);
+               return;
+       }
+
+       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+               pkt_notice(pd, "WRITE for ro device (%llu)\n",
+                          (unsigned long long)bio->bi_iter.bi_sector);
+               goto end_io;
+       }
+
+       if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+               pkt_err(pd, "wrong bio size\n");
+               goto end_io;
+       }
+
+       blk_queue_bounce(q, &bio);
+
+       do {
+               sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+               sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+               if (last_zone != zone) {
+                       BUG_ON(last_zone != zone + pd->settings.size);
+
+                       split = bio_split(bio, last_zone -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               pkt_make_request_write(q, split);
+       } while (split != bio);
+
        return;
 end_io:
        bio_io_error(bio);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d75858ef46f8553ac54b0163ff7b60d8a..c120d70d3fb3b31fdfb584859a8cdd5bf0849dab 100644
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 {
        unsigned int offset = 0;
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
        unsigned int i = 0;
        size_t size;
        void *buf;
 
        rq_for_each_segment(bvec, req, iter) {
                unsigned long flags;
-               dev_dbg(&dev->sbd.core,
-                       "%s:%u: bio %u: %u segs %u sectors from %lu\n",
-                       __func__, __LINE__, i, bio_segments(iter.bio),
-                       bio_sectors(iter.bio), iter.bio->bi_sector);
+               dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+                       __func__, __LINE__, i, bio_sectors(iter.bio),
+                       iter.bio->bi_iter.bi_sector);
 
-               size = bvec->bv_len;
-               buf = bvec_kmap_irq(bvec, &flags);
+               size = bvec.bv_len;
+               buf = bvec_kmap_irq(&bvec, &flags);
                if (gather)
                        memcpy(dev->bounce_buf+offset, buf, size);
                else
                        memcpy(buf, dev->bounce_buf+offset, size);
                offset += size;
-               flush_kernel_dcache_page(bvec->bv_page);
+               flush_kernel_dcache_page(bvec.bv_page);
                bvec_kunmap_irq(buf, &flags);
                i++;
        }
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
        unsigned int n = 0;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        struct req_iterator iter;
 
        rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37299191b1d9c7fe5e2caf036461a6..ef45cfb98fd2f12278d9f9caa6d044c1336e7baa 100644
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
        int write = bio_data_dir(bio) == WRITE;
        const char *op = write ? "write" : "read";
-       loff_t offset = bio->bi_sector << 9;
+       loff_t offset = bio->bi_iter.bi_sector << 9;
        int error = 0;
-       struct bio_vec *bvec;
-       unsigned int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        struct bio *next;
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                /* PS3 is ppc64, so we don't handle highmem */
-               char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-               size_t len = bvec->bv_len, retlen;
+               char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+               size_t len = bvec.bv_len, retlen;
 
                dev_dbg(&dev->core, "    %s %zu bytes at offset %llu\n", op,
                        len, offset);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 16cab6635163797da9414a27cb8634356d5cd999..b365e0dfccb66f7c256a9d07d7fd976fba17ae95 100644
@@ -1156,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
+       struct bvec_iter iter;
        unsigned long flags;
        void *buf;
-       int i;
        int pos = 0;
 
        while (chain) {
-               bio_for_each_segment(bv, chain, i) {
-                       if (pos + bv->bv_len > start_ofs) {
+               bio_for_each_segment(bv, chain, iter) {
+                       if (pos + bv.bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
-                               buf = bvec_kmap_irq(bv, &flags);
+                               buf = bvec_kmap_irq(&bv, &flags);
                                memset(buf + remainder, 0,
-                                      bv->bv_len - remainder);
-                               flush_dcache_page(bv->bv_page);
+                                      bv.bv_len - remainder);
+                               flush_dcache_page(bv.bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
-                       pos += bv->bv_len;
+                       pos += bv.bv_len;
                }
 
                chain = chain->bi_next;
@@ -1220,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int len,
                                        gfp_t gfpmask)
 {
-       struct bio_vec *bv;
-       unsigned int resid;
-       unsigned short idx;
-       unsigned int voff;
-       unsigned short end_idx;
-       unsigned short vcnt;
        struct bio *bio;
 
-       /* Handle the easy case for the caller */
-
-       if (!offset && len == bio_src->bi_size)
-               return bio_clone(bio_src, gfpmask);
-
-       if (WARN_ON_ONCE(!len))
-               return NULL;
-       if (WARN_ON_ONCE(len > bio_src->bi_size))
-               return NULL;
-       if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
-               return NULL;
-
-       /* Find first affected segment... */
-
-       resid = offset;
-       bio_for_each_segment(bv, bio_src, idx) {
-               if (resid < bv->bv_len)
-                       break;
-               resid -= bv->bv_len;
-       }
-       voff = resid;
-
-       /* ...and the last affected segment */
-
-       resid += len;
-       __bio_for_each_segment(bv, bio_src, end_idx, idx) {
-               if (resid <= bv->bv_len)
-                       break;
-               resid -= bv->bv_len;
-       }
-       vcnt = end_idx - idx + 1;
-
-       /* Build the clone */
-
-       bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+       bio = bio_clone(bio_src, gfpmask);
        if (!bio)
                return NULL;    /* ENOMEM */
 
-       bio->bi_bdev = bio_src->bi_bdev;
-       bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
-       bio->bi_rw = bio_src->bi_rw;
-       bio->bi_flags |= 1 << BIO_CLONED;
-
-       /*
-        * Copy over our part of the bio_vec, then update the first
-        * and last (or only) entries.
-        */
-       memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
-                       vcnt * sizeof (struct bio_vec));
-       bio->bi_io_vec[0].bv_offset += voff;
-       if (vcnt > 1) {
-               bio->bi_io_vec[0].bv_len -= voff;
-               bio->bi_io_vec[vcnt - 1].bv_len = resid;
-       } else {
-               bio->bi_io_vec[0].bv_len = len;
-       }
-
-       bio->bi_vcnt = vcnt;
-       bio->bi_size = len;
-       bio->bi_idx = 0;
+       bio_advance(bio, offset);
+       bio->bi_iter.bi_size = len;
 
        return bio;
 }
@@ -1318,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 
        /* Build up a chain of clone bios up to the limit */
 
-       if (!bi || off >= bi->bi_size || !len)
+       if (!bi || off >= bi->bi_iter.bi_size || !len)
                return NULL;            /* Nothing to clone */
 
        end = &chain;
@@ -1330,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
-               bi_size = min_t(unsigned int, bi->bi_size - off, len);
+               bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */
@@ -1339,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                end = &bio->bi_next;
 
                off += bi_size;
-               if (off == bi->bi_size) {
+               if (off == bi->bi_iter.bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
@@ -2227,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 
        if (type == OBJ_REQUEST_BIO) {
                bio_list = data_desc;
-               rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+               rbd_assert(img_offset ==
+                          bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
        } else {
                rbd_assert(type == OBJ_REQUEST_PAGES);
                pages = data_desc;
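
The rewritten bio_clone_range() above reduces to two generic helpers: bio_clone() duplicates the bio (bi_iter included) and bio_advance() walks the clone's iterator past the leading bytes, after which truncating bi_iter.bi_size bounds the clone to the requested range. A minimal sketch of the same pattern, assuming a 3.14-era kernel (the wrapper name is illustrative, not a kernel symbol):

    #include <linux/bio.h>

    /* Sketch: clone the byte range [offset, offset + len) of @src, as
     * rbd's simplified bio_clone_range() now does.
     */
    static struct bio *clone_sub_range(struct bio *src, unsigned int offset,
                                       unsigned int len, gfp_t gfp)
    {
            struct bio *bio = bio_clone(src, gfp);

            if (!bio)
                    return NULL;            /* ENOMEM */

            bio_advance(bio, offset);       /* advances bio->bi_iter */
            bio->bi_iter.bi_size = len;     /* trim the tail */

            return bio;
    }
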
index 2284f5d3a54ad00dd05c512b30b7bfed30411482..2839d37e5af77922051cb1d48561602ea3a3c2b7 100644 (file)
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
        if (!card)
                goto req_err;
 
-       if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+       if (bio_end_sector(bio) > get_capacity(card->gendisk))
                goto req_err;
 
        if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
                goto req_err;
        }
 
-       if (bio->bi_size == 0) {
+       if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
                goto req_err;
        }
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
        dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
                 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-                (u64)bio->bi_sector << 9, bio->bi_size);
+                (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
        st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
                                    bio_dma_done_cb, bio_meta);
index fc88ba3e1bd27835ecf170d5ba321cf8313a6cea..cf8cd293abb51d338cd6d0ae7762070c80be99a2 100644 (file)
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           void *cb_data)
 {
        struct list_head dma_list[RSXX_MAX_TARGETS];
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        int st;
        int i;
 
-       addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+       addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);
 
        for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        }
 
        if (bio->bi_rw & REQ_DISCARD) {
-               bv_len = bio->bi_size;
+               bv_len = bio->bi_iter.bi_size;
 
                while (bv_len > 0) {
                        tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
-               bio_for_each_segment(bvec, bio, i) {
-                       bv_len = bvec->bv_len;
-                       bv_off = bvec->bv_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       bv_len = bvec.bv_len;
+                       bv_off = bvec.bv_offset;
 
                        while (bv_len > 0) {
                                tgt   = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                        bio_data_dir(bio),
                                                        dma_off, dma_len,
-                                                       laddr, bvec->bv_page,
+                                                       laddr, bvec.bv_page,
                                                        bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;
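
This hunk shows the conversion idiom repeated throughout the series: bio_for_each_segment() is now driven by a caller-supplied struct bvec_iter and yields each struct bio_vec by value, so member access switches from `->` to `.` and drivers stop indexing bi_io_vec directly. A minimal sketch, assuming a 3.14-era kernel (the function name is illustrative):

    #include <linux/bio.h>

    /* Sketch of the post-immutable-biovec iteration idiom. */
    static unsigned int count_bio_bytes(struct bio *bio)
    {
            struct bio_vec bvec;            /* a copy, not a pointer into bi_io_vec */
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bvec, bio, iter)
                    bytes += bvec.bv_len;   /* '.' access on the copied bvec */

            return bytes;
    }
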
index ad70868f8a967b40bc866bc5430387b55f4601ac..4cf81b5bf0f7fba42a243a7e717ead98b2144759 100644 (file)
@@ -108,8 +108,7 @@ struct cardinfo {
                                    * have been written
                                    */
        struct bio      *bio, *currentbio, **biotail;
-       int             current_idx;
-       sector_t        current_sector;
+       struct bvec_iter current_iter;
 
        struct request_queue *queue;
 
@@ -118,7 +117,7 @@ struct cardinfo {
                struct mm_dma_desc      *desc;
                int                     cnt, headcnt;
                struct bio              *bio, **biotail;
-               int                     idx;
+               struct bvec_iter        iter;
        } mm_pages[2];
 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
        dma_addr_t dma_handle;
        int offset;
        struct bio *bio;
-       struct bio_vec *vec;
-       int idx;
+       struct bio_vec vec;
        int rw;
-       int len;
 
        bio = card->currentbio;
        if (!bio && card->bio) {
                card->currentbio = card->bio;
-               card->current_idx = card->bio->bi_idx;
-               card->current_sector = card->bio->bi_sector;
+               card->current_iter = card->bio->bi_iter;
                card->bio = card->bio->bi_next;
                if (card->bio == NULL)
                        card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
        }
        if (!bio)
                return 0;
-       idx = card->current_idx;
 
        rw = bio_rw(bio);
        if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
                return 0;
 
-       vec = bio_iovec_idx(bio, idx);
-       len = vec->bv_len;
+       vec = bio_iter_iovec(bio, card->current_iter);
+
        dma_handle = pci_map_page(card->dev,
-                                 vec->bv_page,
-                                 vec->bv_offset,
-                                 len,
+                                 vec.bv_page,
+                                 vec.bv_offset,
+                                 vec.bv_len,
                                  (rw == READ) ?
                                  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
        desc = &p->desc[p->cnt];
        p->cnt++;
        if (p->bio == NULL)
-               p->idx = idx;
+               p->iter = card->current_iter;
        if ((p->biotail) != &bio->bi_next) {
                *(p->biotail) = bio;
                p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
        desc->data_dma_handle = dma_handle;
 
        desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-       desc->local_addr = cpu_to_le64(card->current_sector << 9);
-       desc->transfer_size = cpu_to_le32(len);
+       desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+       desc->transfer_size = cpu_to_le32(vec.bv_len);
        offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
        desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
        desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
                desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
        desc->sem_control_bits = desc->control_bits;
 
-       card->current_sector += (len >> 9);
-       idx++;
-       card->current_idx = idx;
-       if (idx >= bio->bi_vcnt)
+
+       bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+       if (!card->current_iter.bi_size)
                card->currentbio = NULL;
 
        return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
                struct mm_dma_desc *desc = &page->desc[page->headcnt];
                int control = le32_to_cpu(desc->sem_control_bits);
                int last = 0;
-               int idx;
+               struct bio_vec vec;
 
                if (!(control & DMASCR_DMA_COMPLETE)) {
                        control = dma_status;
                        last = 1;
                }
+
                page->headcnt++;
-               idx = page->idx;
-               page->idx++;
-               if (page->idx >= bio->bi_vcnt) {
+               vec = bio_iter_iovec(bio, page->iter);
+               bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+               if (!page->iter.bi_size) {
                        page->bio = bio->bi_next;
                        if (page->bio)
-                               page->idx = page->bio->bi_idx;
+                               page->iter = page->bio->bi_iter;
                }
 
                pci_unmap_page(card->dev, desc->data_dma_handle,
-                              bio_iovec_idx(bio, idx)->bv_len,
+                              vec.bv_len,
                                 (control & DMASCR_TRANSFER_READ) ?
                                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
        pr_debug("mm_make_request %llu %u\n",
-                (unsigned long long)bio->bi_sector, bio->bi_size);
+                (unsigned long long)bio->bi_iter.bi_sector,
+                bio->bi_iter.bi_size);
 
        spin_lock_irq(&card->lock);
        *card->biotail = bio;
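
umem's rewrite keeps a private struct bvec_iter in driver state so a bio can be consumed one DMA descriptor at a time across calls: bio_iter_iovec() reads the segment under the saved iterator without touching the bio, and bio_advance_iter() steps only the private copy. A sketch of that resumable walk, assuming a 3.14-era kernel (the helper name is illustrative):

    #include <linux/bio.h>

    /* Sketch: out-of-band, resumable iteration as add_bio()/process_page()
     * now do.  The bio itself is never modified; progress lives in @saved.
     */
    static bool next_segment(struct bio *bio, struct bvec_iter *saved,
                             struct bio_vec *out)
    {
            if (!saved->bi_size)
                    return false;                   /* bio fully consumed */

            *out = bio_iter_iovec(bio, *saved);     /* segment under the iterator */
            bio_advance_iter(bio, saved, out->bv_len);
            return true;
    }
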
index 6620b73d04906191132d771dade31f9e00043e07..4b97b86da9265b4ca5dcb3a7ab562dbf1eb5bac0 100644 (file)
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
-                       bio->bi_sector  = preq.sector_number;
+                       bio->bi_iter.bi_sector  = preq.sector_number;
                }
 
                preq.sector_number += seg[i].nsec;
index f9c43f91f03e5de68bff030b663f094e56fc1f9f..8dcfb54f160302e0e1d91c232387f758b2f8e0f6 100644 (file)
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
                        for (i = 0; i < pending; i++) {
                                offset = (i * segs * PAGE_SIZE) >> 9;
                                size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
-                                          (unsigned int)(bio->bi_size >> 9) - offset);
+                                          (unsigned int)bio_sectors(bio) - offset);
                                cloned_bio = bio_clone(bio, GFP_NOIO);
                                BUG_ON(cloned_bio == NULL);
                                bio_trim(cloned_bio, offset, size);
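
bio_sectors() and bio_end_sector(), used in several hunks here, are the accessor form of the open-coded arithmetic they replace; with the per-I/O state moved into bi_iter they expand to roughly the following (per include/linux/bio.h in this series):

    #define bio_sectors(bio)    ((bio)->bi_iter.bi_size >> 9)
    #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors(bio))
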
index 754f4317748322e7450d69da9591b6b72aff6dc4..dbdbca5a95910a421898cef06bcf902a43401c6a 100644 (file)
@@ -280,7 +280,6 @@ struct bcache_device {
        unsigned long           sectors_dirty_last;
        long                    sectors_dirty_derivative;
 
-       mempool_t               *unaligned_bvec;
        struct bio_set          *bio_split;
 
        unsigned                data_csum:1;
@@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
index 31bb53fcc67a40806cf73659a596f98297d36128..946ecd3b048b0ae1c9bd47ab4c42572f7919b838 100644 (file)
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
 
        bio = bch_bbio_alloc(b->c);
        bio->bi_rw      = REQ_META|READ_SYNC;
-       bio->bi_size    = KEY_SIZE(&b->key) << 9;
+       bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;
 
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
        struct bio_vec *bv;
        int n;
 
-       __bio_for_each_segment(bv, b->bio, n, 0)
+       bio_for_each_segment_all(bv, b->bio, n)
                __free_page(bv->bv_page);
 
        __btree_node_write_done(cl);
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
        b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
-       b->bio->bi_size         = set_blocks(i, b->c) * block_bytes(b->c);
+       b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
        bch_bio_map(b->bio, i);
 
        /*
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
                struct bio_vec *bv;
                void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-               bio_for_each_segment(bv, b->bio, j)
+               bio_for_each_segment_all(bv, b->bio, j)
                        memcpy(page_address(bv->bv_page),
                               base + j * PAGE_SIZE, PAGE_SIZE);
 
index 264fcfbd629016aa1ab890cce56de2699c49be70..03cb4d114e1624997d114a3ac4a131d80d3d90fb 100644 (file)
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
        char name[BDEVNAME_SIZE];
        struct bio *check;
-       struct bio_vec *bv;
+       struct bio_vec bv, *bv2;
+       struct bvec_iter iter;
        int i;
 
        check = bio_clone(bio, GFP_NOIO);
@@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
        submit_bio_wait(READ_SYNC, check);
 
-       bio_for_each_segment(bv, bio, i) {
-               void *p1 = kmap_atomic(bv->bv_page);
-               void *p2 = page_address(check->bi_io_vec[i].bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               void *p1 = kmap_atomic(bv.bv_page);
+               void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-               cache_set_err_on(memcmp(p1 + bv->bv_offset,
-                                       p2 + bv->bv_offset,
-                                       bv->bv_len),
+               cache_set_err_on(memcmp(p1 + bv.bv_offset,
+                                       p2 + bv.bv_offset,
+                                       bv.bv_len),
                                 dc->disk.c,
                                 "verify failed at dev %s sector %llu",
                                 bdevname(dc->bdev, name),
-                                (uint64_t) bio->bi_sector);
+                                (uint64_t) bio->bi_iter.bi_sector);
 
                kunmap_atomic(p1);
        }
 
-       bio_for_each_segment_all(bv, check, i)
-               __free_page(bv->bv_page);
+       bio_for_each_segment_all(bv2, check, i)
+               __free_page(bv2->bv_page);
 out_put:
        bio_put(check);
 }
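
Note how this hunk splits the two iteration flavours: bio_for_each_segment() walks the unconsumed portion of any bio through a private bvec_iter and yields copies, while bio_for_each_segment_all() indexes the raw bi_io_vec array and yields pointers, which is only legal for a bio the caller owns outright (here the freshly cloned `check`). A sketch of the distinction, assuming a 3.14-era kernel (the function name is illustrative):

    #include <linux/bio.h>
    #include <linux/mm.h>

    static unsigned int two_iterators(struct bio *any_bio, struct bio *owned_bio)
    {
            struct bio_vec bv, *bvp;
            struct bvec_iter iter;
            unsigned int bytes = 0;
            int i;

            /* Safe on an in-flight bio: iterates a private iterator copy. */
            bio_for_each_segment(bv, any_bio, iter)
                    bytes += bv.bv_len;

            /* Owner-only: walks bi_io_vec directly, ignoring bi_iter. */
            bio_for_each_segment_all(bvp, owned_bio, i)
                    __free_page(bvp->bv_page);

            return bytes;   /* == any_bio->bi_iter.bi_size */
    }
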
index 9056632995b1b8a2de9de607c60cd693704c9c65..fa028fa82df41bf509e15d140f9b3b943c5a8c2d 100644 (file)
 
 #include <linux/blkdev.h>
 
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
-       struct bio *p = bio->bi_private;
-
-       bio_endio(p, error);
-       bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
-       if (bio->bi_idx) {
-               struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
-               memcpy(clone->bi_io_vec,
-                      bio_iovec(bio),
-                      bio_segments(bio) * sizeof(struct bio_vec));
-
-               clone->bi_sector        = bio->bi_sector;
-               clone->bi_bdev          = bio->bi_bdev;
-               clone->bi_rw            = bio->bi_rw;
-               clone->bi_vcnt          = bio_segments(bio);
-               clone->bi_size          = bio->bi_size;
-
-               clone->bi_private       = bio;
-               clone->bi_end_io        = bch_bi_idx_hack_endio;
-
-               bio = clone;
-       }
-
-       /*
-        * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
-        * bios might have had more than that (before we split them per device
-        * limitations).
-        *
-        * To be taken out once immutable bvec stuff is in.
-        */
-       bio->bi_max_vecs = bio->bi_vcnt;
-
-       generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio:       bio to split
- * @sectors:   number of sectors to split from the front of @bio
- * @gfp:       gfp mask
- * @bs:                bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundary; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-                         gfp_t gfp, struct bio_set *bs)
-{
-       unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
-       struct bio_vec *bv;
-       struct bio *ret = NULL;
-
-       BUG_ON(sectors <= 0);
-
-       if (sectors >= bio_sectors(bio))
-               return bio;
-
-       if (bio->bi_rw & REQ_DISCARD) {
-               ret = bio_alloc_bioset(gfp, 1, bs);
-               if (!ret)
-                       return NULL;
-               idx = 0;
-               goto out;
-       }
-
-       bio_for_each_segment(bv, bio, idx) {
-               vcnt = idx - bio->bi_idx;
-
-               if (!nbytes) {
-                       ret = bio_alloc_bioset(gfp, vcnt, bs);
-                       if (!ret)
-                               return NULL;
-
-                       memcpy(ret->bi_io_vec, bio_iovec(bio),
-                              sizeof(struct bio_vec) * vcnt);
-
-                       break;
-               } else if (nbytes < bv->bv_len) {
-                       ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-                       if (!ret)
-                               return NULL;
-
-                       memcpy(ret->bi_io_vec, bio_iovec(bio),
-                              sizeof(struct bio_vec) * vcnt);
-
-                       ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-                       bv->bv_offset   += nbytes;
-                       bv->bv_len      -= nbytes;
-                       break;
-               }
-
-               nbytes -= bv->bv_len;
-       }
-out:
-       ret->bi_bdev    = bio->bi_bdev;
-       ret->bi_sector  = bio->bi_sector;
-       ret->bi_size    = sectors << 9;
-       ret->bi_rw      = bio->bi_rw;
-       ret->bi_vcnt    = vcnt;
-       ret->bi_max_vecs = vcnt;
-
-       bio->bi_sector  += sectors;
-       bio->bi_size    -= sectors << 9;
-       bio->bi_idx      = idx;
-
-       if (bio_integrity(bio)) {
-               if (bio_integrity_clone(ret, bio, gfp)) {
-                       bio_put(ret);
-                       return NULL;
-               }
-
-               bio_integrity_trim(ret, 0, bio_sectors(ret));
-               bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
-       }
-
-       return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-       unsigned ret = bio_sectors(bio);
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-                                     queue_max_segments(q));
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       unsigned ret = 0, seg = 0;
 
        if (bio->bi_rw & REQ_DISCARD)
-               return min(ret, q->limits.max_discard_sectors);
-
-       if (bio_segments(bio) > max_segments ||
-           q->merge_bvec_fn) {
-               struct bio_vec *bv;
-               int i, seg = 0;
-
-               ret = 0;
-
-               bio_for_each_segment(bv, bio, i) {
-                       struct bvec_merge_data bvm = {
-                               .bi_bdev        = bio->bi_bdev,
-                               .bi_sector      = bio->bi_sector,
-                               .bi_size        = ret << 9,
-                               .bi_rw          = bio->bi_rw,
-                       };
-
-                       if (seg == max_segments)
-                               break;
+               return min(bio_sectors(bio), q->limits.max_discard_sectors);
+
+       bio_for_each_segment(bv, bio, iter) {
+               struct bvec_merge_data bvm = {
+                       .bi_bdev        = bio->bi_bdev,
+                       .bi_sector      = bio->bi_iter.bi_sector,
+                       .bi_size        = ret << 9,
+                       .bi_rw          = bio->bi_rw,
+               };
+
+               if (seg == min_t(unsigned, BIO_MAX_PAGES,
+                                queue_max_segments(q)))
+                       break;
 
-                       if (q->merge_bvec_fn &&
-                           q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
-                               break;
+               if (q->merge_bvec_fn &&
+                   q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                       break;
 
-                       seg++;
-                       ret += bv->bv_len >> 9;
-               }
+               seg++;
+               ret += bv.bv_len >> 9;
        }
 
        ret = min(ret, queue_max_sectors(q));
 
        WARN_ON(!ret);
-       ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+       ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
 
        return ret;
 }
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
        s->bio->bi_end_io = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
-       bio_endio(s->bio, 0);
+       bio_endio_nodec(s->bio, 0);
 
        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
        bio_get(bio);
 
        do {
-               n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-                                 GFP_NOIO, s->p->bio_split);
+               n = bio_next_split(bio, bch_bio_max_sectors(bio),
+                                  GFP_NOIO, s->p->bio_split);
 
                n->bi_end_io    = bch_bio_submit_split_endio;
                n->bi_private   = &s->cl;
 
                closure_get(&s->cl);
-               bch_generic_make_request_hack(n);
+               generic_make_request(n);
        } while (n != bio);
 
        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
-       bch_generic_make_request_hack(bio);
+       generic_make_request(bio);
 }
 
 /* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
 
-       bio->bi_sector  = PTR_OFFSET(&b->key, 0);
-       bio->bi_bdev    = PTR_CACHE(c, &b->key, 0)->bdev;
+       bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
+       bio->bi_bdev            = PTR_CACHE(c, &b->key, 0)->bdev;
 
        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
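
With bch_bio_split() and the generic_make_request hack deleted, splitting goes through the new generic bio_next_split(): it returns the original bio once the remainder fits within the limit, which is what terminates the loop, and otherwise allocates a front split from the bio_set. A sketch of the loop shape used by bch_generic_make_request() above, assuming a 3.14-era kernel (max_sectors() stands in for bch_bio_max_sectors()):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void split_and_submit(struct bio *bio, struct bio_set *bs,
                                 unsigned (*max_sectors)(struct bio *))
    {
            struct bio *n;

            do {
                    n = bio_next_split(bio, max_sectors(bio), GFP_NOIO, bs);
                    generic_make_request(n);
            } while (n != bio);     /* bio_next_split() returns @bio on the last chunk */
    }
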
index ecdaa671bd50457bf38d1cf9f896ffd1c8352546..7eafdf09a0ae11cb1c4108926e4d1c51ac90c9f4 100644 (file)
@@ -51,10 +51,10 @@ reread:             left = ca->sb.bucket_size - offset;
                len = min_t(unsigned, left, PAGE_SECTORS * 8);
 
                bio_reset(bio);
-               bio->bi_sector  = bucket + offset;
+               bio->bi_iter.bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = READ;
-               bio->bi_size    = len << 9;
+               bio->bi_iter.bi_size    = len << 9;
 
                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
                bio_init(bio);
-               bio->bi_sector          = bucket_to_sector(ca->set,
+               bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
-               bio->bi_size            = bucket_bytes(ca);
+               bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;
 
                closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
                atomic_long_add(sectors, &ca->meta_sectors_written);
 
                bio_reset(bio);
-               bio->bi_sector  = PTR_OFFSET(k, i);
+               bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-               bio->bi_size    = sectors << 9;
+               bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
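
The journal hunks show the pattern for (re)initialising a bio under the new layout: after bio_reset()/bio_init(), the per-I/O cursor lives entirely in bi_iter, while bi_io_vec/bi_max_vecs still describe the backing storage. A minimal sketch, assuming a 3.14-era kernel (the helper name is illustrative):

    #include <linux/bio.h>

    static void prep_bio(struct bio *bio, struct block_device *bdev,
                         sector_t sector, unsigned int bytes)
    {
            bio_reset(bio);
            bio->bi_iter.bi_sector = sector;        /* was bio->bi_sector */
            bio->bi_iter.bi_size   = bytes;         /* was bio->bi_size   */
            bio->bi_bdev           = bdev;
    }
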
index f2f0998c4a91872407dd036a54fe72d243885fed..052bd24d24b42b42c3d434564a361dc3b693a2e5 100644 (file)
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-       bio->bi_size            = KEY_SIZE(&io->w->key) << 9;
+       bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                               PAGE_SECTORS);
        bio->bi_private         = &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
        if (!op->error) {
                moving_init(io);
 
-               io->bio.bio.bi_sector = KEY_START(&io->w->key);
+               io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->write_prio          = 1;
                op->bio                 = &io->bio.bio;
 
index 61bcfc21d2a0f4972b581a689fd1c3c929f7bd38..c906571997d7a4ab256188f05f4a8c11ea5928f8 100644 (file)
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-       struct bio_vec *bv;
+       struct bio_vec bv;
+       struct bvec_iter iter;
        uint64_t csum = 0;
-       int i;
 
-       bio_for_each_segment(bv, bio, i) {
-               void *d = kmap(bv->bv_page) + bv->bv_offset;
-               csum = bch_crc64_update(csum, d, bv->bv_len);
-               kunmap(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               void *d = kmap(bv.bv_page) + bv.bv_offset;
+               csum = bch_crc64_update(csum, d, bv.bv_len);
+               kunmap(bv.bv_page);
        }
 
        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl)
        struct bio *bio = op->bio;
 
        pr_debug("invalidating %i sectors from %llu",
-                bio_sectors(bio), (uint64_t) bio->bi_sector);
+                bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
        while (bio_sectors(bio)) {
                unsigned sectors = min(bio_sectors(bio),
@@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl)
                if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
                        goto out;
 
-               bio->bi_sector  += sectors;
-               bio->bi_size    -= sectors << 9;
+               bio->bi_iter.bi_sector  += sectors;
+               bio->bi_iter.bi_size    -= sectors << 9;
 
                bch_keylist_add(&op->insert_keys,
-                               &KEY(op->inode, bio->bi_sector, sectors));
+                               &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
        }
 
        op->insert_data_done = true;
@@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl)
                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
-               SET_KEY_OFFSET(k, bio->bi_sector);
+               SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;
 
-               n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+               n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
                n->bi_end_io    = bch_data_insert_endio;
                n->bi_private   = cl;
@@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
             (bio->bi_rw & REQ_WRITE)))
                goto skip;
 
-       if (bio->bi_sector & (c->sb.block_size - 1) ||
+       if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io");
                goto skip;
@@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
        spin_lock(&dc->io_lock);
 
-       hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-               if (i->last == bio->bi_sector &&
+       hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+               if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;
 
@@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
        add_sequential(task);
        i->sequential = 0;
 found:
-       if (i->sequential + bio->bi_size > i->sequential)
-               i->sequential   += bio->bi_size;
+       if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+               i->sequential   += bio->bi_iter.bi_size;
 
        i->last                  = bio_end_sector(bio);
        i->jiffies               = jiffies + msecs_to_jiffies(5000);
@@ -605,7 +605,6 @@ struct search {
        unsigned                insert_bio_sectors;
 
        unsigned                recoverable:1;
-       unsigned                unaligned_bvec:1;
        unsigned                write:1;
        unsigned                read_dirty_data:1;
 
@@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        struct bkey *bio_key;
        unsigned ptr;
 
-       if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+       if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;
 
        if (KEY_INODE(k) != s->iop.inode ||
-           KEY_START(k) > bio->bi_sector) {
+           KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned bio_sectors = bio_sectors(bio);
                unsigned sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
-                               KEY_START(k) - bio->bi_sector)
+                               KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;
 
                int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        if (KEY_DIRTY(k))
                s->read_dirty_data = true;
 
-       n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-                                    KEY_OFFSET(k) - bio->bi_sector),
-                         GFP_NOIO, s->d->bio_split);
+       n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+                                     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+                          GFP_NOIO, s->d->bio_split);
 
        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-       bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+       bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
        n->bi_end_io    = bch_cache_read_endio;
@@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl)
        struct bio *bio = &s->bio.bio;
 
        int ret = bch_btree_map_keys(&s->op, s->iop.c,
-                                    &KEY(s->iop.inode, bio->bi_sector, 0),
+                                    &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                     cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN)
                continue_at(cl, cache_lookup, bcache_wq);
@@ -758,10 +757,12 @@ static void bio_complete(struct search *s)
 static void do_bio_hook(struct search *s)
 {
        struct bio *bio = &s->bio.bio;
-       memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+       bio_init(bio);
+       __bio_clone_fast(bio, s->orig_bio);
        bio->bi_end_io          = request_endio;
        bio->bi_private         = &s->cl;
+
        atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -773,9 +774,6 @@ static void search_free(struct closure *cl)
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
-       if (s->unaligned_bvec)
-               mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
 }
@@ -783,7 +781,6 @@ static void search_free(struct closure *cl)
 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 {
        struct search *s;
-       struct bio_vec *bv;
 
        s = mempool_alloc(d->c->search, GFP_NOIO);
        memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
        s->start_time           = jiffies;
        do_bio_hook(s);
 
-       if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-               bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-               memcpy(bv, bio_iovec(bio),
-                      sizeof(struct bio_vec) * bio_segments(bio));
-
-               s->bio.bio.bi_io_vec    = bv;
-               s->unaligned_bvec       = 1;
-       }
-
        return s;
 }
 
@@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
-       struct bio_vec *bv;
-       int i;
 
        if (s->recoverable) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
                s->iop.error = 0;
-               bv = s->bio.bio.bi_io_vec;
                do_bio_hook(s);
-               s->bio.bio.bi_io_vec = bv;
-
-               if (!s->unaligned_bvec)
-                       bio_for_each_segment(bv, s->orig_bio, i)
-                               bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-               else
-                       memcpy(s->bio.bio.bi_io_vec,
-                              bio_iovec(s->orig_bio),
-                              sizeof(struct bio_vec) *
-                              bio_segments(s->orig_bio));
 
                /* XXX: invalidate cache */
 
@@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl)
 
        if (s->iop.bio) {
                bio_reset(s->iop.bio);
-               s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+               s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-               s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+               s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);
 
                bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl)
                s->cache_miss = NULL;
        }
 
-       if (verify(dc, &s->bio.bio) && s->recoverable &&
-           !s->unaligned_bvec && !s->read_dirty_data)
+       if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);
 
        bio_complete(s);
@@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        struct bio *miss, *cache_bio;
 
        if (s->cache_miss || s->iop.bypass) {
-               miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+               miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }
@@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
        s->iop.replace_key = KEY(s->iop.inode,
-                                bio->bi_sector + s->insert_bio_sectors,
+                                bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);
 
        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->iop.replace = true;
 
-       miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+       miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        if (!cache_bio)
                goto out_submit;
 
-       cache_bio->bi_sector    = miss->bi_sector;
-       cache_bio->bi_bdev      = miss->bi_bdev;
-       cache_bio->bi_size      = s->insert_bio_sectors << 9;
+       cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
+       cache_bio->bi_bdev              = miss->bi_bdev;
+       cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 
        cache_bio->bi_end_io    = request_endio;
        cache_bio->bi_private   = &s->cl;
@@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
-       struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+       struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        closure_bio_submit(flush, cl, s->d);
                }
        } else {
-               s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-                                             dc->disk.bio_split);
+               s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
                closure_bio_submit(bio, cl, s->d);
        }
@@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
        part_stat_unlock();
 
        bio->bi_bdev = dc->bdev;
-       bio->bi_sector += dc->sb.data_offset;
+       bio->bi_iter.bi_sector += dc->sb.data_offset;
 
        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);
 
-               if (!bio->bi_size) {
+               if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
@@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned sectors)
 {
-       struct bio_vec *bv;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
        /* Zero fill bio */
 
-       bio_for_each_segment(bv, bio, i) {
-               unsigned j = min(bv->bv_len >> 9, sectors);
+       bio_for_each_segment(bv, bio, iter) {
+               unsigned j = min(bv.bv_len >> 9, sectors);
 
-               void *p = kmap(bv->bv_page);
-               memset(p + bv->bv_offset, 0, j << 9);
-               kunmap(bv->bv_page);
+               void *p = kmap(bv.bv_page);
+               memset(p + bv.bv_offset, 0, j << 9);
+               kunmap(bv.bv_page);
 
                sectors -= j;
        }
 
-       bio_advance(bio, min(sectors << 9, bio->bi_size));
+       bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-       if (!bio->bi_size)
+       if (!bio->bi_iter.bi_size)
                return MAP_DONE;
 
        return MAP_CONTINUE;
@@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
        trace_bcache_request_start(s->d, bio);
 
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
@@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                                      bcache_wq);
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-                                       &KEY(d->id, bio->bi_sector, 0),
+                                       &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                        &KEY(d->id, bio_end_sector(bio), 0));
 
                s->iop.bypass           = (bio->bi_rw & REQ_DISCARD) != 0;
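
Two new helpers from this series carry the weight in these hunks: __bio_clone_fast() (in do_bio_hook()) initialises a caller-provided bio to share the source's bi_io_vec, copying only bi_iter and the bookkeeping fields, and bio_clone_fast() is the allocating variant used for the writeback clone; neither duplicates the biovec array the way bio_clone_bioset() does. A sketch, assuming a 3.14-era kernel (the wrapper name is illustrative):

    #include <linux/bio.h>

    static struct bio *hook_clone(struct bio *orig, struct bio_set *bs,
                                  bio_end_io_t *endio, void *private)
    {
            struct bio *clone = bio_clone_fast(orig, GFP_NOIO, bs);

            if (!clone)
                    return NULL;

            /* clone->bi_io_vec == orig->bi_io_vec; only the iterator and
             * bookkeeping fields were copied.
             */
            clone->bi_end_io  = endio;
            clone->bi_private = private;
            return clone;
    }
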
index c57bfa071a57c58b06fabeb194cbf98f5f4fbf56..93d593f957f662829c30feba2ea2804e88fa8aa8 100644 (file)
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
        unsigned i;
 
-       bio->bi_sector  = SB_SECTOR;
-       bio->bi_rw      = REQ_SYNC|REQ_META;
-       bio->bi_size    = SB_SIZE;
+       bio->bi_iter.bi_sector  = SB_SECTOR;
+       bio->bi_rw              = REQ_SYNC|REQ_META;
+       bio->bi_iter.bi_size    = SB_SIZE;
        bch_bio_map(bio, NULL);
 
        out->offset             = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                struct bio *bio = bch_bbio_alloc(c);
 
                bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-               bio->bi_size    = KEY_SIZE(k) << 9;
+               bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
        closure_init_stack(cl);
 
-       bio->bi_sector  = bucket * ca->sb.bucket_size;
-       bio->bi_bdev    = ca->bdev;
-       bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-       bio->bi_size    = bucket_bytes(ca);
+       bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
+       bio->bi_bdev            = ca->bdev;
+       bio->bi_rw              = REQ_SYNC|REQ_META|rw;
+       bio->bi_iter.bi_size    = bucket_bytes(ca);
 
        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d)
        }
 
        bio_split_pool_free(&d->bio_split_hook);
-       if (d->unaligned_bvec)
-               mempool_destroy(d->unaligned_bvec);
        if (d->bio_split)
                bioset_free(d->bio_split);
        if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
                return minor;
 
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
-                               sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
            bio_split_pool_init(&d->bio_split_hook) ||
            !(d->disk = alloc_disk(1))) {
                ida_simple_remove(&bcache_minor, minor);
index bb37618e76648b7bc3caf99532e4f81b48666dfe..db3ae4c2b2233a4026ebe8a183042eb84d53cdc4 100644 (file)
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-       size_t size = bio->bi_size;
+       size_t size = bio->bi_iter.bi_size;
        struct bio_vec *bv = bio->bi_io_vec;
 
-       BUG_ON(!bio->bi_size);
+       BUG_ON(!bio->bi_iter.bi_size);
        BUG_ON(bio->bi_vcnt);
 
        bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
index 6c44fe059c2769a4b2c317f25878596268726f0d..f4300e4c0114a0cc1abc3b90f757a03666d2637b 100644 (file)
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-       bio->bi_size            = KEY_SIZE(&w->key) << 9;
+       bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
        bio->bi_private         = w;
        bio->bi_io_vec          = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
 
        dirty_init(w);
        io->bio.bi_rw           = WRITE;
-       io->bio.bi_sector       = KEY_START(&w->key);
+       io->bio.bi_iter.bi_sector = KEY_START(&w->key);
        io->bio.bi_bdev         = io->dc->bdev;
        io->bio.bi_end_io       = dirty_endio;
 
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
                io->dc          = dc;
 
                dirty_init(w);
-               io->bio.bi_sector       = PTR_OFFSET(&w->key, 0);
+               io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                    &w->key, 0)->bdev;
                io->bio.bi_rw           = READ;
index c9ddcf4614b9300701c9867033c82bc13cadf472..e2f8598937ac41ff5c7577bc5e65aeb39de95386 100644 (file)
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                return false;
 
        if (dc->partial_stripes_expensive &&
-           bcache_dev_stripe_dirty(dc, bio->bi_sector,
+           bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;
 
index 3a8cfa2645c72f6539170f2ab2d3242bb4a6fa58..dd3646111561512f50728aa915b8a279be1c26ac 100644 (file)
  * original bio state.
  */
 
-struct dm_bio_vec_details {
-#if PAGE_SIZE < 65536
-       __u16 bv_len;
-       __u16 bv_offset;
-#else
-       unsigned bv_len;
-       unsigned bv_offset;
-#endif
-};
-
 struct dm_bio_details {
-       sector_t bi_sector;
        struct block_device *bi_bdev;
-       unsigned int bi_size;
-       unsigned short bi_idx;
        unsigned long bi_flags;
-       struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+       struct bvec_iter bi_iter;
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
-       unsigned i;
-
-       bd->bi_sector = bio->bi_sector;
        bd->bi_bdev = bio->bi_bdev;
-       bd->bi_size = bio->bi_size;
-       bd->bi_idx = bio->bi_idx;
        bd->bi_flags = bio->bi_flags;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
-               bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
-       }
+       bd->bi_iter = bio->bi_iter;
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
-       unsigned i;
-
-       bio->bi_sector = bd->bi_sector;
        bio->bi_bdev = bd->bi_bdev;
-       bio->bi_size = bd->bi_size;
-       bio->bi_idx = bd->bi_idx;
        bio->bi_flags = bd->bi_flags;
-
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
-               bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
-       }
+       bio->bi_iter = bd->bi_iter;
 }
 
 #endif
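
The record/restore pair shrinks to copying one struct because a bvec_iter captures everything a remap-and-retry needs (sector, size, index and intra-bvec offset); the old per-bvec snapshot existed only because completion paths used to mutate bv_len/bv_offset in place. A sketch of a retry built on the new helpers, in the style of dm-raid1, assuming a 3.14-era kernel (names beyond dm_bio_restore() are illustrative):

    #include <linux/blkdev.h>
    #include "dm-bio-record.h"

    static void retry_on_origin(struct bio *bio, struct dm_bio_details *saved,
                                struct block_device *origin)
    {
            dm_bio_restore(saved, bio);     /* rewinds bi_iter, bi_flags, bi_bdev */
            bio->bi_bdev = origin;
            generic_make_request(bio);
    }
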
index 9ed42125514b38d560464e4dd3d741038db06858..66c5d130c8c24c4f3101ce78296460da4487f38b 100644 (file)
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
        bio_init(&b->bio);
        b->bio.bi_io_vec = b->bio_vec;
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
-       b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+       b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = end_io;
 
index 930e8c3d73e985b1e75769a9894f13ffd32d756a..1e018e986610a57ef9f82a818aa1f70a8c364e30 100644 (file)
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
 
 static void iot_update_stats(struct io_tracker *t, struct bio *bio)
 {
-       if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+       if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
                t->nr_rand_samples++;
        }
 
-       t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+       t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
 }
 
 static void iot_check_for_pattern_switch(struct io_tracker *t)
index 09334c275c79e91c7bf4fd41e18e641b2196073a..ffd472e015caa918facaed4f65a621c0f61e58a9 100644 (file)
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
        bio->bi_end_io = h->bi_end_io;
        bio->bi_private = h->bi_private;
+
+       /*
+        * Must bump bi_remaining to allow bio to complete with
+        * restored bi_end_io.
+        */
+       atomic_inc(&bio->bi_remaining);
 }
 
 /*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
 static void remap_to_cache(struct cache *cache, struct bio *bio,
                           dm_cblock_t cblock)
 {
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = cache->cache_dev->bdev;
        if (!block_size_is_power_of_two(cache))
-               bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
-                               sector_div(bi_sector, cache->sectors_per_block);
+               bio->bi_iter.bi_sector =
+                       (from_cblock(cblock) * cache->sectors_per_block) +
+                       sector_div(bi_sector, cache->sectors_per_block);
        else
-               bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
-                               (bi_sector & (cache->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (from_cblock(cblock) << cache->sectors_per_block_shift) |
+                       (bi_sector & (cache->sectors_per_block - 1));
 }
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 
 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 {
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (!block_size_is_power_of_two(cache))
                (void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 {
        return (bio_data_dir(bio) == WRITE) &&
-               (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+               (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
        size_t pb_data_size = get_per_bio_data_size(cache);
        struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-       BUG_ON(bio->bi_size);
+       BUG_ON(bio->bi_iter.bi_size);
        if (!pb->req_nr)
                remap_to_origin(cache, bio);
        else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
  */
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
-       dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+       dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
                                                  cache->discard_block_size);
-       dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+       dm_block_t end_block = bio_end_sector(bio);
        dm_block_t b;
 
        end_block = block_div(end_block, cache->discard_block_size);
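
The atomic_inc(&bio->bi_remaining) added to dm_unhook_bio() is fallout from the new bio chaining: every bio_endio() call decrements bi_remaining, so a bio whose hooked endio has already run once must be re-armed before the restored bi_end_io can complete it. A sketch of the pairing, mirroring dm_hook_bio()/dm_unhook_bio() above (3.14-era kernel assumed):

    #include <linux/bio.h>

    static void unhook_endio(struct bio *bio, bio_end_io_t *orig_endio,
                             void *orig_private)
    {
            bio->bi_end_io  = orig_endio;
            bio->bi_private = orig_private;

            /* bio_endio() consumed one bi_remaining reference when the hook
             * ran; put it back so the restored endio fires exactly once.
             */
            atomic_inc(&bio->bi_remaining);
    }
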
index 81b0fa66045204604a979fdc929e723f6c914a5c..784695d22fde1acaaf11acd78c7263438c04648e 100644 (file)
@@ -39,10 +39,8 @@ struct convert_context {
        struct completion restart;
        struct bio *bio_in;
        struct bio *bio_out;
-       unsigned int offset_in;
-       unsigned int offset_out;
-       unsigned int idx_in;
-       unsigned int idx_out;
+       struct bvec_iter iter_in;
+       struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
 {
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
-       ctx->offset_in = 0;
-       ctx->offset_out = 0;
-       ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-       ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+       if (bio_in)
+               ctx->iter_in = bio_in->bi_iter;
+       if (bio_out)
+               ctx->iter_out = bio_out->bi_iter;
        ctx->cc_sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
 {
-       struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-       struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+       struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+       struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
        struct dm_crypt_request *dmreq;
        u8 *iv;
        int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
        dmreq->iv_sector = ctx->cc_sector;
        dmreq->ctx = ctx;
        sg_init_table(&dmreq->sg_in, 1);
-       sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-                   bv_in->bv_offset + ctx->offset_in);
+       sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+                   bv_in.bv_offset);
 
        sg_init_table(&dmreq->sg_out, 1);
-       sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-                   bv_out->bv_offset + ctx->offset_out);
+       sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+                   bv_out.bv_offset);
 
-       ctx->offset_in += 1 << SECTOR_SHIFT;
-       if (ctx->offset_in >= bv_in->bv_len) {
-               ctx->offset_in = 0;
-               ctx->idx_in++;
-       }
-
-       ctx->offset_out += 1 << SECTOR_SHIFT;
-       if (ctx->offset_out >= bv_out->bv_len) {
-               ctx->offset_out = 0;
-               ctx->idx_out++;
-       }
+       bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+       bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
 
        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
 
        atomic_set(&ctx->cc_pending, 1);
 
-       while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-             ctx->idx_out < ctx->bio_out->bi_vcnt) {
+       while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
                crypt_alloc_req(cc, ctx);
 
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                size -= len;
        }
 
-       if (!clone->bi_size) {
+       if (!clone->bi_iter.bi_size) {
                bio_put(clone);
                return NULL;
        }
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        crypt_inc_pending(io);
 
        clone_init(io, clone);
-       clone->bi_sector = cc->start + io->sector;
+       clone->bi_iter.bi_sector = cc->start + io->sector;
 
        generic_make_request(clone);
        return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
        }
 
        /* crypt_convert should have filled the clone bio */
-       BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+       BUG_ON(io->ctx.iter_out.bi_size);
 
-       clone->bi_sector = cc->start + io->sector;
+       clone->bi_iter.bi_sector = cc->start + io->sector;
 
        if (async)
                kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        struct dm_crypt_io *new_io;
        int crypt_finished;
        unsigned out_of_pages = 0;
-       unsigned remaining = io->base_bio->bi_size;
+       unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;
 
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                }
 
                io->ctx.bio_out = clone;
-               io->ctx.idx_out = 0;
+               io->ctx.iter_out = clone->bi_iter;
 
-               remaining -= clone->bi_size;
+               remaining -= clone->bi_iter.bi_size;
                sector += bio_sectors(clone);
 
                crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                        crypt_inc_pending(new_io);
                        crypt_convert_init(cc, &new_io->ctx, NULL,
                                           io->base_bio, sector);
-                       new_io->ctx.idx_in = io->ctx.idx_in;
-                       new_io->ctx.offset_in = io->ctx.offset_in;
+                       new_io->ctx.iter_in = io->ctx.iter_in;
 
                        /*
                         * Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
        if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
-                       bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+                       bio->bi_iter.bi_sector = cc->start +
+                               dm_target_offset(ti, bio->bi_iter.bi_sector);
                return DM_MAPIO_REMAPPED;
        }
 
-       io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+       io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
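The dm-crypt hunks above are the canonical conversion: the hand-rolled (idx, offset) bookkeeping becomes a pair of private struct bvec_iter copies advanced with bio_advance_iter(). A minimal sketch of the idiom, assuming the 3.14-era iterator API (walk_sectors is a hypothetical helper, not code from this series):

	#include <linux/bio.h>

	static void walk_sectors(struct bio *bio)
	{
		/* private copy: the bio's own bi_iter is left untouched */
		struct bvec_iter iter = bio->bi_iter;

		while (iter.bi_size) {
			/* returned by value, clamped to what remains */
			struct bio_vec bv = bio_iter_iovec(bio, iter);

			/* consume 512 bytes at bv.bv_page + bv.bv_offset
			 * (dm-crypt relies on segments holding whole
			 * 512-byte sectors, as crypt_convert_block() does) */
			bio_advance_iter(bio, &iter, 512);
		}
	}

Because the iterator is a plain value, handing work between contexts (as kcryptd does) is a struct assignment rather than a bvec array copy.
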
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index a8a511c053a5d5fda6574933e616719256768d31..42c3a27a14cc3a906b5f892a6206de348b6b58ee 100644
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
        if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
                bio->bi_bdev = dc->dev_write->bdev;
                if (bio_sectors(bio))
-                       bio->bi_sector = dc->start_write +
-                                        dm_target_offset(ti, bio->bi_sector);
+                       bio->bi_iter.bi_sector = dc->start_write +
+                               dm_target_offset(ti, bio->bi_iter.bi_sector);
 
                return delay_bio(dc, dc->write_delay, bio);
        }
 
        bio->bi_bdev = dc->dev_read->bdev;
-       bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dc->start_read +
+               dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return delay_bio(dc, dc->read_delay, bio);
 }
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f1269b40be7da133c68c6e789c5e5329..b257e46876d357f831bf1c751010d5b16fa125b8 100644
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = fc->dev->bdev;
        if (bio_sectors(bio))
-               bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+               bio->bi_iter.bi_sector =
+                       flakey_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
                DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
                        "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
                        bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
-                       bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+                       (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
        }
 }
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec9701cd25e443c990f2b7a8479f9f..b2b8a10e842784de5454e2639474f1a208b4b3f1 100644
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 /*
  * Functions for getting the pages from a bvec.
  */
-static void bvec_get_page(struct dpages *dp,
+static void bio_get_page(struct dpages *dp,
                  struct page **p, unsigned long *len, unsigned *offset)
 {
-       struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-       *p = bvec->bv_page;
-       *len = bvec->bv_len;
-       *offset = bvec->bv_offset;
+       struct bio *bio = dp->context_ptr;
+       struct bio_vec bvec = bio_iovec(bio);
+       *p = bvec.bv_page;
+       *len = bvec.bv_len;
+       *offset = bvec.bv_offset;
 }
 
-static void bvec_next_page(struct dpages *dp)
+static void bio_next_page(struct dpages *dp)
 {
-       struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
-       dp->context_ptr = bvec + 1;
+       struct bio *bio = dp->context_ptr;
+       struct bio_vec bvec = bio_iovec(bio);
+
+       bio_advance(bio, bvec.bv_len);
 }
 
-static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
+static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
-       dp->get_page = bvec_get_page;
-       dp->next_page = bvec_next_page;
-       dp->context_ptr = bvec;
+       dp->get_page = bio_get_page;
+       dp->next_page = bio_next_page;
+       dp->context_ptr = bio;
 }
 
 /*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                                          dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-               bio->bi_sector = where->sector + (where->count - remaining);
+               bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                store_io_and_region_in_bio(bio, io, region);
 
                if (rw & REQ_DISCARD) {
                        num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-                       bio->bi_size = num_sectors << SECTOR_SHIFT;
+                       bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
                } else if (rw & REQ_WRITE_SAME) {
                        /*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                        dp->get_page(dp, &page, &len, &offset);
                        bio_add_page(bio, page, logical_block_size, offset);
                        num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
-                       bio->bi_size = num_sectors << SECTOR_SHIFT;
+                       bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
                        offset = 0;
                        remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;
 
-       case DM_IO_BVEC:
-               bvec_dp_init(dp, io_req->mem.ptr.bvec);
+       case DM_IO_BIO:
+               bio_dp_init(dp, io_req->mem.ptr.bio);
                break;
 
        case DM_IO_VMA:
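Note that the new dm-io page stepper consumes the bio itself: bio_iovec() now yields the first segment of whatever remains, and bio_advance() moves bio->bi_iter (and any integrity iterator) past it. Roughly, as a standalone sketch (consume_segments is hypothetical):

	#include <linux/bio.h>

	static void consume_segments(struct bio *bio)
	{
		while (bio->bi_iter.bi_size) {
			/* first segment of the *remaining* bio, by value */
			struct bio_vec bv = bio_iovec(bio);

			/* ... hand bv.bv_page/bv.bv_offset/bv.bv_len off
			 * to the actual I/O, as bio_get_page() does ... */
			bio_advance(bio, bv.bv_len);
		}
	}
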
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340cdb48c3a7b64edcdd4ed9a7fd48ea..53e848c1093936560a9554c9fdacbec2f6dae5bd 100644
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = lc->dev->bdev;
        if (bio_sectors(bio))
-               bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+               bio->bi_iter.bi_sector =
+                       linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c56148608d159ceab1d436fd6bacfda3b..f284e0bfb25fca869855f390f2d8d7a5519a0864 100644
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
        region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
        if (log->type->in_sync(log, region, 0))
-               return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;
+               return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 
        return 0;
 }
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
-       if (unlikely(!bio->bi_size))
+       if (unlikely(!bio->bi_iter.bi_size))
                return 0;
-       return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+       return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
        bio->bi_bdev = m->dev->bdev;
-       bio->bi_sector = map_sector(m, bio);
+       bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
 static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
        struct dm_io_region io;
        struct dm_io_request io_req = {
                .bi_rw = READ,
-               .mem.type = DM_IO_BVEC,
-               .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+               .mem.type = DM_IO_BIO,
+               .mem.ptr.bio = bio,
                .notify.fn = read_callback,
                .notify.context = bio,
                .client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
                 * We can only read balance if the region is in sync.
                 */
                if (likely(region_in_sync(ms, region, 1)))
-                       m = choose_mirror(ms, bio->bi_sector);
+                       m = choose_mirror(ms, bio->bi_iter.bi_sector);
                else if (m && atomic_read(&m->error_count))
                        m = NULL;
 
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
-               .mem.type = DM_IO_BVEC,
-               .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+               .mem.type = DM_IO_BIO,
+               .mem.ptr.bio = bio,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         * The region is in-sync and we can perform reads directly.
         * Store enough information so we can retry if it fails.
         */
-       m = choose_mirror(ms, bio->bi_sector);
+       m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
                return -EIO;
 
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb3490d636a0183bd22303742ab65c65..b929fd5f4984bb67fbb62474e24e5af425758770 100644
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
 
 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
 {
-       return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+       return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+                                     rh->target_begin);
 }
 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 717718558bd9908469b23bbb9b3cd0223ac243f3..ebddef5237e4b28e6254e486b3267dbccca9864e 100644
@@ -1438,6 +1438,7 @@ out:
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
+               atomic_inc(&full_bio->bi_remaining);
        }
        free_pending_exception(pe);
 
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
 {
        bio->bi_bdev = s->cow->bdev;
-       bio->bi_sector = chunk_to_sector(s->store,
-                                        dm_chunk_number(e->new_chunk) +
-                                        (chunk - e->old_chunk)) +
-                                        (bio->bi_sector &
-                                         s->store->chunk_mask);
+       bio->bi_iter.bi_sector =
+               chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+                               (chunk - e->old_chunk)) +
+               (bio->bi_iter.bi_sector & s->store->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
-       chunk = sector_to_chunk(s->store, bio->bi_sector);
+       chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                r = DM_MAPIO_SUBMITTED;
 
                if (!pe->started &&
-                   bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+                   bio->bi_iter.bi_size ==
+                   (s->store->chunk_size << SECTOR_SHIFT)) {
                        pe->started = 1;
                        up_write(&s->lock);
                        start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
-       chunk = sector_to_chunk(s->store, bio->bi_sector);
+       chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
        down_write(&s->lock);
 
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
-               r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+               r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
        up_read(&_origins_lock);
 
        return r;
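The atomic_inc(&full_bio->bi_remaining) above (and the matching increments in the dm-thin hunks below) pairs with generic bio chaining: bio_endio() now drops a bi_remaining reference, so a driver that saved and later restored bi_end_io must put one back or the completion is eaten. Sketched under the 3.14 chaining semantics (restore_saved_endio is a hypothetical helper):

	#include <linux/bio.h>

	/* undo an earlier bi_end_io hijack */
	static void restore_saved_endio(struct bio *bio, bio_end_io_t *saved,
					void *saved_private)
	{
		bio->bi_end_io = saved;
		bio->bi_private = saved_private;
		/* re-take the reference the coming bio_endio() will drop */
		atomic_inc(&bio->bi_remaining);
	}
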
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96d09f2760416852dd0cacd22cbd33..d1600d2aa2e2e6983643ef0ef864195f858d4f9d 100644
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
 {
        sector_t begin, end;
 
-       stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+       stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+                               target_stripe, &begin);
        stripe_map_range_sector(sc, bio_end_sector(bio),
                                target_stripe, &end);
        if (begin < end) {
                bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
-               bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
-               bio->bi_size = to_bytes(end - begin);
+               bio->bi_iter.bi_sector = begin +
+                       sc->stripe[target_stripe].physical_start;
+               bio->bi_iter.bi_size = to_bytes(end - begin);
                return DM_MAPIO_REMAPPED;
        } else {
                /* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
                return stripe_map_range(sc, bio, target_bio_nr);
        }
 
-       stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+       stripe_map_sector(sc, bio->bi_iter.bi_sector,
+                         &stripe, &bio->bi_iter.bi_sector);
 
-       bio->bi_sector += sc->stripe[stripe].physical_start;
+       bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
        bio->bi_bdev = sc->stripe[stripe].dev->bdev;
 
        return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be47210839369e233d1d1dfc161cbfb852..09a688b3d48ca1445e136544321a54b112b280e1 100644
@@ -311,11 +311,11 @@ error:
 static int switch_map(struct dm_target *ti, struct bio *bio)
 {
        struct switch_ctx *sctx = ti->private;
-       sector_t offset = dm_target_offset(ti, bio->bi_sector);
+       sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
        unsigned path_nr = switch_get_path_nr(sctx, offset);
 
        bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
-       bio->bi_sector = sctx->path_list[path_nr].start + offset;
+       bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
        return DM_MAPIO_REMAPPED;
 }
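dm-delay, dm-flakey, dm-linear, dm-stripe and dm-switch all make the same mechanical change: the remap that used to write bio->bi_sector now writes bio->bi_iter.bi_sector. The shared pattern, restated as a sketch (remap_bio is hypothetical; start is the target's device offset):

	#include <linux/device-mapper.h>

	static int remap_bio(struct dm_target *ti, struct bio *bio,
			     struct block_device *bdev, sector_t start)
	{
		bio->bi_bdev = bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}
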
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 726228b33a012f9994fc2f8843b25a0ca46ef966..faaf944597ab7669b90f3ecb85152fbcd16cbe33 100644
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
-       sector_t block_nr = bio->bi_sector;
+       sector_t block_nr = bio->bi_iter.bi_sector;
 
        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
        struct pool *pool = tc->pool;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        bio->bi_bdev = tc->pool_dev->bdev;
        if (block_size_is_power_of_two(pool))
-               bio->bi_sector = (block << pool->sectors_per_block_shift) |
-                               (bi_sector & (pool->sectors_per_block - 1));
+               bio->bi_iter.bi_sector =
+                       (block << pool->sectors_per_block_shift) |
+                       (bi_sector & (pool->sectors_per_block - 1));
        else
-               bio->bi_sector = (block * pool->sectors_per_block) +
+               bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                 sector_div(bi_sector, pool->sectors_per_block);
 }
 
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
 
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-       if (m->bio)
+       if (m->bio) {
                m->bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&m->bio->bi_remaining);
+       }
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        int r;
 
        bio = m->bio;
-       if (bio)
+       if (bio) {
                bio->bi_end_io = m->saved_bi_end_io;
+               atomic_inc(&bio->bi_remaining);
+       }
 
        if (m->err) {
                cell_error(pool, m->cell);
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-       return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+       return bio->bi_iter.bi_size ==
+               (pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
        if (bio_detain(pool, &key, bio, &cell))
                return;
 
-       if (bio_data_dir(bio) == WRITE && bio->bi_size)
+       if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
-       if (!bio->bi_size) {
+       if (!bio->bi_iter.bi_size) {
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);
 
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
-               if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+               if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
                        handle_unserviceable_bio(tc->pool, bio);
                else {
                        inc_all_io_entry(tc->pool, bio);
@@ -2939,7 +2945,7 @@ out_unlock:
 
 static int thin_map(struct dm_target *ti, struct bio *bio)
 {
-       bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+       bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 
        return thin_bio_map(ti, bio);
 }
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff33223481464f24a162d101ab7698..796007a5e0e1a4b6e83b0871c1fca1ef8c0c461f 100644
@@ -73,15 +73,10 @@ struct dm_verity_io {
        sector_t block;
        unsigned n_blocks;
 
-       /* saved bio vector */
-       struct bio_vec *io_vec;
-       unsigned io_vec_size;
+       struct bvec_iter iter;
 
        struct work_struct work;
 
-       /* A space for short vectors; longer vectors are allocated separately. */
-       struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
-
        /*
         * Three variably-size fields follow this struct:
         *
@@ -284,9 +279,10 @@ release_ret_r:
 static int verity_verify_io(struct dm_verity_io *io)
 {
        struct dm_verity *v = io->v;
+       struct bio *bio = dm_bio_from_per_bio_data(io,
+                                                  v->ti->per_bio_data_size);
        unsigned b;
        int i;
-       unsigned vector = 0, offset = 0;
 
        for (b = 0; b < io->n_blocks; b++) {
                struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
                }
 
                todo = 1 << v->data_dev_block_bits;
-               do {
-                       struct bio_vec *bv;
+               while (io->iter.bi_size) {
                        u8 *page;
-                       unsigned len;
-
-                       BUG_ON(vector >= io->io_vec_size);
-                       bv = &io->io_vec[vector];
-                       page = kmap_atomic(bv->bv_page);
-                       len = bv->bv_len - offset;
-                       if (likely(len >= todo))
-                               len = todo;
-                       r = crypto_shash_update(desc,
-                                       page + bv->bv_offset + offset, len);
+                       struct bio_vec bv = bio_iter_iovec(bio, io->iter);
+
+                       page = kmap_atomic(bv.bv_page);
+                       r = crypto_shash_update(desc, page + bv.bv_offset,
+                                               bv.bv_len);
                        kunmap_atomic(page);
+
                        if (r < 0) {
                                DMERR("crypto_shash_update failed: %d", r);
                                return r;
                        }
-                       offset += len;
-                       if (likely(offset == bv->bv_len)) {
-                               offset = 0;
-                               vector++;
-                       }
-                       todo -= len;
-               } while (todo);
+
+                       bio_advance_iter(bio, &io->iter, bv.bv_len);
+               }
 
                if (!v->version) {
                        r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
                        return -EIO;
                }
        }
-       BUG_ON(vector != io->io_vec_size);
-       BUG_ON(offset);
 
        return 0;
 }
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
        bio->bi_end_io = io->orig_bi_end_io;
        bio->bi_private = io->orig_bi_private;
 
-       if (io->io_vec != io->io_vec_inline)
-               mempool_free(io->io_vec, v->vec_mempool);
-
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        struct dm_verity_io *io;
 
        bio->bi_bdev = v->data_dev->bdev;
-       bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+       bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
-       if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+       if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
                return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        io->v = v;
        io->orig_bi_end_io = bio->bi_end_io;
        io->orig_bi_private = bio->bi_private;
-       io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
-       io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+       io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+       io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
 
        bio->bi_end_io = verity_end_io;
        bio->bi_private = io;
-       io->io_vec_size = bio_segments(bio);
-       if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
-               io->io_vec = io->io_vec_inline;
-       else
-               io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
-       memcpy(io->io_vec, bio_iovec(bio),
-              io->io_vec_size * sizeof(struct bio_vec));
+       io->iter = bio->bi_iter;
 
        verity_submit_prefetch(v, io);
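dm-verity's io->iter shows the other recurring idiom: snapshot bio->bi_iter at map time (a cheap struct copy, replacing the memcpy'd bvec array) and re-walk the data pages later with bio_iter_iovec(). A hedged sketch of such a re-walk, with a comment standing in for crypto_shash_update() (rewalk is hypothetical):

	#include <linux/bio.h>
	#include <linux/highmem.h>

	/* iter is passed by value, so the caller's saved copy survives */
	static void rewalk(struct bio *bio, struct bvec_iter iter,
			   unsigned todo)
	{
		while (todo && iter.bi_size) {
			struct bio_vec bv = bio_iter_iovec(bio, iter);
			unsigned len = min(bv.bv_len, todo);
			void *p = kmap_atomic(bv.bv_page);

			/* ... hash len bytes at p + bv.bv_offset ... */
			kunmap_atomic(p);

			bio_advance_iter(bio, &iter, len);
			todo -= len;
		}
	}
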
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b49c7628424171f0622ed4446e5c4111b00ba418..8c53b09b9a2c5a3050b22f4fba82af5563f1d59a 100644
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
                atomic_inc_return(&md->pending[rw]));
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
        part_stat_unlock();
 
        if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
                                    bio_sectors(bio), true, duration, &io->stats_aux);
 
        /*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
                if (io_error == DM_ENDIO_REQUEUE)
                        return;
 
-               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+               if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
        struct dm_rq_clone_bio_info *info = clone->bi_private;
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
-       unsigned int nr_bytes = info->orig->bi_size;
+       unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 
        bio_put(clone);
 
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
         * this io.
         */
        atomic_inc(&tio->io->io_count);
-       sector = clone->bi_sector;
+       sector = clone->bi_iter.bi_sector;
        r = ti->type->map(ti, clone);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
-       unsigned short idx;
 };
 
 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
 {
-       bio->bi_sector = sector;
-       bio->bi_size = to_bytes(len);
-}
-
-static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
-{
-       bio->bi_idx = idx;
-       bio->bi_vcnt = idx + bv_count;
-       bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-}
-
-static void clone_bio_integrity(struct bio *bio, struct bio *clone,
-                               unsigned short idx, unsigned len, unsigned offset,
-                               unsigned trim)
-{
-       if (!bio_integrity(bio))
-               return;
-
-       bio_integrity_clone(clone, bio, GFP_NOIO);
-
-       if (trim)
-               bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
-}
-
-/*
- * Creates a little bio that just does part of a bvec.
- */
-static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
-                           sector_t sector, unsigned short idx,
-                           unsigned offset, unsigned len)
-{
-       struct bio *clone = &tio->clone;
-       struct bio_vec *bv = bio->bi_io_vec + idx;
-
-       *clone->bi_io_vec = *bv;
-
-       bio_setup_sector(clone, sector, len);
-
-       clone->bi_bdev = bio->bi_bdev;
-       clone->bi_rw = bio->bi_rw;
-       clone->bi_vcnt = 1;
-       clone->bi_io_vec->bv_offset = offset;
-       clone->bi_io_vec->bv_len = clone->bi_size;
-       clone->bi_flags |= 1 << BIO_CLONED;
-
-       clone_bio_integrity(bio, clone, idx, len, offset, 1);
+       bio->bi_iter.bi_sector = sector;
+       bio->bi_iter.bi_size = to_bytes(len);
 }
 
 /*
  * Creates a bio that consists of range of complete bvecs.
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
-                     sector_t sector, unsigned short idx,
-                     unsigned short bv_count, unsigned len)
+                     sector_t sector, unsigned len)
 {
        struct bio *clone = &tio->clone;
-       unsigned trim = 0;
 
-       __bio_clone(clone, bio);
-       bio_setup_sector(clone, sector, len);
-       bio_setup_bv(clone, idx, bv_count);
+       __bio_clone_fast(clone, bio);
+
+       if (bio_integrity(bio))
+               bio_integrity_clone(clone, bio, GFP_NOIO);
+
+       bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+       clone->bi_iter.bi_size = to_bytes(len);
 
-       if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
-               trim = 1;
-       clone_bio_integrity(bio, clone, idx, len, 0, trim);
+       if (bio_integrity(bio))
+               bio_integrity_trim(clone, 0, len);
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
         * and discard, so no need for concern about wasted bvec allocations.
         */
-        __bio_clone(clone, ci->bio);
+        __bio_clone_fast(clone, ci->bio);
        if (len)
                bio_setup_sector(clone, ci->sector, len);
 
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
 }
 
 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-                                    sector_t sector, int nr_iovecs,
-                                    unsigned short idx, unsigned short bv_count,
-                                    unsigned offset, unsigned len,
-                                    unsigned split_bvec)
+                                    sector_t sector, unsigned len)
 {
        struct bio *bio = ci->bio;
        struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
                num_target_bios = ti->num_write_bios(ti, bio);
 
        for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
-               tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
-               if (split_bvec)
-                       clone_split_bio(tio, bio, sector, idx, offset, len);
-               else
-                       clone_bio(tio, bio, sector, idx, bv_count, len);
+               tio = alloc_tio(ci, ti, 0, target_bio_nr);
+               clone_bio(tio, bio, sector, len);
                __map_bio(tio);
        }
 }
@@ -1378,60 +1328,6 @@ static int __send_write_same(struct clone_info *ci)
        return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
-/*
- * Find maximum number of sectors / bvecs we can process with a single bio.
- */
-static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
-{
-       struct bio *bio = ci->bio;
-       sector_t bv_len, total_len = 0;
-
-       for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
-               bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
-
-               if (bv_len > max)
-                       break;
-
-               max -= bv_len;
-               total_len += bv_len;
-       }
-
-       return total_len;
-}
-
-static int __split_bvec_across_targets(struct clone_info *ci,
-                                      struct dm_target *ti, sector_t max)
-{
-       struct bio *bio = ci->bio;
-       struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-       sector_t remaining = to_sector(bv->bv_len);
-       unsigned offset = 0;
-       sector_t len;
-
-       do {
-               if (offset) {
-                       ti = dm_table_find_target(ci->map, ci->sector);
-                       if (!dm_target_is_valid(ti))
-                               return -EIO;
-
-                       max = max_io_len(ci->sector, ti);
-               }
-
-               len = min(remaining, max);
-
-               __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
-                                        bv->bv_offset + offset, len, 1);
-
-               ci->sector += len;
-               ci->sector_count -= len;
-               offset += to_bytes(len);
-       } while (remaining -= len);
-
-       ci->idx++;
-
-       return 0;
-}
-
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
@@ -1439,8 +1335,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 {
        struct bio *bio = ci->bio;
        struct dm_target *ti;
-       sector_t len, max;
-       int idx;
+       unsigned len;
 
        if (unlikely(bio->bi_rw & REQ_DISCARD))
                return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
        if (!dm_target_is_valid(ti))
                return -EIO;
 
-       max = max_io_len(ci->sector, ti);
-
-       /*
-        * Optimise for the simple case where we can do all of
-        * the remaining io with a single clone.
-        */
-       if (ci->sector_count <= max) {
-               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-                                        ci->idx, bio->bi_vcnt - ci->idx, 0,
-                                        ci->sector_count, 0);
-               ci->sector_count = 0;
-               return 0;
-       }
-
-       /*
-        * There are some bvecs that don't span targets.
-        * Do as many of these as possible.
-        */
-       if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-               len = __len_within_target(ci, max, &idx);
-
-               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
-                                        ci->idx, idx - ci->idx, 0, len, 0);
+       len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 
-               ci->sector += len;
-               ci->sector_count -= len;
-               ci->idx = idx;
+       __clone_and_map_data_bio(ci, ti, ci->sector, len);
 
-               return 0;
-       }
+       ci->sector += len;
+       ci->sector_count -= len;
 
-       /*
-        * Handle a bvec that must be split between two or more targets.
-        */
-       return __split_bvec_across_targets(ci, ti, max);
+       return 0;
 }
 
 /*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
        ci.io->bio = bio;
        ci.io->md = md;
        spin_lock_init(&ci.io->endio_lock);
-       ci.sector = bio->bi_sector;
-       ci.idx = bio->bi_idx;
+       ci.sector = bio->bi_iter.bi_sector;
 
        start_io_acct(ci.io);
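clone_bio() above collapses the old three clone paths (whole bios, bvec ranges, split bvecs) into one sequence: __bio_clone_fast() shares the parent's bvec array, bio_advance() skips to the wanted sector, and shrinking bi_size trims the tail. Restated as a sketch (clone_range is hypothetical; to_bytes() is dm's sector-to-byte helper):

	#include <linux/bio.h>
	#include <linux/device-mapper.h>

	static void clone_range(struct bio *clone, struct bio *parent,
				sector_t sector, unsigned len)
	{
		/* shares parent->bi_io_vec and copies parent->bi_iter */
		__bio_clone_fast(clone, parent);
		bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
		clone->bi_iter.bi_size = to_bytes(len);
	}
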
 
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b7b42badf4eba4adc36f89439d70c..e8b4574956c73e500cd634fa0acafad4fad0b93d 100644
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
 {
        struct bio *b = bio->bi_private;
 
-       b->bi_size = bio->bi_size;
-       b->bi_sector = bio->bi_sector;
+       b->bi_iter.bi_size = bio->bi_iter.bi_size;
+       b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 
        bio_put(bio);
 
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
                        return;
                }
 
-               if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+               if (check_sector(conf, bio->bi_iter.bi_sector,
+                                bio_end_sector(bio), WRITE))
                        failit = 1;
                if (check_mode(conf, WritePersistent)) {
-                       add_sector(conf, bio->bi_sector, WritePersistent);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  WritePersistent);
                        failit = 1;
                }
                if (check_mode(conf, WriteTransient))
                        failit = 1;
        } else {
                /* read request */
-               if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+               if (check_sector(conf, bio->bi_iter.bi_sector,
+                                bio_end_sector(bio), READ))
                        failit = 1;
                if (check_mode(conf, ReadTransient))
                        failit = 1;
                if (check_mode(conf, ReadPersistent)) {
-                       add_sector(conf, bio->bi_sector, ReadPersistent);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  ReadPersistent);
                        failit = 1;
                }
                if (check_mode(conf, ReadFixable)) {
-                       add_sector(conf, bio->bi_sector, ReadFixable);
+                       add_sector(conf, bio->bi_iter.bi_sector,
+                                  ReadFixable);
                        failit = 1;
                }
        }
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37bacf34a231a0bb034a6d8f2826e68..56f534b4a2d27036b1f820f8bf4bfed56a9b2002 100644
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
 
 static void linear_make_request(struct mddev *mddev, struct bio *bio)
 {
+       char b[BDEVNAME_SIZE];
        struct dev_info *tmp_dev;
-       sector_t start_sector;
+       struct bio *split;
+       sector_t start_sector, end_sector, data_offset;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
 
-       rcu_read_lock();
-       tmp_dev = which_dev(mddev, bio->bi_sector);
-       start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-
-
-       if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
-                    || (bio->bi_sector < start_sector))) {
-               char b[BDEVNAME_SIZE];
-
-               printk(KERN_ERR
-                      "md/linear:%s: make_request: Sector %llu out of bounds on "
-                      "dev %s: %llu sectors, offset %llu\n",
-                      mdname(mddev),
-                      (unsigned long long)bio->bi_sector,
-                      bdevname(tmp_dev->rdev->bdev, b),
-                      (unsigned long long)tmp_dev->rdev->sectors,
-                      (unsigned long long)start_sector);
-               rcu_read_unlock();
-               bio_io_error(bio);
-               return;
-       }
-       if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
-               /* This bio crosses a device boundary, so we have to
-                * split it.
-                */
-               struct bio_pair *bp;
-               sector_t end_sector = tmp_dev->end_sector;
+       do {
+               rcu_read_lock();
 
-               rcu_read_unlock();
-
-               bp = bio_split(bio, end_sector - bio->bi_sector);
+               tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+               start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+               end_sector = tmp_dev->end_sector;
+               data_offset = tmp_dev->rdev->data_offset;
+               bio->bi_bdev = tmp_dev->rdev->bdev;
 
-               linear_make_request(mddev, &bp->bio1);
-               linear_make_request(mddev, &bp->bio2);
-               bio_pair_release(bp);
-               return;
-       }
-                   
-       bio->bi_bdev = tmp_dev->rdev->bdev;
-       bio->bi_sector = bio->bi_sector - start_sector
-               + tmp_dev->rdev->data_offset;
-       rcu_read_unlock();
+               rcu_read_unlock();
 
-       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-               /* Just ignore it */
-               bio_endio(bio, 0);
-               return;
-       }
+               if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
+                            bio->bi_iter.bi_sector < start_sector))
+                       goto out_of_bounds;
+
+               if (unlikely(bio_end_sector(bio) > end_sector)) {
+                       /* This bio crosses a device boundary, so we have to
+                        * split it.
+                        */
+                       split = bio_split(bio, end_sector -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
 
-       generic_make_request(bio);
+               split->bi_iter.bi_sector = split->bi_iter.bi_sector -
+                       start_sector + data_offset;
+
+               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+                       /* Just ignore it */
+                       bio_endio(split, 0);
+               } else
+                       generic_make_request(split);
+       } while (split != bio);
+       return;
+
+out_of_bounds:
+       printk(KERN_ERR
+              "md/linear:%s: make_request: Sector %llu out of bounds on "
+              "dev %s: %llu sectors, offset %llu\n",
+              mdname(mddev),
+              (unsigned long long)bio->bi_iter.bi_sector,
+              bdevname(tmp_dev->rdev->bdev, b),
+              (unsigned long long)tmp_dev->rdev->sectors,
+              (unsigned long long)start_sector);
+       bio_io_error(bio);
 }
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 40c531359a15af61ad9c3ba70506d1863085dffe..4ad5cc4e63e8438ca3c32fea1f40f69ec71657fb 100644
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;
 
-       if (bio->bi_size == 0)
+       if (bio->bi_iter.bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
        bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
        int ret;
 
-       rw |= REQ_SYNC;
-
        bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
                rdev->meta_bdev : rdev->bdev;
        if (metadata_op)
-               bio->bi_sector = sector + rdev->sb_start;
+               bio->bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
                 (rdev->mddev->reshape_backwards ==
                  (sector >= rdev->mddev->reshape_position)))
-               bio->bi_sector = sector + rdev->new_data_offset;
+               bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
        else
-               bio->bi_sector = sector + rdev->data_offset;
+               bio->bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(bio, page, size, 0);
        submit_bio_wait(rw, bio);
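The dropped "rw |= REQ_SYNC" is redundant because submit_bio_wait() already ORs REQ_SYNC into the request flags before submitting. For reference, a minimal synchronous read built on the same 3.14-era call (read_page_sync is a hypothetical helper; error handling is reduced to the return value):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static int read_page_sync(struct block_device *bdev, sector_t sector,
				  struct page *page)
	{
		struct bio *bio = bio_alloc(GFP_NOIO, 1);
		int ret;

		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector;
		bio_add_page(bio, page, PAGE_SIZE, 0);
		ret = submit_bio_wait(READ, bio);
		bio_put(bio);
		return ret;
	}
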
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a3335d1282a4bf53751802e1aeb52db..849ad39f547b9c1fbb8d993e118261a33b242134 100644
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
                md_error (mp_bh->mddev, rdev);
                printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
                       bdevname(rdev->bdev,b), 
-                      (unsigned long long)bio->bi_sector);
+                      (unsigned long long)bio->bi_iter.bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
                multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
        multipath = conf->multipaths + mp_bh->path;
 
        mp_bh->bio = *bio;
-       mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+       mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
        mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
                spin_unlock_irqrestore(&conf->device_lock, flags);
 
                bio = &mp_bh->bio;
-               bio->bi_sector = mp_bh->master_bio->bi_sector;
+               bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
                
                if ((mp_bh->path = multipath_map (conf))<0) {
                        printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
                                " error for block %llu\n",
                                bdevname(bio->bi_bdev,b),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                        multipath_end_bh_io(mp_bh, -EIO);
                } else {
                        printk(KERN_ERR "multipath: %s: redirecting sector %llu"
                                " to another IO path\n",
                                bdevname(bio->bi_bdev,b),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                        *bio = *(mp_bh->master_bio);
-                       bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+                       bio->bi_iter.bi_sector +=
+                               conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
                        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f43d0804e1c1a94d88ca63484b5ec3..407a99e46f6993a770c21fdfff1972b7f64063b6 100644
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                        unsigned int chunk_sects, struct bio *bio)
 {
        if (likely(is_power_of_2(chunk_sects))) {
-               return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+               return chunk_sects >=
+                       ((bio->bi_iter.bi_sector & (chunk_sects-1))
                                        + bio_sectors(bio));
        } else{
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                                + bio_sectors(bio));
        }
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
-       unsigned int chunk_sects;
-       sector_t sector_offset;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;
+       struct bio *split;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
 
-       chunk_sects = mddev->chunk_sectors;
-       if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
-               sector_t sector = bio->bi_sector;
-               struct bio_pair *bp;
-               /* Sanity check -- queue functions should prevent this happening */
-               if (bio_segments(bio) > 1)
-                       goto bad_map;
-               /* This is a one page bio that upper layers
-                * refuse to split for us, so we need to split it.
-                */
-               if (likely(is_power_of_2(chunk_sects)))
-                       bp = bio_split(bio, chunk_sects - (sector &
-                                                          (chunk_sects-1)));
-               else
-                       bp = bio_split(bio, chunk_sects -
-                                      sector_div(sector, chunk_sects));
-               raid0_make_request(mddev, &bp->bio1);
-               raid0_make_request(mddev, &bp->bio2);
-               bio_pair_release(bp);
-               return;
-       }
+       do {
+               sector_t sector = bio->bi_iter.bi_sector;
+               unsigned chunk_sects = mddev->chunk_sectors;
 
-       sector_offset = bio->bi_sector;
-       zone = find_zone(mddev->private, &sector_offset);
-       tmp_dev = map_sector(mddev, zone, bio->bi_sector,
-                            &sector_offset);
-       bio->bi_bdev = tmp_dev->bdev;
-       bio->bi_sector = sector_offset + zone->dev_start +
-               tmp_dev->data_offset;
-
-       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
-                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-               /* Just ignore it */
-               bio_endio(bio, 0);
-               return;
-       }
+               unsigned sectors = chunk_sects -
+                       (likely(is_power_of_2(chunk_sects))
+                        ? (sector & (chunk_sects-1))
+                        : sector_div(sector, chunk_sects));
 
-       generic_make_request(bio);
-       return;
-
-bad_map:
-       printk("md/raid0:%s: make_request bug: can't convert block across chunks"
-              " or bigger than %dk %llu %d\n",
-              mdname(mddev), chunk_sects / 2,
-              (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+               if (sectors < bio_sectors(bio)) {
+                       split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
 
-       bio_io_error(bio);
-       return;
+               zone = find_zone(mddev->private, &sector);
+               tmp_dev = map_sector(mddev, zone, sector, &sector);
+               split->bi_bdev = tmp_dev->bdev;
+               split->bi_iter.bi_sector = sector + zone->dev_start +
+                       tmp_dev->data_offset;
+
+               if (unlikely((split->bi_rw & REQ_DISCARD) &&
+                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
+                       /* Just ignore it */
+                       bio_endio(split, 0);
+               } else
+                       generic_make_request(split);
+       } while (split != bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
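md/linear and raid0 now share the same shape: instead of the old one-shot bio_pair split, a loop carves off the largest prefix that fits, chains it to the parent with bio_chain(), submits it, and repeats until the whole bio is issued. The skeleton, sketched with a hypothetical max_sectors_at() standing in for the per-device/per-chunk limit the drivers compute:

	#include <linux/bio.h>

	unsigned max_sectors_at(sector_t sector);	/* driver-specific */

	static void split_and_submit(struct bio *bio)
	{
		struct bio *split;

		do {
			unsigned sectors =
				max_sectors_at(bio->bi_iter.bi_sector);

			if (sectors < bio_sectors(bio)) {
				split = bio_split(bio, sectors, GFP_NOIO,
						  fs_bio_set);
				/* parent completes after all children do */
				bio_chain(split, bio);
			} else {
				split = bio;
			}

			/* remap split->bi_bdev / bi_iter.bi_sector here */
			generic_make_request(split);
		} while (split != bio);
	}

Because bio_chain() takes a bi_remaining reference on the parent, the parent's completion fires exactly once, after the last chained child ends.
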
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc7a343188a5579350886795ce6fef35c4f..fd3a2a14b587da5e3bb5046b0017ed7bd46f67a1 100644
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
        int done;
        struct r1conf *conf = r1_bio->mddev->private;
        sector_t start_next_window = r1_bio->start_next_window;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        if (bio->bi_phys_segments) {
                unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
-                        (unsigned long long) bio->bi_sector,
-                        (unsigned long long) bio->bi_sector +
-                        bio_sectors(bio) - 1);
+                        (unsigned long long) bio->bi_iter.bi_sector,
+                        (unsigned long long) bio_end_sector(bio) - 1);
 
                call_bio_endio(r1_bio);
        }
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
                                struct bio *mbio = r1_bio->master_bio;
                                pr_debug("raid1: behind end write sectors"
                                         " %llu-%llu\n",
-                                        (unsigned long long) mbio->bi_sector,
-                                        (unsigned long long) mbio->bi_sector +
-                                        bio_sectors(mbio) - 1);
+                                        (unsigned long long) mbio->bi_iter.bi_sector,
+                                        (unsigned long long) bio_end_sector(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
                else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
                                >= bio_end_sector(bio)) ||
                         (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                               <= bio->bi_sector))
+                               <= bio->bi_iter.bi_sector))
                        wait = false;
                else
                        wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 
        if (bio && bio_data_dir(bio) == WRITE) {
                if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-                   <= bio->bi_sector) {
+                   <= bio->bi_iter.bi_sector) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
                                        NEXT_NORMALIO_DISTANCE;
 
                        if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
-                           <= bio->bi_sector)
+                           <= bio->bi_iter.bi_sector)
                                conf->next_window_requests++;
                        else
                                conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
                if (bvecs[i].bv_page)
                        put_page(bvecs[i].bv_page);
        kfree(bvecs);
-       pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+       pr_debug("%dB behind alloc failed, doing sync I/O\n",
+                bio->bi_iter.bi_size);
 }
 
 struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
        if (bio_data_dir(bio) == WRITE &&
            bio_end_sector(bio) > mddev->suspend_lo &&
-           bio->bi_sector < mddev->suspend_hi) {
+           bio->bi_iter.bi_sector < mddev->suspend_hi) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
-                           bio->bi_sector >= mddev->suspend_hi)
+                           bio->bi_iter.bi_sector >= mddev->suspend_hi)
                                break;
                        schedule();
                }
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
-       r1_bio->sector = bio->bi_sector;
+       r1_bio->sector = bio->bi_iter.bi_sector;
 
        /* We might need to issue multiple reads to different
         * devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
                r1_bio->read_disk = rdisk;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+               bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
                         max_sectors);
 
                r1_bio->bios[rdisk] = read_bio;
 
-               read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+               read_bio->bi_iter.bi_sector = r1_bio->sector +
+                       mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
                read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
                         */
 
                        sectors_handled = (r1_bio->sector + max_sectors
-                                          - bio->bi_sector);
+                                          - bio->bi_iter.bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
                        r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
-                       r1_bio->sector = bio->bi_sector + sectors_handled;
+                       r1_bio->sector = bio->bi_iter.bi_sector +
+                               sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
                        if (r1_bio->bios[j])
                                rdev_dec_pending(conf->mirrors[j].rdev, mddev);
                r1_bio->state = 0;
-               allow_barrier(conf, start_next_window, bio->bi_sector);
+               allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                start_next_window = wait_barrier(conf, bio);
                /*
@@ -1348,7 +1349,7 @@ read_again:
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
-       sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+       sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
        atomic_set(&r1_bio->remaining, 1);
        atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 
                if (first_clone) {
                        /* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
 
                r1_bio->bios[i] = mbio;
 
-               mbio->bi_sector = (r1_bio->sector +
+               mbio->bi_iter.bi_sector = (r1_bio->sector +
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
                r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
-               r1_bio->sector = bio->bi_sector + sectors_handled;
+               r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
                goto retry_write;
        }
 
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio)
                /* fixup the bio for reuse */
                bio_reset(b);
                b->bi_vcnt = vcnt;
-               b->bi_size = r1_bio->sectors << 9;
-               b->bi_sector = r1_bio->sector +
+               b->bi_iter.bi_size = r1_bio->sectors << 9;
+               b->bi_iter.bi_sector = r1_bio->sector +
                        conf->mirrors[i].rdev->data_offset;
                b->bi_bdev = conf->mirrors[i].rdev->bdev;
                b->bi_end_io = end_sync_read;
                b->bi_private = r1_bio;
 
-               size = b->bi_size;
+               size = b->bi_iter.bi_size;
                for (j = 0; j < vcnt ; j++) {
                        struct bio_vec *bi;
                        bi = &b->bi_io_vec[j];
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
                }
 
                wbio->bi_rw = WRITE;
-               wbio->bi_sector = r1_bio->sector;
-               wbio->bi_size = r1_bio->sectors << 9;
+               wbio->bi_iter.bi_sector = r1_bio->sector;
+               wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
                bio_trim(wbio, sector - r1_bio->sector, sectors);
-               wbio->bi_sector += rdev->data_offset;
+               wbio->bi_iter.bi_sector += rdev->data_offset;
                wbio->bi_bdev = rdev->bdev;
                if (submit_bio_wait(WRITE, wbio) == 0)
                        /* failure! */
@@ -2338,7 +2339,8 @@ read_more:
                }
                r1_bio->read_disk = disk;
                bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-               bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+               bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+                        max_sectors);
                r1_bio->bios[r1_bio->read_disk] = bio;
                rdev = conf->mirrors[disk].rdev;
                printk_ratelimited(KERN_ERR
@@ -2347,7 +2349,7 @@ read_more:
                                   mdname(mddev),
                                   (unsigned long long)r1_bio->sector,
                                   bdevname(rdev->bdev, b));
-               bio->bi_sector = r1_bio->sector + rdev->data_offset;
+               bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
                bio->bi_bdev = rdev->bdev;
                bio->bi_end_io = raid1_end_read_request;
                bio->bi_rw = READ | do_sync;
@@ -2356,7 +2358,7 @@ read_more:
                        /* Drat - have to split this up more */
                        struct bio *mbio = r1_bio->master_bio;
                        int sectors_handled = (r1_bio->sector + max_sectors
-                                              - mbio->bi_sector);
+                                              - mbio->bi_iter.bi_sector);
                        r1_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2376,8 @@ read_more:
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
-                       r1_bio->sector = mbio->bi_sector + sectors_handled;
+                       r1_bio->sector = mbio->bi_iter.bi_sector +
+                               sectors_handled;
 
                        goto read_more;
                } else
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                }
                if (bio->bi_end_io) {
                        atomic_inc(&rdev->nr_pending);
-                       bio->bi_sector = sector_nr + rdev->data_offset;
+                       bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
                        bio->bi_bdev = rdev->bdev;
                        bio->bi_private = r1_bio;
                }
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
                                                        continue;
                                                /* remove last page from this bio */
                                                bio->bi_vcnt--;
-                                               bio->bi_size -= len;
+                                               bio->bi_iter.bi_size -= len;
                                                bio->bi_flags &= ~(1<< BIO_SEG_VALID);
                                        }
                                        goto bio_full;
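
Every raid1 hunk above is the same mechanical conversion: with immutable biovecs the bio's position and residual size move from struct bio into the embedded iterator bio->bi_iter (a struct bvec_iter carrying bi_sector, bi_size, bi_idx and bi_bvec_done in 3.14), and the open-coded end-of-bio arithmetic gives way to the bio_end_sector() helper. Roughly:

    #include <linux/bio.h>

    /* Last sector covered by a bio, before and after the conversion. */
    static inline sector_t example_last_sector(struct bio *bio)
    {
            /* pre-3.14:  bio->bi_sector + bio_sectors(bio) - 1         */
            /* now the start sector lives inside the embedded iterator: */
            return bio->bi_iter.bi_sector + bio_sectors(bio) - 1;
            /* equivalently: bio_end_sector(bio) - 1                    */
    }
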
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8d39d63281b9b5441b3ec8e524955356c8690871..33fc408e5eacef0a1dce55fd5c0d578fc244b663 100644
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void __make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
        struct r10bio *r10_bio;
        struct bio *read_bio;
        int i;
-       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
-       int chunk_sects = chunk_mask + 1;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        int max_sectors;
        int sectors;
 
-       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
-               md_flush_request(mddev, bio);
-               return;
-       }
-
-       /* If this request crosses a chunk boundary, we need to
-        * split it.  This will only happen for 1 PAGE (or less) requests.
-        */
-       if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
-                    > chunk_sects
-                    && (conf->geo.near_copies < conf->geo.raid_disks
-                        || conf->prev.near_copies < conf->prev.raid_disks))) {
-               struct bio_pair *bp;
-               /* Sanity check -- queue functions should prevent this happening */
-               if (bio_segments(bio) > 1)
-                       goto bad_map;
-               /* This is a one page bio that upper layers
-                * refuse to split for us, so we need to split it.
-                */
-               bp = bio_split(bio,
-                              chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
-               /* Each of these 'make_request' calls will call 'wait_barrier'.
-                * If the first succeeds but the second blocks due to the resync
-                * thread raising the barrier, we will deadlock because the
-                * IO to the underlying device will be queued in generic_make_request
-                * and will never complete, so will never reduce nr_pending.
-                * So increment nr_waiting here so no new raise_barriers will
-                * succeed, and so the second wait_barrier cannot block.
-                */
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting++;
-               spin_unlock_irq(&conf->resync_lock);
-
-               make_request(mddev, &bp->bio1);
-               make_request(mddev, &bp->bio2);
-
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting--;
-               wake_up(&conf->wait_barrier);
-               spin_unlock_irq(&conf->resync_lock);
-
-               bio_pair_release(bp);
-               return;
-       bad_map:
-               printk("md/raid10:%s: make_request bug: can't convert block across chunks"
-                      " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-                      (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
-
-               bio_io_error(bio);
-               return;
-       }
-
-       md_write_start(mddev, bio);
-
-       /*
-        * Register the new request and wait if the reconstruction
-        * thread has put up a bar for new requests.
-        * Continue immediately if no resync is active currently.
-        */
-       wait_barrier(conf);
-
        sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio->bi_sector < conf->reshape_progress &&
-           bio->bi_sector + sectors > conf->reshape_progress) {
+           bio->bi_iter.bi_sector < conf->reshape_progress &&
+           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
                /* IO spans the reshape position.  Need to wait for
                 * reshape to pass
                 */
                allow_barrier(conf);
                wait_event(conf->wait_barrier,
-                          conf->reshape_progress <= bio->bi_sector ||
-                          conf->reshape_progress >= bio->bi_sector + sectors);
+                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                          conf->reshape_progress >= bio->bi_iter.bi_sector +
+                          sectors);
                wait_barrier(conf);
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio_data_dir(bio) == WRITE &&
            (mddev->reshape_backwards
-            ? (bio->bi_sector < conf->reshape_safe &&
-               bio->bi_sector + sectors > conf->reshape_progress)
-            : (bio->bi_sector + sectors > conf->reshape_safe &&
-               bio->bi_sector < conf->reshape_progress))) {
+            ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+               bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+            : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+               bio->bi_iter.bi_sector < conf->reshape_progress))) {
                /* Need to update reshape_position in metadata */
                mddev->reshape_position = conf->reshape_progress;
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r10_bio->sectors = sectors;
 
        r10_bio->mddev = mddev;
-       r10_bio->sector = bio->bi_sector;
+       r10_bio->sector = bio->bi_iter.bi_sector;
        r10_bio->state = 0;
 
        /* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
                slot = r10_bio->read_slot;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+               bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
                         max_sectors);
 
                r10_bio->devs[slot].bio = read_bio;
                r10_bio->devs[slot].rdev = rdev;
 
-               read_bio->bi_sector = r10_bio->devs[slot].addr +
+               read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
                        choose_data_offset(r10_bio, rdev);
                read_bio->bi_bdev = rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
                         * need another r10_bio.
                         */
                        sectors_handled = (r10_bio->sector + max_sectors
-                                          - bio->bi_sector);
+                                          - bio->bi_iter.bi_sector);
                        r10_bio->sectors = max_sectors;
                        spin_lock_irq(&conf->device_lock);
                        if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
                        r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
-                       r10_bio->sector = bio->bi_sector + sectors_handled;
+                       r10_bio->sector = bio->bi_iter.bi_sector +
+                               sectors_handled;
                        goto read_again;
                } else
                        generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
                        bio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
        }
-       sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+       sectors_handled = r10_bio->sector + max_sectors -
+               bio->bi_iter.bi_sector;
 
        atomic_set(&r10_bio->remaining, 1);
        bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
                if (r10_bio->devs[i].bio) {
                        struct md_rdev *rdev = conf->mirrors[d].rdev;
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                       bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+                       bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                                 max_sectors);
                        r10_bio->devs[i].bio = mbio;
 
-                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                       mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
                                           choose_data_offset(r10_bio,
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
                                rdev = conf->mirrors[d].rdev;
                        }
                        mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-                       bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+                       bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
                                 max_sectors);
                        r10_bio->devs[i].repl_bio = mbio;
 
-                       mbio->bi_sector = (r10_bio->devs[i].addr +
+                       mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
                                           choose_data_offset(
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
                r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
                r10_bio->mddev = mddev;
-               r10_bio->sector = bio->bi_sector + sectors_handled;
+               r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
                r10_bio->state = 0;
                goto retry_write;
        }
        one_write_done(r10_bio);
+}
+
+static void make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r10conf *conf = mddev->private;
+       sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+       int chunk_sects = chunk_mask + 1;
+
+       struct bio *split;
+
+       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+               md_flush_request(mddev, bio);
+               return;
+       }
+
+       md_write_start(mddev, bio);
+
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
+       do {
+
+               /*
+                * If this request crosses a chunk boundary, we need to split
+                * it.
+                */
+               if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
+                            bio_sectors(bio) > chunk_sects
+                            && (conf->geo.near_copies < conf->geo.raid_disks
+                                || conf->prev.near_copies <
+                                conf->prev.raid_disks))) {
+                       split = bio_split(bio, chunk_sects -
+                                         (bio->bi_iter.bi_sector &
+                                          (chunk_sects - 1)),
+                                         GFP_NOIO, fs_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               __make_request(mddev, split);
+       } while (split != bio);
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                bio_reset(tbio);
 
                tbio->bi_vcnt = vcnt;
-               tbio->bi_size = r10_bio->sectors << 9;
+               tbio->bi_iter.bi_size = r10_bio->sectors << 9;
                tbio->bi_rw = WRITE;
                tbio->bi_private = r10_bio;
-               tbio->bi_sector = r10_bio->devs[i].addr;
+               tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 
                for (j=0; j < vcnt ; j++) {
                        tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
-               tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+               tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                generic_make_request(tbio);
        }
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(wbio, sector - bio->bi_sector, sectors);
-               wbio->bi_sector = (r10_bio->devs[i].addr+
+               bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+               wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
                                   choose_data_offset(r10_bio, rdev) +
                                   (sector - r10_bio->sector));
                wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
                (unsigned long long)r10_bio->sector);
        bio = bio_clone_mddev(r10_bio->master_bio,
                              GFP_NOIO, mddev);
-       bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+       bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
        r10_bio->devs[slot].bio = bio;
        r10_bio->devs[slot].rdev = rdev;
-       bio->bi_sector = r10_bio->devs[slot].addr
+       bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
                + choose_data_offset(r10_bio, rdev);
        bio->bi_bdev = rdev->bdev;
        bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
                struct bio *mbio = r10_bio->master_bio;
                int sectors_handled =
                        r10_bio->sector + max_sectors
-                       - mbio->bi_sector;
+                       - mbio->bi_iter.bi_sector;
                r10_bio->sectors = max_sectors;
                spin_lock_irq(&conf->device_lock);
                if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
                set_bit(R10BIO_ReadError,
                        &r10_bio->state);
                r10_bio->mddev = mddev;
-               r10_bio->sector = mbio->bi_sector
+               r10_bio->sector = mbio->bi_iter.bi_sector
                        + sectors_handled;
 
                goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio->bi_end_io = end_sync_read;
                                bio->bi_rw = READ;
                                from_addr = r10_bio->devs[j].addr;
-                               bio->bi_sector = from_addr + rdev->data_offset;
+                               bio->bi_iter.bi_sector = from_addr +
+                                       rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                atomic_inc(&rdev->nr_pending);
                                /* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                        bio->bi_private = r10_bio;
                                        bio->bi_end_io = end_sync_write;
                                        bio->bi_rw = WRITE;
-                                       bio->bi_sector = to_addr
+                                       bio->bi_iter.bi_sector = to_addr
                                                + rdev->data_offset;
                                        bio->bi_bdev = rdev->bdev;
                                        atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_write;
                                bio->bi_rw = WRITE;
-                               bio->bi_sector = to_addr + rdev->data_offset;
+                               bio->bi_iter.bi_sector = to_addr +
+                                       rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                atomic_inc(&r10_bio->remaining);
                                break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_read;
                        bio->bi_rw = READ;
-                       bio->bi_sector = sector +
+                       bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].rdev->data_offset;
                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
                        count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_write;
                        bio->bi_rw = WRITE;
-                       bio->bi_sector = sector +
+                       bio->bi_iter.bi_sector = sector +
                                conf->mirrors[d].replacement->data_offset;
                        bio->bi_bdev = conf->mirrors[d].replacement->bdev;
                        count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                             bio2 = bio2->bi_next) {
                                /* remove last page from this bio */
                                bio2->bi_vcnt--;
-                               bio2->bi_size -= len;
+                               bio2->bi_iter.bi_size -= len;
                                bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
                        }
                        goto bio_full;
@@ -4418,7 +4405,7 @@ read_more:
        read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
        read_bio->bi_bdev = rdev->bdev;
-       read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+       read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
                               + rdev->data_offset);
        read_bio->bi_private = r10_bio;
        read_bio->bi_end_io = end_sync_read;
@@ -4426,7 +4413,7 @@ read_more:
        read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
        read_bio->bi_flags |= 1 << BIO_UPTODATE;
        read_bio->bi_vcnt = 0;
-       read_bio->bi_size = 0;
+       read_bio->bi_iter.bi_size = 0;
        r10_bio->master_bio = read_bio;
        r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
 
@@ -4452,7 +4439,8 @@ read_more:
 
                bio_reset(b);
                b->bi_bdev = rdev2->bdev;
-               b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+               b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+                       rdev2->new_data_offset;
                b->bi_private = r10_bio;
                b->bi_end_io = end_reshape_write;
                b->bi_rw = WRITE;
@@ -4479,7 +4467,7 @@ read_more:
                             bio2 = bio2->bi_next) {
                                /* Remove last page from this bio */
                                bio2->bi_vcnt--;
-                               bio2->bi_size -= len;
+                               bio2->bi_iter.bi_size -= len;
                                bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
                        }
                        goto bio_full;
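
The raid10 restructuring works because bio_chain() turns completion into reference counting, which is what lets the old recursive make_request() (and its nr_waiting deadlock workaround, deleted above) collapse into a simple split loop. As read from the 3.14 code, not a verbatim copy of it: each chained child bumps the parent's bi_remaining, its own bio_endio() drops that reference, and the parent's bi_end_io runs only when the count reaches zero. A usage sketch:

    /* Submit a bio as two pieces; the caller's completion fires once. */
    static void submit_in_two_pieces(struct bio *bio, sector_t front_sectors)
    {
            struct bio *front = bio_split(bio, front_sectors,
                                          GFP_NOIO, fs_bio_set);

            bio_chain(front, bio);          /* bumps bio->bi_remaining */
            generic_make_request(front);    /* its endio drops the ref */
            generic_make_request(bio);      /* parent finishes last    */
    }
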
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab87d9e73eb4fed4ede052c95fa5d891f09..67ca9c3d2939c5e4468d51f0ea0454dfdceac731 100644
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
        int sectors = bio_sectors(bio);
-       if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+       if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
                return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
-               bi->bi_size = 0;
+               bi->bi_iter.bi_size = 0;
                trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                                         bi, 0);
                bio_endio(bi, 0);
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->new_data_offset);
                        else
-                               bi->bi_sector = (sh->sector
+                               bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->data_offset);
                        if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                                bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_vcnt = 1;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
-                       bi->bi_size = STRIPE_SIZE;
+                       bi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                rbi->bi_rw, i);
                        atomic_inc(&sh->count);
                        if (use_new_offset(conf, sh))
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->new_data_offset);
                        else
-                               rbi->bi_sector = (sh->sector
+                               rbi->bi_iter.bi_sector = (sh->sector
                                                  + rrdev->data_offset);
                        rbi->bi_vcnt = 1;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
-                       rbi->bi_size = STRIPE_SIZE;
+                       rbi->bi_iter.bi_size = STRIPE_SIZE;
                        /*
                         * If this is discard request, set bi_vcnt 0. We don't
                         * want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page *page,
        sector_t sector, struct dma_async_tx_descriptor *tx)
 {
-       struct bio_vec *bvl;
+       struct bio_vec bvl;
+       struct bvec_iter iter;
        struct page *bio_page;
-       int i;
        int page_offset;
        struct async_submit_ctl submit;
        enum async_tx_flags flags = 0;
 
-       if (bio->bi_sector >= sector)
-               page_offset = (signed)(bio->bi_sector - sector) * 512;
+       if (bio->bi_iter.bi_sector >= sector)
+               page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
        else
-               page_offset = (signed)(sector - bio->bi_sector) * -512;
+               page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
        if (frombio)
                flags |= ASYNC_TX_FENCE;
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
-       bio_for_each_segment(bvl, bio, i) {
-               int len = bvl->bv_len;
+       bio_for_each_segment(bvl, bio, iter) {
+               int len = bvl.bv_len;
                int clen;
                int b_offset = 0;
 
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        clen = len;
 
                if (clen > 0) {
-                       b_offset += bvl->bv_offset;
-                       bio_page = bvl->bv_page;
+                       b_offset += bvl.bv_offset;
+                       bio_page = bvl.bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
                        spin_unlock_irq(&sh->stripe_lock);
-                       while (rbi && rbi->bi_sector <
+                       while (rbi && rbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
                                        dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
                        wbi = dev->written = chosen;
                        spin_unlock_irq(&sh->stripe_lock);
 
-                       while (wbi && wbi->bi_sector <
+                       while (wbi && wbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                if (wbi->bi_rw & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        int firstwrite=0;
 
        pr_debug("adding bi b#%llu to stripe s#%llu\n",
-               (unsigned long long)bi->bi_sector,
+               (unsigned long long)bi->bi_iter.bi_sector,
                (unsigned long long)sh->sector);
 
        /*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
-       while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-               if (bio_end_sector(*bip) > bi->bi_sector)
+       while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+               if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
                        goto overlap;
                bip = & (*bip)->bi_next;
        }
-       if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+       if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
                goto overlap;
 
        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
                sector_t sector = sh->dev[dd_idx].sector;
                for (bi=sh->dev[dd_idx].towrite;
                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-                            bi && bi->bi_sector <= sector;
+                            bi && bi->bi_iter.bi_sector <= sector;
                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
                        if (bio_end_sector(bi) >= sector)
                                sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        }
 
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-               (unsigned long long)(*bip)->bi_sector,
+               (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
        spin_unlock_irq(&sh->stripe_lock);
 
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                        wake_up(&conf->wait_for_overlap);
 
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
                if (bi) bitmap_end = 1;
-               while (bi && bi->bi_sector <
+               while (bi && bi->bi_iter.bi_sector <
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
-                       while (bi && bi->bi_sector <
+                       while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                        clear_bit(R5_UPTODATE, &dev->flags);
                                wbi = dev->written;
                                dev->written = NULL;
-                               while (wbi && wbi->bi_sector <
+                               while (wbi && wbi->bi_iter.bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
                                        if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-       sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+       sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
        /*
         *      compute position
         */
-       align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
-                                                   0,
-                                                   &dd_idx, NULL);
+       align_bi->bi_iter.bi_sector =
+               raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+                                    0, &dd_idx, NULL);
 
        end_sector = bio_end_sector(align_bi);
        rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
                if (!bio_fits_rdev(align_bi) ||
-                   is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+                   is_badblock(rdev, align_bi->bi_iter.bi_sector,
+                               bio_sectors(align_bi),
                                &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                }
 
                /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_sector += rdev->data_offset;
+               align_bi->bi_iter.bi_sector += rdev->data_offset;
 
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                if (mddev->gendisk)
                        trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
                                              align_bi, disk_devt(mddev->gendisk),
-                                             raid_bio->bi_sector);
+                                             raid_bio->bi_iter.bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                /* Skip discard while reshape is happening */
                return;
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-       last_sector = bi->bi_sector + (bi->bi_size>>9);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                return;
        }
 
-       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bio_end_sector(bi);
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        int remaining;
        int handled = 0;
 
-       logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       logical_sector = raid_bio->bi_iter.bi_sector &
+               ~((sector_t)STRIPE_SECTORS-1);
        sector = raid5_compute_sector(conf, logical_sector,
                                      0, &dd_idx, NULL);
        last_sector = bio_end_sector(raid_bio);
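
The raid5 async_copy_data() hunks show the other half of the conversion: bio_for_each_segment() now walks a caller-supplied struct bvec_iter and hands each segment back by value (possibly a clipped view of an underlying vec), so the loop variable is a plain struct bio_vec and members are reached with '.' rather than '->'. A minimal sketch:

    #include <linux/bio.h>

    /* Count a bio's payload bytes with the 3.14-style iterator;
     * the result simply equals bio->bi_iter.bi_size. */
    static unsigned int example_bio_bytes(struct bio *bio)
    {
            struct bio_vec bvec;    /* by value, not a pointer */
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bvec, bio, iter)
                    bytes += bvec.bv_len;

            return bytes;
    }
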
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4a0877db2ab49aa3c27a81eec7dd1..00d339c361fc0ecbd0b9b086e8c5756d28983d77 100644
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 
        /* do we need to support multiple segments? */
-       if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-               printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-                   ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                   bio_segments(rsp->bio), blk_rq_bytes(rsp));
+       if (bio_multiple_segments(req->bio) ||
+           bio_multiple_segments(rsp->bio)) {
+               printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+                   ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
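Under immutable biovecs, bio_segments() is no longer a cheap field read (it has to walk the iterator), so callers that only ask "is there more than one?" switch to bio_multiple_segments(), as mptsas does above. From memory of the 3.14 header, the helper reduces to a size comparison, roughly:

    #include <linux/bio.h>

    /* True iff the bio's data spans more than its first segment
     * (an assumption-level paraphrase of bio_multiple_segments()). */
    static bool example_multiple_segments(struct bio *bio)
    {
            return bio->bi_iter.bi_size != bio_iovec(bio).bv_len;
    }
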
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22ce676012697be18504d96741b4a3910466..9cbc567698cefd0d3933f7c77198d4ccf34ceeeb 100644
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        struct dasd_diag_req *dreq;
        struct dasd_diag_bio *dbio;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int count, datasize;
        sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Check struct bio and count the number of blocks for the request. */
        count = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
        }
        /* Paranoia. */
        if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        dbio = dreq->bio;
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        memset(dbio, 0, sizeof (struct dasd_diag_bio));
                        dbio->type = rw_cmd;
                        dbio->block_number = recid + 1;
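
The s390 dasd hunks repeat the same pattern one level up: struct req_iterator now embeds a bvec_iter, and rq_for_each_segment() likewise yields struct bio_vec by value, so every bv->bv_x becomes bv.bv_x. One sketch covers this change in dasd_diag as well as the dasd_eckd and dasd_fba hunks that follow:

    #include <linux/blkdev.h>

    /* Sum a request's data bytes with the by-value segment iterator. */
    static unsigned int example_req_bytes(struct request *req)
    {
            struct req_iterator iter;
            struct bio_vec bv;      /* was 'struct bio_vec *bv' */
            unsigned int bytes = 0;

            rq_for_each_segment(bv, req, iter)
                    bytes += bv.bv_len;

            return bytes;
    }
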
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e45782692fa7bb2a89e9ec566a6a035f736381..2e8e0755070b609b13e9e49f5db5f17ac232ef69 100644
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int off;
        int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        count = 0;
        cidaw = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Eckd can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-               if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-                       cidaw += bv->bv_len >> (block->s2b_shift + 9);
+               if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+                       cidaw += bv.bv_len >> (block->s2b_shift + 9);
 #endif
        }
        /* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                              last_rec - recid + 1, cmd, basedev, blksize);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
-                               memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+                               memcpy(copy + bv.bv_offset, dst, bv.bv_len);
                        if (copy)
-                               dst = copy + bv->bv_offset;
+                               dst = copy + bv.bv_offset;
                }
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        sector_t trkid = recid;
                        unsigned int recoffs = sector_div(trkid, blk_per_trk);
                        rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *idaw_dst;
        unsigned int cidaw, cplength, datasize;
        unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
        idaw_dst = NULL;
        idaw_len = 0;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               seg_len = bv->bv_len;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               seg_len = bv.bv_len;
                while (seg_len) {
                        if (new_track) {
                                trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 {
        struct dasd_ccw_req *cqr;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned int trkcount, ctidaw;
        unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                new_track = 1;
                recid = first_rec;
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv->bv_page) + bv->bv_offset;
-                       seg_len = bv->bv_len;
+                       dst = page_address(bv.bv_page) + bv.bv_offset;
+                       seg_len = bv.bv_len;
                        while (seg_len) {
                                if (new_track) {
                                        trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                }
        } else {
                rq_for_each_segment(bv, req, iter) {
-                       dst = page_address(bv->bv_page) + bv->bv_offset;
+                       dst = page_address(bv.bv_page) + bv.bv_offset;
                        last_tidaw = itcw_add_tidaw(itcw, 0x00,
-                                                   dst, bv->bv_len);
+                                                   dst, bv.bv_len);
                        if (IS_ERR(last_tidaw)) {
                                ret = -EINVAL;
                                goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        unsigned char cmd;
        unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
                        idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
        }
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               seg_len = bv->bv_len;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               seg_len = bv.bv_len;
                if (cmd == DASD_ECKD_CCW_READ_TRACK)
                        memset(dst, 0, seg_len);
                if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        struct dasd_eckd_private *private;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *cda;
        unsigned int blksize, blk_per_trk, off;
        sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->uses_cdl && recid <= 2*blk_per_trk)
                                ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                                        cda = (char *)((addr_t) ccw->cda);
                                if (dst != cda) {
                                        if (rq_data_dir(req) == READ)
-                                               memcpy(dst, cda, bv->bv_len);
+                                               memcpy(dst, cda, bv.bv_len);
                                        kmem_cache_free(dasd_page_cache,
                                            (void *)((addr_t)cda & PAGE_MASK));
                                }
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9cbc8c32ba595739cdff63da752a8f080b51e0de..2c8e68bf9a1cd658be919151387fe3ae24bffad3 100644
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst;
        int count, cidaw, cplength, datasize;
        sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        count = 0;
        cidaw = 0;
        rq_for_each_segment(bv, req, iter) {
-               if (bv->bv_len & (blksize - 1))
+               if (bv.bv_len & (blksize - 1))
                        /* Fba can only do full blocks. */
                        return ERR_PTR(-EINVAL);
-               count += bv->bv_len >> (block->s2b_shift + 9);
+               count += bv.bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-               if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
-                       cidaw += bv->bv_len / blksize;
+               if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
+                       cidaw += bv.bv_len / blksize;
 #endif
        }
        /* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        }
        recid = first_rec;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
+               dst = page_address(bv.bv_page) + bv.bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
                                                      GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
-                               memcpy(copy + bv->bv_offset, dst, bv->bv_len);
+                               memcpy(copy + bv.bv_offset, dst, bv.bv_len);
                        if (copy)
-                               dst = copy + bv->bv_offset;
+                               dst = copy + bv.bv_offset;
                }
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Locate record for stupid devices. */
                        if (private->rdc_data.mode.bits.data_chain == 0) {
                                ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        struct dasd_fba_private *private;
        struct ccw1 *ccw;
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        char *dst, *cda;
        unsigned int blksize, off;
        int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        if (private->rdc_data.mode.bits.data_chain != 0)
                ccw++;
        rq_for_each_segment(bv, req, iter) {
-               dst = page_address(bv->bv_page) + bv->bv_offset;
-               for (off = 0; off < bv->bv_len; off += blksize) {
+               dst = page_address(bv.bv_page) + bv.bv_offset;
+               for (off = 0; off < bv.bv_len; off += blksize) {
                        /* Skip locate record. */
                        if (private->rdc_data.mode.bits.data_chain == 0)
                                ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                                        cda = (char *)((addr_t) ccw->cda);
                                if (dst != cda) {
                                        if (rq_data_dir(req) == READ)
-                                               memcpy(dst, cda, bv->bv_len);
+                                               memcpy(dst, cda, bv.bv_len);
                                        kmem_cache_free(dasd_page_cache,
                                            (void *)((addr_t)cda & PAGE_MASK));
                                }
index 6eca019bcf30a50edfab1a80daf1b351d2320474..ebf41e228e55836e6ec764b105c357e1856c9d1b 100644 (file)
@@ -808,18 +808,19 @@ static void
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
        struct dcssblk_dev_info *dev_info;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long index;
        unsigned long page_addr;
        unsigned long source_addr;
        unsigned long bytes_done;
-       int i;
 
        bytes_done = 0;
        dev_info = bio->bi_bdev->bd_disk->private_data;
        if (dev_info == NULL)
                goto fail;
-       if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+       if ((bio->bi_iter.bi_sector & 7) != 0 ||
+           (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
        if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
                }
        }
 
-       index = (bio->bi_sector >> 3);
-       bio_for_each_segment(bvec, bio, i) {
+       index = (bio->bi_iter.bi_sector >> 3);
+       bio_for_each_segment(bvec, bio, iter) {
                page_addr = (unsigned long)
-                       page_address(bvec->bv_page) + bvec->bv_offset;
+                       page_address(bvec.bv_page) + bvec.bv_offset;
                source_addr = dev_info->start + (index<<12) + bytes_done;
-               if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
+               if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
                        // More paranoia.
                        goto fail;
                if (bio_data_dir(bio) == READ) {
                        memcpy((void*)page_addr, (void*)source_addr,
-                               bvec->bv_len);
+                               bvec.bv_len);
                } else {
                        memcpy((void*)source_addr, (void*)page_addr,
-                               bvec->bv_len);
+                               bvec.bv_len);
                }
-               bytes_done += bvec->bv_len;
+               bytes_done += bvec.bv_len;
        }
        bio_endio(bio, 0);
        return;
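
The bio-level loops change the same way: the integer segment index becomes a struct bvec_iter, and bi_sector/bi_size move into bio->bi_iter. A hedged sketch of the new loop shape (the function is invented for illustration):

#include <linux/bio.h>
#include <linux/mm.h>

/* Hypothetical: zero every segment of a bio. */
static void example_zero_bio(struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;  /* replaces the old integer index */

        pr_debug("bio at sector %llu, %u bytes\n",
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio->bi_iter.bi_size);

        bio_for_each_segment(bvec, bio, iter)
                memset(page_address(bvec.bv_page) + bvec.bv_offset,
                       0, bvec.bv_len);
}
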
index d0ab5019d885cea6113f677a4f58990fa4a3ca55..76bed1743db1c7ef23576282b13d9ef8f551aed8 100644 (file)
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
 
        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
        msb->data_addr = (u64) aidaw;
 
        rq_for_each_segment(bv, scmrq->request, iter) {
-               WARN_ON(bv->bv_offset);
-               msb->blk_count += bv->bv_len >> 12;
-               aidaw->data_addr = (u64) page_address(bv->bv_page);
+               WARN_ON(bv.bv_offset);
+               msb->blk_count += bv.bv_len >> 12;
+               aidaw->data_addr = (u64) page_address(bv.bv_page);
                aidaw++;
        }
 }
index 27f930cd657fcdd3a223412cb68c393ebd565556..9aae909d47a53c88d6345101db2f344b8bc1f7a9 100644 (file)
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
-       struct bio_vec *bv;
+       struct bio_vec bv;
        int i = 0;
        u64 addr;
 
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
                        i++;
                }
                rq_for_each_segment(bv, req, iter) {
-                       aidaw->data_addr = (u64) page_address(bv->bv_page);
+                       aidaw->data_addr = (u64) page_address(bv.bv_page);
                        aidaw++;
                        i++;
                }
index 58141f0651f280b4cc48b510f09455edf8b6cc0a..6969d39f1e2eba7de41856cabc0d1557b7f3efe4 100644 (file)
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
 static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
        xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned int index;
        unsigned long page_addr;
        unsigned long bytes;
-       int i;
 
-       if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
+       if ((bio->bi_iter.bi_sector & 7) != 0 ||
+           (bio->bi_iter.bi_size & 4095) != 0)
                /* Request is not page-aligned. */
                goto fail;
-       if ((bio->bi_size >> 12) > xdev->size)
+       if ((bio->bi_iter.bi_size >> 12) > xdev->size)
                /* Request size exceeds the device size. */
                goto fail;
-       if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
+       if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
                goto fail;
-       index = (bio->bi_sector >> 3) + xdev->offset;
-       bio_for_each_segment(bvec, bio, i) {
+       index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
+       bio_for_each_segment(bvec, bio, iter) {
                page_addr = (unsigned long)
-                       kmap(bvec->bv_page) + bvec->bv_offset;
-               bytes = bvec->bv_len;
+                       kmap(bvec.bv_page) + bvec.bv_offset;
+               bytes = bvec.bv_len;
                if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
                        /* More paranoia. */
                        goto fail;
index 446b85110a1fc0a69b07e42bb3ecc7144d76d1ce..0cac7d8fd0f7cac75b0ecc2d63662d18eeda4a56 100644 (file)
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 
        /* do we need to support multiple segments? */
-       if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-               printk("%s: multiple segments req %u %u, rsp %u %u\n",
-                      __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                      bio_segments(rsp->bio), blk_rq_bytes(rsp));
+       if (bio_multiple_segments(req->bio) ||
+           bio_multiple_segments(rsp->bio)) {
+               printk("%s: multiple segments req %u, rsp %u\n",
+                      __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
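
bio_multiple_segments() replaces the bio_segments(...) > 1 and bi_vcnt > 1 tests seen in these drivers: once an iterator can start and end mid-biovec, bi_vcnt says nothing about how many segments remain. From this series, the helper is essentially:

/* Sketch of the helper this series adds to include/linux/bio.h. */
static inline bool bio_multiple_segments(const struct bio *bio)
{
        /* true if more data remains than the current segment holds */
        return bio->bi_iter.bi_size != bio_iovec(bio).bv_len;
}
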
 
index 9d26637308bebe2fc2b2fdec681891cfcbaf9b07..410f4a3e88887a6f0087c06f9da42f22b61f556d 100644 (file)
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
        Mpi2SmpPassthroughRequest_t *mpi_request;
        Mpi2SmpPassthroughReply_t *mpi_reply;
-       int rc, i;
+       int rc;
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        void *pci_addr_out = NULL;
        u16 wait_state_count;
        struct request *rsp = req->next_rq;
-       struct bio_vec *bvec = NULL;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        if (!rsp) {
                printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
        /* Check if the request is split across multiple segments */
-       if (bio_segments(req->bio) > 1) {
+       if (bio_multiple_segments(req->bio)) {
                u32 offset = 0;
 
                /* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                        goto out;
                }
 
-               bio_for_each_segment(bvec, req->bio, i) {
+               bio_for_each_segment(bvec, req->bio, iter) {
                        memcpy(pci_addr_out + offset,
-                           page_address(bvec->bv_page) + bvec->bv_offset,
-                           bvec->bv_len);
-                       offset += bvec->bv_len;
+                           page_address(bvec.bv_page) + bvec.bv_offset,
+                           bvec.bv_len);
+                       offset += bvec.bv_len;
                }
        } else {
                dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* Check if the response needs to be populated across
         * multiple segments */
-       if (bio_segments(rsp->bio) > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                    &pci_dma_in);
                if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       if (bio_segments(req->bio) > 1) {
+       if (bio_multiple_segments(req->bio)) {
                ioc->base_add_sg_single(psge, sgl_flags |
                    (blk_rq_bytes(req) - 4), pci_dma_out);
        } else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       if (bio_segments(rsp->bio) > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                ioc->base_add_sg_single(psge, sgl_flags |
                    (blk_rq_bytes(rsp) + 4), pci_dma_in);
        } else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                    le16_to_cpu(mpi_reply->ResponseDataLength);
                /* check if the resp needs to be copied from the allocated
                 * pci mem */
-               if (bio_segments(rsp->bio) > 1) {
+               if (bio_multiple_segments(rsp->bio)) {
                        u32 offset = 0;
                        u32 bytes_to_copy =
                            le16_to_cpu(mpi_reply->ResponseDataLength);
-                       bio_for_each_segment(bvec, rsp->bio, i) {
-                               if (bytes_to_copy <= bvec->bv_len) {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
+                       bio_for_each_segment(bvec, rsp->bio, iter) {
+                               if (bytes_to_copy <= bvec.bv_len) {
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
                                            offset, bytes_to_copy);
                                        break;
                                } else {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
-                                           offset, bvec->bv_len);
-                                       bytes_to_copy -= bvec->bv_len;
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
+                                           offset, bvec.bv_len);
+                                       bytes_to_copy -= bvec.bv_len;
                                }
-                               offset += bvec->bv_len;
+                               offset += bvec.bv_len;
                        }
                }
        } else {
index e771a88c6a7441c45c6b49e8ffb85d5258b22e99..65170cb1a00fa5fa0ca3c95a94e3ed686edca9f7 100644 (file)
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
        Mpi2SmpPassthroughRequest_t *mpi_request;
        Mpi2SmpPassthroughReply_t *mpi_reply;
-       int rc, i;
+       int rc;
        u16 smid;
        u32 ioc_state;
        unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        void *pci_addr_out = NULL;
        u16 wait_state_count;
        struct request *rsp = req->next_rq;
-       struct bio_vec *bvec = NULL;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        if (!rsp) {
                pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
        /* Check if the request is split across multiple segments */
-       if (req->bio->bi_vcnt > 1) {
+       if (bio_multiple_segments(req->bio)) {
                u32 offset = 0;
 
                /* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                        goto out;
                }
 
-               bio_for_each_segment(bvec, req->bio, i) {
+               bio_for_each_segment(bvec, req->bio, iter) {
                        memcpy(pci_addr_out + offset,
-                           page_address(bvec->bv_page) + bvec->bv_offset,
-                           bvec->bv_len);
-                       offset += bvec->bv_len;
+                           page_address(bvec.bv_page) + bvec.bv_offset,
+                           bvec.bv_len);
+                       offset += bvec.bv_len;
                }
        } else {
                dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* Check if the response needs to be populated across
         * multiple segments */
-       if (rsp->bio->bi_vcnt > 1) {
+       if (bio_multiple_segments(rsp->bio)) {
                pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                    &pci_dma_in);
                if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        psge = &mpi_request->SGL;
 
-       if (req->bio->bi_vcnt > 1)
+       if (bio_multiple_segments(req->bio))
                ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
                    pci_dma_in, (blk_rq_bytes(rsp) + 4));
        else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
                /* check if the resp needs to be copied from the allocated
                 * pci mem */
-               if (rsp->bio->bi_vcnt > 1) {
+               if (bio_multiple_segments(rsp->bio)) {
                        u32 offset = 0;
                        u32 bytes_to_copy =
                            le16_to_cpu(mpi_reply->ResponseDataLength);
-                       bio_for_each_segment(bvec, rsp->bio, i) {
-                               if (bytes_to_copy <= bvec->bv_len) {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
+                       bio_for_each_segment(bvec, rsp->bio, iter) {
+                               if (bytes_to_copy <= bvec.bv_len) {
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
                                            offset, bytes_to_copy);
                                        break;
                                } else {
-                                       memcpy(page_address(bvec->bv_page) +
-                                           bvec->bv_offset, pci_addr_in +
-                                           offset, bvec->bv_len);
-                                       bytes_to_copy -= bvec->bv_len;
+                                       memcpy(page_address(bvec.bv_page) +
+                                           bvec.bv_offset, pci_addr_in +
+                                           offset, bvec.bv_len);
+                                       bytes_to_copy -= bvec.bv_len;
                                }
-                               offset += bvec->bv_len;
+                               offset += bvec.bv_len;
                        }
                }
        } else {
index aa66361ed44b71772da913c74c69640b43909f23..bac04c2335aaf997c73e7b3b8b8a08129bfca455 100644 (file)
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
 
        bio->bi_rw &= ~REQ_WRITE;
        or->in.bio = bio;
-       or->in.total_bytes = bio->bi_size;
+       or->in.total_bytes = bio->bi_iter.bi_size;
        return 0;
 }
 
index 9846c6ab2aaa92eeab130a92fe4d7b8d539b624c..470954aba7289a758a650cd82b2f1dfe50ae54f1 100644 (file)
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
        if (sdkp->device->no_write_same)
                return BLKPREP_KILL;
 
-       BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+       BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
        sector >>= ilog2(sdp->sector_size) - 9;
        nr_sectors >>= ilog2(sdp->sector_size) - 9;
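
The BUG_ON change reflects that bio_offset()/bio_iovec() now resolve the current segment through bi_iter and return a struct bio_vec by value instead of a pointer into bi_io_vec. Roughly, per this series (a sketch of the header macros, not verbatim):

/* Sketch, not verbatim: bio_iovec() resolves the current segment
 * through the iterator, so it stays valid after bio_advance(). */
#define bio_iter_iovec(bio, iter)                               \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))
#define bio_iovec(bio)  bio_iter_iovec((bio), (bio)->bi_iter)
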
index 6174ca4ea27594487d7dc0828d9e21841742b8ed..a7a691d0af7d105a431ba3b560a5496acb71d58b 100644 (file)
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
        struct bio *bio;
        struct scsi_disk *sdkp;
        struct sd_dif_tuple *sdt;
-       unsigned int i, j;
        u32 phys, virt;
 
        sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
        phys = hw_sector & 0xffffffff;
 
        __rq_for_each_bio(bio, rq) {
-               struct bio_vec *iv;
+               struct bio_vec iv;
+               struct bvec_iter iter;
+               unsigned int j;
 
                /* Already remapped? */
                if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
                        break;
 
-               virt = bio->bi_integrity->bip_sector & 0xffffffff;
+               virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-               bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page)
-                               + iv->bv_offset;
+               bip_for_each_vec(iv, bio->bi_integrity, iter) {
+                       sdt = kmap_atomic(iv.bv_page)
+                               + iv.bv_offset;
 
-                       for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+                       for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
                                if (be32_to_cpu(sdt->ref_tag) == virt)
                                        sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
        struct scsi_disk *sdkp;
        struct bio *bio;
        struct sd_dif_tuple *sdt;
-       unsigned int i, j, sectors, sector_sz;
+       unsigned int j, sectors, sector_sz;
        u32 phys, virt;
 
        sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
                phys >>= 3;
 
        __rq_for_each_bio(bio, scmd->request) {
-               struct bio_vec *iv;
+               struct bio_vec iv;
+               struct bvec_iter iter;
 
-               virt = bio->bi_integrity->bip_sector & 0xffffffff;
+               virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-               bip_for_each_vec(iv, bio->bi_integrity, i) {
-                       sdt = kmap_atomic(iv->bv_page)
-                               + iv->bv_offset;
+               bip_for_each_vec(iv, bio->bi_integrity, iter) {
+                       sdt = kmap_atomic(iv.bv_page)
+                               + iv.bv_offset;
 
-                       for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+                       for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
                                if (sectors == 0) {
                                        kunmap_atomic(sdt);
index 5338e8d4c50fa998582fb86209f66c95a11419a8..0718905adeb256cb2a2dd12336f3dbb7db365d23 100644 (file)
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
        struct cl_object     *obj = ll_i2info(inode)->lli_clob;
        pgoff_t        offset;
        int                ret;
-       int                i;
        int                rw;
        obd_count            page_count = 0;
-       struct bio_vec       *bvec;
+       struct bio_vec       bvec;
+       struct bvec_iter   iter;
        struct bio         *bio;
        ssize_t        bytes;
 
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
        for (bio = head; bio != NULL; bio = bio->bi_next) {
                LASSERT(rw == bio->bi_rw);
 
-               offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
-               bio_for_each_segment(bvec, bio, i) {
-                       BUG_ON(bvec->bv_offset != 0);
-                       BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+               offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       BUG_ON(bvec.bv_offset != 0);
+                       BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
 
-                       pages[page_count] = bvec->bv_page;
+                       pages[page_count] = bvec.bv_page;
                        offsets[page_count] = offset;
                        page_count++;
-                       offset += bvec->bv_len;
+                       offset += bvec.bv_len;
                }
                LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
        }
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
        bio = &lo->lo_bio;
        while (*bio && (*bio)->bi_rw == rw) {
                CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
-                      (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+                      (unsigned long long)(*bio)->bi_iter.bi_sector,
+                      (*bio)->bi_iter.bi_size,
                       page_count, (*bio)->bi_vcnt);
                if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
                        break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
                goto err;
 
        CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-              (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+              (unsigned long long)old_bio->bi_iter.bi_sector,
+              old_bio->bi_iter.bi_size);
 
        spin_lock_irq(&lo->lo_lock);
        inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
        loop_add_bio(lo, old_bio);
        return;
 err:
-       cfs_bio_io_error(old_bio, old_bio->bi_size);
+       cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
 }
 
 
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
        while (bio) {
                struct bio *tmp = bio->bi_next;
                bio->bi_next = NULL;
-               cfs_bio_endio(bio, bio->bi_size, ret);
+               cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
                bio = tmp;
        }
 }
index 3277d9838f4e928ab3555720a186e476e826a720..108f2733106d77c00b73003e106febe6aa8998cc 100644 (file)
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
        u64 start, end, bound;
 
        /* unaligned request */
-       if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+       if (unlikely(bio->bi_iter.bi_sector &
+                    (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
-       if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+       if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;
 
-       start = bio->bi_sector;
-       end = start + (bio->bi_size >> SECTOR_SHIFT);
+       start = bio->bi_iter.bi_sector;
+       end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
@@ -680,9 +681,10 @@ out:
 
 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 {
-       int i, offset;
+       int offset;
        u32 index;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
        switch (rw) {
        case READ:
@@ -693,36 +695,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
                break;
        }
 
-       index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-       offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+       index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+       offset = (bio->bi_iter.bi_sector &
+                 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;
 
-               if (bvec->bv_len > max_transfer_size) {
+               if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;
 
-                       bv.bv_page = bvec->bv_page;
+                       bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
-                       bv.bv_offset = bvec->bv_offset;
+                       bv.bv_offset = bvec.bv_offset;
 
                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;
 
-                       bv.bv_len = bvec->bv_len - max_transfer_size;
+                       bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
                                goto out;
                } else
-                       if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+                       if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
                            < 0)
                                goto out;
 
-               update_position(&index, &offset, bvec);
+               update_position(&index, &offset, &bvec);
        }
 
        set_bit(BIO_UPTODATE, &bio->bi_flags);
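
Since bvec is now a stack copy rather than a pointer into the bio, zram_bvec_rw() receives &bvec, and a segment that straddles two zram pages is split by building a second local bio_vec, as above. For example, with PAGE_SIZE 4096 and offset 1024 into the current zram page, max_transfer_size is 3072: a 4096-byte segment is handled as 3072 bytes completing page index and the remaining 1024 bytes going to page index + 1 at offset 0.
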
index c87959f12760462ca76740737e7cc839bdd4fc58..2d29356d0c85a076e90db99bbda9f1a428f9c336 100644 (file)
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
-       bio->bi_sector = lba;
+       bio->bi_iter.bi_sector = lba;
 
        return bio;
 }
index fc60b31453eefbbdcc234c7df78c5504da655980..0bad24ddc2e7a39abf29547f24173cc050017d15 100644 (file)
@@ -134,8 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
                return 0;
        }
 
-       iv = bip_vec_idx(bip, bip->bip_vcnt);
-       BUG_ON(iv == NULL);
+       iv = bip->bip_vec + bip->bip_vcnt;
 
        iv->bv_page = page;
        iv->bv_len = len;
@@ -203,6 +202,12 @@ static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
        return sectors;
 }
 
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+                                              unsigned int sectors)
+{
+       return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
+}
+
 /**
  * bio_integrity_tag_size - Retrieve integrity tag space
  * @bio:       bio to inspect
@@ -215,9 +220,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       BUG_ON(bio->bi_size == 0);
+       BUG_ON(bio->bi_iter.bi_size == 0);
 
-       return bi->tag_size * (bio->bi_size / bi->sector_size);
+       return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
 }
 EXPORT_SYMBOL(bio_integrity_tag_size);
 
@@ -235,9 +240,9 @@ int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
        nr_sectors = bio_integrity_hw_sectors(bi,
                                        DIV_ROUND_UP(len, bi->tag_size));
 
-       if (nr_sectors * bi->tuple_size > bip->bip_size) {
-               printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
-                      __func__, nr_sectors * bi->tuple_size, bip->bip_size);
+       if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
+               printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
+                      nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
                return -1;
        }
 
@@ -299,29 +304,30 @@ static void bio_integrity_generate(struct bio *bio)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
-       struct bio_vec *bv;
-       sector_t sector = bio->bi_sector;
-       unsigned int i, sectors, total;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       sector_t sector = bio->bi_iter.bi_sector;
+       unsigned int sectors, total;
        void *prot_buf = bio->bi_integrity->bip_buf;
 
        total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, i) {
-               void *kaddr = kmap_atomic(bv->bv_page);
-               bix.data_buf = kaddr + bv->bv_offset;
-               bix.data_size = bv->bv_len;
+       bio_for_each_segment(bv, bio, iter) {
+               void *kaddr = kmap_atomic(bv.bv_page);
+               bix.data_buf = kaddr + bv.bv_offset;
+               bix.data_size = bv.bv_len;
                bix.prot_buf = prot_buf;
                bix.sector = sector;
 
                bi->generate_fn(&bix);
 
-               sectors = bv->bv_len / bi->sector_size;
+               sectors = bv.bv_len / bi->sector_size;
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_size);
+               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
@@ -386,8 +392,8 @@ int bio_integrity_prep(struct bio *bio)
 
        bip->bip_owns_buf = 1;
        bip->bip_buf = buf;
-       bip->bip_size = len;
-       bip->bip_sector = bio->bi_sector;
+       bip->bip_iter.bi_size = len;
+       bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
        /* Map it */
        offset = offset_in_page(buf);
@@ -442,16 +448,18 @@ static int bio_integrity_verify(struct bio *bio)
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
        struct bio_vec *bv;
-       sector_t sector = bio->bi_integrity->bip_sector;
-       unsigned int i, sectors, total, ret;
+       sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
+       unsigned int sectors, total, ret;
        void *prot_buf = bio->bi_integrity->bip_buf;
+       int i;
 
        ret = total = 0;
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, i) {
+       bio_for_each_segment_all(bv, bio, i) {
                void *kaddr = kmap_atomic(bv->bv_page);
+
                bix.data_buf = kaddr + bv->bv_offset;
                bix.data_size = bv->bv_len;
                bix.prot_buf = prot_buf;
@@ -468,7 +476,7 @@ static int bio_integrity_verify(struct bio *bio)
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
-               BUG_ON(total > bio->bi_integrity->bip_size);
+               BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
 
                kunmap_atomic(kaddr);
        }
@@ -495,7 +503,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 /**
@@ -532,56 +540,6 @@ void bio_integrity_endio(struct bio *bio, int error)
 }
 EXPORT_SYMBOL(bio_integrity_endio);
 
-/**
- * bio_integrity_mark_head - Advance bip_vec skip bytes
- * @bip:       Integrity vector to advance
- * @skip:      Number of bytes to advance it
- */
-void bio_integrity_mark_head(struct bio_integrity_payload *bip,
-                            unsigned int skip)
-{
-       struct bio_vec *iv;
-       unsigned int i;
-
-       bip_for_each_vec(iv, bip, i) {
-               if (skip == 0) {
-                       bip->bip_idx = i;
-                       return;
-               } else if (skip >= iv->bv_len) {
-                       skip -= iv->bv_len;
-               } else { /* skip < iv->bv_len) */
-                       iv->bv_offset += skip;
-                       iv->bv_len -= skip;
-                       bip->bip_idx = i;
-                       return;
-               }
-       }
-}
-
-/**
- * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
- * @bip:       Integrity vector to truncate
- * @len:       New length of integrity vector
- */
-void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
-                            unsigned int len)
-{
-       struct bio_vec *iv;
-       unsigned int i;
-
-       bip_for_each_vec(iv, bip, i) {
-               if (len == 0) {
-                       bip->bip_vcnt = i;
-                       return;
-               } else if (len >= iv->bv_len) {
-                       len -= iv->bv_len;
-               } else { /* len < iv->bv_len) */
-                       iv->bv_len = len;
-                       len = 0;
-               }
-       }
-}
-
 /**
  * bio_integrity_advance - Advance integrity vector
  * @bio:       bio whose integrity vector to update
@@ -595,13 +553,9 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-       unsigned int nr_sectors;
+       unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
-       BUG_ON(bip == NULL);
-       BUG_ON(bi == NULL);
-
-       nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
-       bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
+       bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
 EXPORT_SYMBOL(bio_integrity_advance);
 
@@ -621,63 +575,12 @@ void bio_integrity_trim(struct bio *bio, unsigned int offset,
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-       unsigned int nr_sectors;
-
-       BUG_ON(bip == NULL);
-       BUG_ON(bi == NULL);
-       BUG_ON(!bio_flagged(bio, BIO_CLONED));
 
-       nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-       bip->bip_sector = bip->bip_sector + offset;
-       bio_integrity_mark_head(bip, offset * bi->tuple_size);
-       bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
+       bio_integrity_advance(bio, offset << 9);
+       bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
 }
 EXPORT_SYMBOL(bio_integrity_trim);
 
-/**
- * bio_integrity_split - Split integrity metadata
- * @bio:       Protected bio
- * @bp:                Resulting bio_pair
- * @sectors:   Offset
- *
- * Description: Splits an integrity page into a bio_pair.
- */
-void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
-{
-       struct blk_integrity *bi;
-       struct bio_integrity_payload *bip = bio->bi_integrity;
-       unsigned int nr_sectors;
-
-       if (bio_integrity(bio) == 0)
-               return;
-
-       bi = bdev_get_integrity(bio->bi_bdev);
-       BUG_ON(bi == NULL);
-       BUG_ON(bip->bip_vcnt != 1);
-
-       nr_sectors = bio_integrity_hw_sectors(bi, sectors);
-
-       bp->bio1.bi_integrity = &bp->bip1;
-       bp->bio2.bi_integrity = &bp->bip2;
-
-       bp->iv1 = bip->bip_vec[bip->bip_idx];
-       bp->iv2 = bip->bip_vec[bip->bip_idx];
-
-       bp->bip1.bip_vec = &bp->iv1;
-       bp->bip2.bip_vec = &bp->iv2;
-
-       bp->iv1.bv_len = sectors * bi->tuple_size;
-       bp->iv2.bv_offset += sectors * bi->tuple_size;
-       bp->iv2.bv_len -= sectors * bi->tuple_size;
-
-       bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
-       bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
-
-       bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
-       bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
-}
-EXPORT_SYMBOL(bio_integrity_split);
-
 /**
  * bio_integrity_clone - Callback for cloning bios with integrity metadata
  * @bio:       New bio
@@ -702,9 +605,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        memcpy(bip->bip_vec, bip_src->bip_vec,
               bip_src->bip_vcnt * sizeof(struct bio_vec));
 
-       bip->bip_sector = bip_src->bip_sector;
        bip->bip_vcnt = bip_src->bip_vcnt;
-       bip->bip_idx = bip_src->bip_idx;
+       bip->bip_iter = bip_src->bip_iter;
 
        return 0;
 }
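
With bip_iter embedded in the payload, the mark_head/mark_tail helpers removed above become unnecessary: advancing is a bvec_iter_advance() and trimming is an advance plus a bi_size clamp. As a worked example, assuming a 512-byte hardware sector and 8-byte DIF tuples, bio_integrity_advance() for 4096 bytes of data (8 sectors) computes bio_integrity_bytes(bi, 8) = 8 * 8 = 64 and moves the integrity iterator forward 64 bytes, leaving bip_vec itself untouched.
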
index 33d79a4eb92d6e623aa90e2291af39b2b2689d83..75c49a38223969c1f7256868cb3b09fc7d3bd286 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -38,8 +38,6 @@
  */
 #define BIO_INLINE_VECS                4
 
-static mempool_t *bio_split_pool __read_mostly;
-
 /*
  * if you change this list, also change bvec_alloc or things will
  * break badly! cannot be bigger than what you can fit into an
@@ -273,6 +271,7 @@ void bio_init(struct bio *bio)
 {
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
+       atomic_set(&bio->bi_remaining, 1);
        atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
@@ -295,9 +294,35 @@ void bio_reset(struct bio *bio)
 
        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags|(1 << BIO_UPTODATE);
+       atomic_set(&bio->bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
+static void bio_chain_endio(struct bio *bio, int error)
+{
+       bio_endio(bio->bi_private, error);
+       bio_put(bio);
+}
+
+/**
+ * bio_chain - chain bio completions
+ *
+ * The caller won't have a bi_end_io called when @bio completes - instead,
+ * @parent's bi_end_io won't be called until both @parent and @bio have
+ * completed; the chained bio will also be freed when it completes.
+ *
+ * The caller must not set bi_private or bi_end_io in @bio.
+ */
+void bio_chain(struct bio *bio, struct bio *parent)
+{
+       BUG_ON(bio->bi_private || bio->bi_end_io);
+
+       bio->bi_private = parent;
+       bio->bi_end_io  = bio_chain_endio;
+       atomic_inc(&parent->bi_remaining);
+}
+EXPORT_SYMBOL(bio_chain);
+
 static void bio_alloc_rescue(struct work_struct *work)
 {
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
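
bio_chain() is what replaces the bio_pair machinery removed later in this diff: completions are counted in bi_remaining instead of a separate pair object. A usage sketch under assumed names (the split helper is hypothetical; error handling elided):

/* Hypothetical split helper; error handling elided. */
static void submit_front_half(struct bio *bio, unsigned bytes,
                              struct bio_set *bs)
{
        struct bio *split = bio_clone_fast(bio, GFP_NOIO, bs);

        split->bi_iter.bi_size = bytes; /* clone covers the front part */
        bio_chain(split, bio);          /* parent waits for the clone */
        bio_advance(bio, bytes);        /* parent now covers the rest */

        generic_make_request(split);
        /* the caller then submits the advanced parent bio as usual */
}
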
@@ -473,13 +498,13 @@ EXPORT_SYMBOL(bio_alloc_bioset);
 void zero_fill_bio(struct bio *bio)
 {
        unsigned long flags;
-       struct bio_vec *bv;
-       int i;
+       struct bio_vec bv;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bv, bio, i) {
-               char *data = bvec_kmap_irq(bv, &flags);
-               memset(data, 0, bv->bv_len);
-               flush_dcache_page(bv->bv_page);
+       bio_for_each_segment(bv, bio, iter) {
+               char *data = bvec_kmap_irq(&bv, &flags);
+               memset(data, 0, bv.bv_len);
+               flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
 }
@@ -515,51 +540,49 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 EXPORT_SYMBOL(bio_phys_segments);
 
 /**
- *     __bio_clone     -       clone a bio
+ *     __bio_clone_fast - clone a bio that shares the original bio's biovec
  *     @bio: destination bio
  *     @bio_src: bio to clone
  *
  *     Clone a &bio. Caller will own the returned bio, but not
  *     the actual data it points to. Reference count of returned
  *     bio will be one.
+ *
+ *     Caller must ensure that @bio_src is not freed before @bio.
  */
-void __bio_clone(struct bio *bio, struct bio *bio_src)
+void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 {
-       memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
-               bio_src->bi_max_vecs * sizeof(struct bio_vec));
+       BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
 
        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
-       bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
-       bio->bi_vcnt = bio_src->bi_vcnt;
-       bio->bi_size = bio_src->bi_size;
-       bio->bi_idx = bio_src->bi_idx;
+       bio->bi_iter = bio_src->bi_iter;
+       bio->bi_io_vec = bio_src->bi_io_vec;
 }
-EXPORT_SYMBOL(__bio_clone);
+EXPORT_SYMBOL(__bio_clone_fast);
 
 /**
- *     bio_clone_bioset -      clone a bio
+ *     bio_clone_fast - clone a bio that shares the original bio's biovec
  *     @bio: bio to clone
  *     @gfp_mask: allocation priority
  *     @bs: bio_set to allocate from
  *
- *     Like __bio_clone, only also allocates the returned bio
+ *     Like __bio_clone_fast, only also allocates the returned bio
  */
-struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
-                            struct bio_set *bs)
+struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 {
        struct bio *b;
 
-       b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
+       b = bio_alloc_bioset(gfp_mask, 0, bs);
        if (!b)
                return NULL;
 
-       __bio_clone(b, bio);
+       __bio_clone_fast(b, bio);
 
        if (bio_integrity(bio)) {
                int ret;
@@ -574,6 +597,74 @@ struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
 
        return b;
 }
+EXPORT_SYMBOL(bio_clone_fast);
+
+/**
+ *     bio_clone_bioset - clone a bio
+ *     @bio_src: bio to clone
+ *     @gfp_mask: allocation priority
+ *     @bs: bio_set to allocate from
+ *
+ *     Clone bio. Caller will own the returned bio, but not the actual data it
+ *     points to. Reference count of returned bio will be one.
+ */
+struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
+                            struct bio_set *bs)
+{
+       unsigned nr_iovecs = 0;
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       struct bio *bio;
+
+       /*
+        * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
+        * bio_src->bi_io_vec to bio->bi_io_vec.
+        *
+        * We can't do that anymore, because:
+        *
+        *  - The point of cloning the biovec is to produce a bio with a biovec
+        *    the caller can modify: bi_idx and bi_bvec_done should be 0.
+        *
+        *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+        *    we tried to clone the whole thing bio_alloc_bioset() would fail.
+        *    But the clone should succeed as long as the number of biovecs we
+        *    actually need to allocate is fewer than BIO_MAX_PAGES.
+        *
+        *  - Lastly, bi_vcnt should not be looked at or relied upon by code
+        *    that does not own the bio - reason being drivers don't use it for
+        *    iterating over the biovec anymore, so expecting it to be kept up
+        *    to date (i.e. for clones that share the parent biovec) is just
+        *    asking for trouble and would force extra work on
+        *    __bio_clone_fast() anyways.
+        */
+
+       bio_for_each_segment(bv, bio_src, iter)
+               nr_iovecs++;
+
+       bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
+       if (!bio)
+               return NULL;
+
+       bio->bi_bdev            = bio_src->bi_bdev;
+       bio->bi_rw              = bio_src->bi_rw;
+       bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
+       bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
+
+       bio_for_each_segment(bv, bio_src, iter)
+               bio->bi_io_vec[bio->bi_vcnt++] = bv;
+
+       if (bio_integrity(bio_src)) {
+               int ret;
+
+               ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+               if (ret < 0) {
+                       bio_put(bio);
+                       return NULL;
+               }
+       }
+
+       return bio;
+}
 EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
@@ -612,7 +703,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;
 
-       if (((bio->bi_size + len) >> 9) > max_sectors)
+       if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
                return 0;
 
        /*
@@ -635,8 +726,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                                           simulate merging updated prev_bvec
                                           as new bvec. */
                                        .bi_bdev = bio->bi_bdev,
-                                       .bi_sector = bio->bi_sector,
-                                       .bi_size = bio->bi_size - prev_bv_len,
+                                       .bi_sector = bio->bi_iter.bi_sector,
+                                       .bi_size = bio->bi_iter.bi_size -
+                                               prev_bv_len,
                                        .bi_rw = bio->bi_rw,
                                };
 
@@ -684,8 +776,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
-                       .bi_sector = bio->bi_sector,
-                       .bi_size = bio->bi_size,
+                       .bi_sector = bio->bi_iter.bi_sector,
+                       .bi_size = bio->bi_iter.bi_size,
                        .bi_rw = bio->bi_rw,
                };
 
@@ -708,7 +800,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
  done:
-       bio->bi_size += len;
+       bio->bi_iter.bi_size += len;
        return len;
 }
 
@@ -807,28 +899,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);
 
-       bio->bi_sector += bytes >> 9;
-       bio->bi_size -= bytes;
-
-       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
-               return;
-
-       while (bytes) {
-               if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-                       WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
-                                 bio->bi_idx, bio->bi_vcnt);
-                       break;
-               }
-
-               if (bytes >= bio_iovec(bio)->bv_len) {
-                       bytes -= bio_iovec(bio)->bv_len;
-                       bio->bi_idx++;
-               } else {
-                       bio_iovec(bio)->bv_len -= bytes;
-                       bio_iovec(bio)->bv_offset += bytes;
-                       bytes = 0;
-               }
-       }
+       bio_advance_iter(bio, &bio->bi_iter, bytes);
 }
 EXPORT_SYMBOL(bio_advance);
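
The open-coded advance loop removed above is replaced by bio_advance_iter(), which for ordinary data bios boils down to bvec_iter_advance() on bio->bi_iter. Roughly, as introduced by this series (a sketch, not verbatim):

/* Rough shape of bvec_iter_advance() from this series. */
static inline void bvec_iter_advance(struct bio_vec *bv,
                                     struct bvec_iter *iter,
                                     unsigned bytes)
{
        while (bytes) {
                unsigned len = min(bytes, bvec_iter_len(bv, *iter));

                bytes -= len;
                iter->bi_size -= len;
                iter->bi_bvec_done += len;

                if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
                        iter->bi_bvec_done = 0;
                        iter->bi_idx++;
                }
        }
}
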
 
@@ -874,117 +945,80 @@ EXPORT_SYMBOL(bio_alloc_pages);
  */
 void bio_copy_data(struct bio *dst, struct bio *src)
 {
-       struct bio_vec *src_bv, *dst_bv;
-       unsigned src_offset, dst_offset, bytes;
+       struct bvec_iter src_iter, dst_iter;
+       struct bio_vec src_bv, dst_bv;
        void *src_p, *dst_p;
+       unsigned bytes;
 
-       src_bv = bio_iovec(src);
-       dst_bv = bio_iovec(dst);
-
-       src_offset = src_bv->bv_offset;
-       dst_offset = dst_bv->bv_offset;
+       src_iter = src->bi_iter;
+       dst_iter = dst->bi_iter;
 
        while (1) {
-               if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
-                       src_bv++;
-                       if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
-                               src = src->bi_next;
-                               if (!src)
-                                       break;
-
-                               src_bv = bio_iovec(src);
-                       }
+               if (!src_iter.bi_size) {
+                       src = src->bi_next;
+                       if (!src)
+                               break;
 
-                       src_offset = src_bv->bv_offset;
+                       src_iter = src->bi_iter;
                }
 
-               if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
-                       dst_bv++;
-                       if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
-                               dst = dst->bi_next;
-                               if (!dst)
-                                       break;
-
-                               dst_bv = bio_iovec(dst);
-                       }
+               if (!dst_iter.bi_size) {
+                       dst = dst->bi_next;
+                       if (!dst)
+                               break;
 
-                       dst_offset = dst_bv->bv_offset;
+                       dst_iter = dst->bi_iter;
                }
 
-               bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
-                           src_bv->bv_offset + src_bv->bv_len - src_offset);
+               src_bv = bio_iter_iovec(src, src_iter);
+               dst_bv = bio_iter_iovec(dst, dst_iter);
+
+               bytes = min(src_bv.bv_len, dst_bv.bv_len);
 
-               src_p = kmap_atomic(src_bv->bv_page);
-               dst_p = kmap_atomic(dst_bv->bv_page);
+               src_p = kmap_atomic(src_bv.bv_page);
+               dst_p = kmap_atomic(dst_bv.bv_page);
 
-               memcpy(dst_p + dst_offset,
-                      src_p + src_offset,
+               memcpy(dst_p + dst_bv.bv_offset,
+                      src_p + src_bv.bv_offset,
                       bytes);
 
                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);
 
-               src_offset += bytes;
-               dst_offset += bytes;
+               bio_advance_iter(src, &src_iter, bytes);
+               bio_advance_iter(dst, &dst_iter, bytes);
        }
 }
 EXPORT_SYMBOL(bio_copy_data);
 
 struct bio_map_data {
-       struct bio_vec *iovecs;
-       struct sg_iovec *sgvecs;
        int nr_sgvecs;
        int is_our_pages;
+       struct sg_iovec sgvecs[];
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
 {
-       memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bmd->is_our_pages = is_our_pages;
        bio->bi_private = bmd;
 }
 
-static void bio_free_map_data(struct bio_map_data *bmd)
-{
-       kfree(bmd->iovecs);
-       kfree(bmd->sgvecs);
-       kfree(bmd);
-}
-
 static struct bio_map_data *bio_alloc_map_data(int nr_segs,
                                               unsigned int iov_count,
                                               gfp_t gfp_mask)
 {
-       struct bio_map_data *bmd;
-
        if (iov_count > UIO_MAXIOV)
                return NULL;
 
-       bmd = kmalloc(sizeof(*bmd), gfp_mask);
-       if (!bmd)
-               return NULL;
-
-       bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
-       if (!bmd->iovecs) {
-               kfree(bmd);
-               return NULL;
-       }
-
-       bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
-       if (bmd->sgvecs)
-               return bmd;
-
-       kfree(bmd->iovecs);
-       kfree(bmd);
-       return NULL;
+       return kmalloc(sizeof(struct bio_map_data) +
+                      sizeof(struct sg_iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
-                         struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
                          int to_user, int from_user, int do_free_page)
 {
        int ret = 0, i;
@@ -994,7 +1028,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *bv_addr = page_address(bvec->bv_page);
-               unsigned int bv_len = iovecs[i].bv_len;
+               unsigned int bv_len = bvec->bv_len;
 
                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
@@ -1054,14 +1088,14 @@ int bio_uncopy_user(struct bio *bio)
                 * don't copy into a random user address space, just free.
                 */
                if (current->mm)
-                       ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-                                            bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+                       ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
+                                            bio_data_dir(bio) == READ,
                                             0, bmd->is_our_pages);
                else if (bmd->is_our_pages)
                        bio_for_each_segment_all(bvec, bio, i)
                                __free_page(bvec->bv_page);
        }
-       bio_free_map_data(bmd);
+       kfree(bmd);
        bio_put(bio);
        return ret;
 }
@@ -1175,7 +1209,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         */
        if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
-               ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
+               ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
                if (ret)
                        goto cleanup;
        }
@@ -1189,7 +1223,7 @@ cleanup:
 
        bio_put(bio);
 out_bmd:
-       bio_free_map_data(bmd);
+       kfree(bmd);
        return ERR_PTR(ret);
 }
 
@@ -1485,7 +1519,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        if (IS_ERR(bio))
                return bio;
 
-       if (bio->bi_size == len)
+       if (bio->bi_iter.bi_size == len)
                return bio;
 
        /*
@@ -1506,16 +1540,15 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *addr = page_address(bvec->bv_page);
-               int len = bmd->iovecs[i].bv_len;
 
                if (read)
-                       memcpy(p, addr, len);
+                       memcpy(p, addr, bvec->bv_len);
 
                __free_page(bvec->bv_page);
-               p += len;
+               p += bvec->bv_len;
        }
 
-       bio_free_map_data(bmd);
+       kfree(bmd);
        bio_put(bio);
 }
 
@@ -1686,11 +1719,11 @@ void bio_check_pages_dirty(struct bio *bio)
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 void bio_flush_dcache_pages(struct bio *bi)
 {
-       int i;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
 
-       bio_for_each_segment(bvec, bi, i)
-               flush_dcache_page(bvec->bv_page);
+       bio_for_each_segment(bvec, bi, iter)
+               flush_dcache_page(bvec.bv_page);
 }
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif
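
As the bio_flush_dcache_pages() hunk shows, bio_for_each_segment() now copies each segment into a caller-provided struct bio_vec while a struct bvec_iter tracks the position, so the underlying biovec is never written to. A hedged sketch of the idiom, using a hypothetical helper that zeroes a bio's payload:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

static void zero_fill_segments(struct bio *bio)
{
	struct bio_vec bv;	/* per-segment copy, not a pointer */
	struct bvec_iter iter;	/* cursor over the immutable biovec */

	bio_for_each_segment(bv, bio, iter) {
		char *p = kmap_atomic(bv.bv_page);

		memset(p + bv.bv_offset, 0, bv.bv_len);
		kunmap_atomic(p);
	}
}
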
@@ -1711,96 +1744,86 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
  **/
 void bio_endio(struct bio *bio, int error)
 {
-       if (error)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               error = -EIO;
+       while (bio) {
+               BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
 
-       if (bio->bi_end_io)
-               bio->bi_end_io(bio, error);
-}
-EXPORT_SYMBOL(bio_endio);
+               if (error)
+                       clear_bit(BIO_UPTODATE, &bio->bi_flags);
+               else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+                       error = -EIO;
 
-void bio_pair_release(struct bio_pair *bp)
-{
-       if (atomic_dec_and_test(&bp->cnt)) {
-               struct bio *master = bp->bio1.bi_private;
+               if (!atomic_dec_and_test(&bio->bi_remaining))
+                       return;
 
-               bio_endio(master, bp->error);
-               mempool_free(bp, bp->bio2.bi_private);
+               /*
+                * Need to have a real endio function for chained bios,
+                * otherwise various corner cases will break (like stacking
+                * block devices that save/restore bi_end_io) - however, we want
+                * to avoid unbounded recursion and blowing the stack. Tail call
+                * optimization would handle this, but compiling with frame
+                * pointers also disables gcc's sibling call optimization.
+                */
+               if (bio->bi_end_io == bio_chain_endio) {
+                       struct bio *parent = bio->bi_private;
+                       bio_put(bio);
+                       bio = parent;
+               } else {
+                       if (bio->bi_end_io)
+                               bio->bi_end_io(bio, error);
+                       bio = NULL;
+               }
        }
 }
-EXPORT_SYMBOL(bio_pair_release);
+EXPORT_SYMBOL(bio_endio);
 
-static void bio_pair_end_1(struct bio *bi, int err)
+/**
+ * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
+ * @bio:       bio
+ * @error:     error, if any
+ *
+ * For code that has saved and restored bi_end_io; think hard before using this
+ * function - you should probably have cloned the entire bio instead.
+ **/
+void bio_endio_nodec(struct bio *bio, int error)
 {
-       struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
-
-       if (err)
-               bp->error = err;
-
-       bio_pair_release(bp);
+       atomic_inc(&bio->bi_remaining);
+       bio_endio(bio, error);
 }
+EXPORT_SYMBOL(bio_endio_nodec);
 
-static void bio_pair_end_2(struct bio *bi, int err)
-{
-       struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
-
-       if (err)
-               bp->error = err;
-
-       bio_pair_release(bp);
-}
-
-/*
- * split a bio - only worry about a bio with a single page in its iovec
+/**
+ * bio_split - split a bio
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:                bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+ * responsibility to ensure that @bio is not freed before the split.
  */
-struct bio_pair *bio_split(struct bio *bi, int first_sectors)
+struct bio *bio_split(struct bio *bio, int sectors,
+                     gfp_t gfp, struct bio_set *bs)
 {
-       struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
-
-       if (!bp)
-               return bp;
-
-       trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
-                               bi->bi_sector + first_sectors);
-
-       BUG_ON(bio_segments(bi) > 1);
-       atomic_set(&bp->cnt, 3);
-       bp->error = 0;
-       bp->bio1 = *bi;
-       bp->bio2 = *bi;
-       bp->bio2.bi_sector += first_sectors;
-       bp->bio2.bi_size -= first_sectors << 9;
-       bp->bio1.bi_size = first_sectors << 9;
-
-       if (bi->bi_vcnt != 0) {
-               bp->bv1 = *bio_iovec(bi);
-               bp->bv2 = *bio_iovec(bi);
-
-               if (bio_is_rw(bi)) {
-                       bp->bv2.bv_offset += first_sectors << 9;
-                       bp->bv2.bv_len -= first_sectors << 9;
-                       bp->bv1.bv_len = first_sectors << 9;
-               }
+       struct bio *split = NULL;
 
-               bp->bio1.bi_io_vec = &bp->bv1;
-               bp->bio2.bi_io_vec = &bp->bv2;
+       BUG_ON(sectors <= 0);
+       BUG_ON(sectors >= bio_sectors(bio));
 
-               bp->bio1.bi_max_vecs = 1;
-               bp->bio2.bi_max_vecs = 1;
-       }
+       split = bio_clone_fast(bio, gfp, bs);
+       if (!split)
+               return NULL;
 
-       bp->bio1.bi_end_io = bio_pair_end_1;
-       bp->bio2.bi_end_io = bio_pair_end_2;
+       split->bi_iter.bi_size = sectors << 9;
 
-       bp->bio1.bi_private = bi;
-       bp->bio2.bi_private = bio_split_pool;
+       if (bio_integrity(split))
+               bio_integrity_trim(split, 0, sectors);
 
-       if (bio_integrity(bi))
-               bio_integrity_split(bi, bp, first_sectors);
+       bio_advance(bio, split->bi_iter.bi_size);
 
-       return bp;
+       return split;
 }
 EXPORT_SYMBOL(bio_split);
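
The new bio_split() leaves completion plumbing to the caller: the returned clone shares @bio's biovec and @bio is advanced past the split point. Paired with bio_chain() from this series, the parent completes only once every piece has, and the iterative bio_endio() loop above unwinds the chain without recursion. A hedged usage sketch (hypothetical helper; assumes the bio spans at least two sectors, error handling elided):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_in_halves(struct bio *bio, struct bio_set *bs)
{
	/* carve off the front half; 'bio' now describes the back half */
	struct bio *front = bio_split(bio, bio_sectors(bio) / 2, GFP_NOIO, bs);

	if (front) {
		bio_chain(front, bio);	/* bumps bio->bi_remaining */
		generic_make_request(front);
	}
	generic_make_request(bio);	/* parent's endio runs when both finish */
}
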
 
@@ -1814,80 +1837,20 @@ void bio_trim(struct bio *bio, int offset, int size)
 {
        /* 'bio' is a cloned bio which we need to trim to match
         * the given offset and size.
-        * This requires adjusting bi_sector, bi_size, and bi_io_vec
         */
-       int i;
-       struct bio_vec *bvec;
-       int sofar = 0;
 
        size <<= 9;
-       if (offset == 0 && size == bio->bi_size)
+       if (offset == 0 && size == bio->bi_iter.bi_size)
                return;
 
        clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
        bio_advance(bio, offset << 9);
 
-       bio->bi_size = size;
-
-       /* avoid any complications with bi_idx being non-zero*/
-       if (bio->bi_idx) {
-               memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
-                       (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
-               bio->bi_vcnt -= bio->bi_idx;
-               bio->bi_idx = 0;
-       }
-       /* Make sure vcnt and last bv are not too big */
-       bio_for_each_segment(bvec, bio, i) {
-               if (sofar + bvec->bv_len > size)
-                       bvec->bv_len = size - sofar;
-               if (bvec->bv_len == 0) {
-                       bio->bi_vcnt = i;
-                       break;
-               }
-               sofar += bvec->bv_len;
-       }
+       bio->bi_iter.bi_size = size;
 }
 EXPORT_SYMBOL_GPL(bio_trim);
 
-/**
- *      bio_sector_offset - Find hardware sector offset in bio
- *      @bio:           bio to inspect
- *      @index:         bio_vec index
- *      @offset:        offset in bv_page
- *
- *      Return the number of hardware sectors between beginning of bio
- *      and an end point indicated by a bio_vec index and an offset
- *      within that vector's page.
- */
-sector_t bio_sector_offset(struct bio *bio, unsigned short index,
-                          unsigned int offset)
-{
-       unsigned int sector_sz;
-       struct bio_vec *bv;
-       sector_t sectors;
-       int i;
-
-       sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
-       sectors = 0;
-
-       if (index >= bio->bi_idx)
-               index = bio->bi_vcnt - 1;
-
-       bio_for_each_segment_all(bv, bio, i) {
-               if (i == index) {
-                       if (offset > bv->bv_offset)
-                               sectors += (offset - bv->bv_offset) / sector_sz;
-                       break;
-               }
-
-               sectors += bv->bv_len / sector_sz;
-       }
-
-       return sectors;
-}
-EXPORT_SYMBOL(bio_sector_offset);
-
 /*
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
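
bio_trim() is now pure iterator arithmetic: bio_advance() steps bi_iter past the unwanted prefix (updating bi_sector, bi_size, bi_idx and bi_bvec_done together), and the tail is dropped by shrinking bi_iter.bi_size; no bvec surgery remains. A hedged sketch of a typical caller, with hypothetical names:

#include <linux/bio.h>

/* keep 'len' sectors of a clone of 'bio', starting 'skip' sectors in */
static struct bio *clone_region(struct bio *bio, int skip, int len,
				struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, skip, len);	/* both arguments in sectors */
	return clone;
}
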
@@ -2065,11 +2028,6 @@ static int __init init_bio(void)
        if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
                panic("bio: can't create integrity pool\n");
 
-       bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
-                                                    sizeof(struct bio_pair));
-       if (!bio_split_pool)
-               panic("bio: can't create split pool\n");
-
        return 0;
 }
 subsys_initcall(init_bio);
index 131d82800b3af45778cb8651f5c559bd57cec437..cb05e1c842c5b8b84dee98d1a3f452eaa179417e 100644 (file)
@@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                        return -1;
                }
                bio->bi_bdev = block_ctx->dev->bdev;
-               bio->bi_sector = dev_bytenr >> 9;
+               bio->bi_iter.bi_sector = dev_bytenr >> 9;
 
                for (j = i; j < num_pages; j++) {
                        ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                int bio_is_patched;
                char **mapped_datav;
 
-               dev_bytenr = 512 * bio->bi_sector;
+               dev_bytenr = 512 * bio->bi_iter.bi_sector;
                bio_is_patched = 0;
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
                               "submit_bio(rw=0x%x, bi_vcnt=%u,"
                               " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
                               rw, bio->bi_vcnt,
-                              (unsigned long long)bio->bi_sector, dev_bytenr,
-                              bio->bi_bdev);
+                              (unsigned long long)bio->bi_iter.bi_sector,
+                              dev_bytenr, bio->bi_bdev);
 
                mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
                                       GFP_NOFS);
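
The renames in this and the following hunks are mechanical: bi_sector, bi_size and bi_idx moved from struct bio into the embedded iterator bio->bi_iter, so a bio can be advanced, split or partially completed without modifying its biovec. For orientation, the iterator added by this series carries four fields:

struct bvec_iter {
	sector_t	bi_sector;	/* device address in 512-byte sectors */
	unsigned int	bi_size;	/* residual I/O count, in bytes */
	unsigned int	bi_idx;		/* current index into bi_io_vec */
	unsigned int	bi_bvec_done;	/* bytes completed in current bvec */
};
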
index 1499b27b41863e7dfbbe7da134b1a7fb66dece34..f5cdeb4b553824744429cff1f4d8b57c17a27909 100644 (file)
@@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err)
                goto out;
 
        inode = cb->inode;
-       ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+       ret = check_compressed_csum(inode, cb,
+                                   (u64)bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;
 
@@ -201,18 +202,16 @@ csum_failed:
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
-               int bio_index = 0;
-               struct bio_vec *bvec = cb->orig_bio->bi_io_vec;
+               int i;
+               struct bio_vec *bvec;
 
                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
-               while (bio_index < cb->orig_bio->bi_vcnt) {
+               bio_for_each_segment_all(bvec, cb->orig_bio, i)
                        SetPageChecked(bvec->bv_page);
-                       bvec++;
-                       bio_index++;
-               }
+
                bio_endio(cb->orig_bio, 0);
        }
 
@@ -372,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
-               if (bio->bi_size)
+               if (bio->bi_iter.bi_size)
                        ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
                                                           PAGE_CACHE_SIZE,
                                                           bio, 0);
@@ -506,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 
                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
-                   (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+                   (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
@@ -552,7 +551,7 @@ next:
  * in it.  We don't actually do IO on those pages but allocate new ones
  * to hold the compressed pages on disk.
  *
- * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
  * bio->bi_vcnt is a count of pages
  *
@@ -573,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
-       u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+       u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
@@ -659,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_CACHE_SHIFT;
 
-               if (comp_bio->bi_size)
+               if (comp_bio->bi_iter.bi_size)
                        ret = tree->ops->merge_bio_hook(READ, page, 0,
                                                        PAGE_CACHE_SIZE,
                                                        comp_bio, 0);
@@ -687,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                                        comp_bio, sums);
                                BUG_ON(ret); /* -ENOMEM */
                        }
-                       sums += (comp_bio->bi_size + root->sectorsize - 1) /
-                               root->sectorsize;
+                       sums += (comp_bio->bi_iter.bi_size +
+                                root->sectorsize - 1) / root->sectorsize;
 
                        ret = btrfs_map_bio(root, READ, comp_bio,
                                            mirror_num, 0);
index 8072cfa8a3b16c075e5c381f481e7cb874d9c531..e71039ea66cf9d4bf8a56bdd984bc7e26597aa74 100644 (file)
@@ -842,20 +842,17 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 static int btree_csum_one_bio(struct bio *bio)
 {
-       struct bio_vec *bvec = bio->bi_io_vec;
-       int bio_index = 0;
+       struct bio_vec *bvec;
        struct btrfs_root *root;
-       int ret = 0;
+       int i, ret = 0;
 
-       WARN_ON(bio->bi_vcnt <= 0);
-       while (bio_index < bio->bi_vcnt) {
+       bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
-               bio_index++;
-               bvec++;
        }
+
        return ret;
 }
 
@@ -1695,7 +1692,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
-       bio_endio(bio, error);
+       bio_endio_nodec(bio, error);
 }
 
 static int cleaner_kthread(void *arg)
index ff43802a7c886088e37c5c1c16427f2b522cad30..bcb6f1b780d64512868303c04a7939060612e3e3 100644 (file)
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
        map_length = length;
 
        ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        }
        BUG_ON(mirror_num != bbio->mirror_num);
        sector = bbio->stripes[mirror_num-1].physical >> 9;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        dev = bbio->stripes[mirror_num-1].dev;
        kfree(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
@@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
                return -EIO;
        }
        bio->bi_end_io = failed_bio->bi_end_io;
-       bio->bi_sector = failrec->logical >> 9;
+       bio->bi_iter.bi_sector = failrec->logical >> 9;
        bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
 
        btrfs_failed_bio = btrfs_io_bio(failed_bio);
        if (btrfs_failed_bio->csum) {
@@ -2332,12 +2332,13 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
@@ -2355,14 +2356,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                start = page_offset(page);
                end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
                if (end_extent_writepage(page, err, start, end))
                        continue;
 
                end_page_writeback(page);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        bio_put(bio);
 }
@@ -2392,9 +2390,8 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
  */
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
+       struct bio_vec *bvec;
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_vec *bvec = bio->bi_io_vec;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct extent_io_tree *tree;
        u64 offset = 0;
@@ -2405,16 +2402,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
        u64 extent_len = 0;
        int mirror;
        int ret;
+       int i;
 
        if (err)
                uptodate = 0;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct inode *inode = page->mapping->host;
 
                pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-                        "mirror=%lu\n", (u64)bio->bi_sector, err,
+                        "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
                         io_bio->mirror_num);
                tree = &BTRFS_I(inode)->io_tree;
 
@@ -2433,9 +2431,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                end = start + bvec->bv_offset + bvec->bv_len - 1;
                len = bvec->bv_len;
 
-               if (++bvec <= bvec_end)
-                       prefetchw(&bvec->bv_page->flags);
-
                mirror = io_bio->mirror_num;
                if (likely(uptodate && tree->ops &&
                           tree->ops->readpage_end_io_hook)) {
@@ -2516,7 +2511,7 @@ readpage_ok:
                        extent_start = start;
                        extent_len = end + 1 - start;
                }
-       } while (bvec <= bvec_end);
+       }
 
        if (extent_len)
                endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,9 +2542,8 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
        }
 
        if (bio) {
-               bio->bi_size = 0;
                bio->bi_bdev = bdev;
-               bio->bi_sector = first_sector;
+               bio->bi_iter.bi_sector = first_sector;
                btrfs_bio = btrfs_io_bio(bio);
                btrfs_bio->csum = NULL;
                btrfs_bio->csum_allocated = NULL;
@@ -2643,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
        if (bio_ret && *bio_ret) {
                bio = *bio_ret;
                if (old_compressed)
-                       contig = bio->bi_sector == sector;
+                       contig = bio->bi_iter.bi_sector == sector;
                else
                        contig = bio_end_sector(bio) == sector;
 
@@ -3410,20 +3404,18 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
 
 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 {
-       int uptodate = err == 0;
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
        struct extent_buffer *eb;
-       int done;
+       int i, done;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               bvec--;
                eb = (struct extent_buffer *)page->private;
                BUG_ON(!eb);
                done = atomic_dec_and_test(&eb->io_pages);
 
-               if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+               if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
                        ClearPageUptodate(page);
                        SetPageError(page);
@@ -3435,10 +3427,9 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
                        continue;
 
                end_extent_buffer_writeback(eb);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        bio_put(bio);
-
 }
 
 static int write_one_eb(struct extent_buffer *eb,
index 6f384886028386f2f069756ef18e757b10ba9dbf..84a46a42d26269b94fbb0a823e1fec43439d5e69 100644 (file)
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        if (!path)
                return -ENOMEM;
 
-       nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits;
+       nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                csum = (u8 *)dst;
        }
 
-       if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+       if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
                path->reada = 2;
 
        WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                path->skip_locking = 1;
        }
 
-       disk_bytenr = (u64)bio->bi_sector << 9;
+       disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;
        while (bio_index < bio->bi_vcnt) {
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
                              struct btrfs_dio_private *dip, struct bio *bio,
                              u64 offset)
 {
-       int len = (bio->bi_sector << 9) - dip->disk_bytenr;
+       int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        int ret;
 
@@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
        u64 offset;
 
        WARN_ON(bio->bi_vcnt <= 0);
-       sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+       sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+                      GFP_NOFS);
        if (!sums)
                return -ENOMEM;
 
-       sums->len = bio->bi_size;
+       sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);
 
        if (contig)
@@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 
        ordered = btrfs_lookup_ordered_extent(inode, offset);
        BUG_ON(!ordered); /* Logic error */
-       sums->bytenr = (u64)bio->bi_sector << 9;
+       sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;
 
        while (bio_index < bio->bi_vcnt) {
@@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        btrfs_add_ordered_sum(inode, ordered, sums);
                        btrfs_put_ordered_extent(ordered);
 
-                       bytes_left = bio->bi_size - total_bytes;
+                       bytes_left = bio->bi_iter.bi_size - total_bytes;
 
                        sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
                                       GFP_NOFS);
@@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                        sums->len = bytes_left;
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
-                       sums->bytenr = ((u64)bio->bi_sector << 9) +
+                       sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
                                       total_bytes;
                        index = 0;
                }
index 514b291b135405dd1fbd21f9a8e4edc1b161f5af..d546d8c3038baa4451aa2f338a0c24592a3ea48f 100644 (file)
@@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
                         unsigned long bio_flags)
 {
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
-       u64 logical = (u64)bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        int ret;
@@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
        if (bio_flags & EXTENT_BIO_COMPRESSED)
                return 0;
 
-       length = bio->bi_size;
+       length = bio->bi_iter.bi_size;
        map_length = length;
        ret = btrfs_map_block(root->fs_info, rw, logical,
                              &map_length, NULL, 0);
@@ -6783,17 +6783,16 @@ unlock_err:
 static void btrfs_endio_direct_read(struct bio *bio, int err)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
-       struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_vec *bvec = bio->bi_io_vec;
+       struct bio_vec *bvec;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct bio *dio_bio;
        u32 *csums = (u32 *)dip->csum;
-       int index = 0;
        u64 start;
+       int i;
 
        start = dip->logical_offset;
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                        struct page *page = bvec->bv_page;
                        char *kaddr;
@@ -6809,18 +6808,16 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                        local_irq_restore(flags);
 
                        flush_dcache_page(bvec->bv_page);
-                       if (csum != csums[index]) {
+                       if (csum != csums[i]) {
                                btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
                                          btrfs_ino(inode), start, csum,
-                                         csums[index]);
+                                         csums[i]);
                                err = -EIO;
                        }
                }
 
                start += bvec->bv_len;
-               bvec++;
-               index++;
-       } while (bvec <= bvec_end);
+       }
 
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
                      dip->logical_offset + dip->bytes - 1);
@@ -6901,7 +6898,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
                printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
                      "sector %#Lx len %u err no %d\n",
                      btrfs_ino(dip->inode), bio->bi_rw,
-                     (unsigned long long)bio->bi_sector, bio->bi_size, err);
+                     (unsigned long long)bio->bi_iter.bi_sector,
+                     bio->bi_iter.bi_size, err);
                dip->errors = 1;
 
                /*
@@ -6992,7 +6990,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        struct bio *bio;
        struct bio *orig_bio = dip->orig_bio;
        struct bio_vec *bvec = orig_bio->bi_io_vec;
-       u64 start_sector = orig_bio->bi_sector;
+       u64 start_sector = orig_bio->bi_iter.bi_sector;
        u64 file_offset = dip->logical_offset;
        u64 submit_len = 0;
        u64 map_length;
@@ -7000,7 +6998,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int ret = 0;
        int async_submit = 0;
 
-       map_length = orig_bio->bi_size;
+       map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
                              &map_length, NULL, 0);
        if (ret) {
@@ -7008,7 +7006,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                return -EIO;
        }
 
-       if (map_length >= orig_bio->bi_size) {
+       if (map_length >= orig_bio->bi_iter.bi_size) {
                bio = orig_bio;
                goto submit;
        }
@@ -7060,7 +7058,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        bio->bi_private = dip;
                        bio->bi_end_io = btrfs_end_dio_bio;
 
-                       map_length = orig_bio->bi_size;
+                       map_length = orig_bio->bi_iter.bi_size;
                        ret = btrfs_map_block(root->fs_info, rw,
                                              start_sector << 9,
                                              &map_length, NULL, 0);
@@ -7118,7 +7116,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
 
        if (!skip_sum && !write) {
                csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-               sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
+               sum_len = dio_bio->bi_iter.bi_size >>
+                       inode->i_sb->s_blocksize_bits;
                sum_len *= csum_size;
        } else {
                sum_len = 0;
@@ -7133,8 +7132,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
        dip->private = dio_bio->bi_private;
        dip->inode = inode;
        dip->logical_offset = file_offset;
-       dip->bytes = dio_bio->bi_size;
-       dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+       dip->bytes = dio_bio->bi_iter.bi_size;
+       dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
        io_bio->bi_private = dip;
        dip->errors = 0;
        dip->orig_bio = io_bio;
index 24ac21840a9a0797cdb086a58e90e64a3c1481ad..9af0b25d991a8c64653b4fa20f4dc31c2794b943 100644 (file)
@@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
 
        /* see if we can add this page onto our existing bio */
        if (last) {
-               last_end = (u64)last->bi_sector << 9;
-               last_end += last->bi_size;
+               last_end = (u64)last->bi_iter.bi_sector << 9;
+               last_end += last->bi_iter.bi_size;
 
                /*
                 * we can't merge these if they are from different
@@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        if (!bio)
                return -ENOMEM;
 
-       bio->bi_size = 0;
+       bio->bi_iter.bi_size = 0;
        bio->bi_bdev = stripe->dev->bdev;
-       bio->bi_sector = disk_start >> 9;
+       bio->bi_iter.bi_sector = disk_start >> 9;
        set_bit(BIO_UPTODATE, &bio->bi_flags);
 
        bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 
        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
-               start = (u64)bio->bi_sector << 9;
+               start = (u64)bio->bi_iter.bi_sector << 9;
                stripe_offset = start - rbio->raid_map[0];
                page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
@@ -1272,7 +1272,7 @@ cleanup:
 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
 {
-       u64 physical = bio->bi_sector;
+       u64 physical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
        struct btrfs_bio_stripe *stripe;
@@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
                                   struct bio *bio)
 {
-       u64 logical = bio->bi_sector;
+       u64 logical = bio->bi_iter.bi_sector;
        u64 stripe_start;
        int i;
 
@@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
                                                 plug_list);
        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
                                                 plug_list);
-       u64 a_sector = ra->bio_list.head->bi_sector;
-       u64 b_sector = rb->bio_list.head->bi_sector;
+       u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
+       u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
 
        if (a_sector < b_sector)
                return -1;
@@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
        if (IS_ERR(rbio))
                return PTR_ERR(rbio);
        bio_list_add(&rbio->bio_list, bio);
-       rbio->bio_list_bytes = bio->bi_size;
+       rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
        /*
         * don't plug on full rbios, just get them out the door
@@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 
        rbio->read_rebuild = 1;
        bio_list_add(&rbio->bio_list, bio);
-       rbio->bio_list_bytes = bio->bi_size;
+       rbio->bio_list_bytes = bio->bi_iter.bi_size;
 
        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
index 1fd3f33c330abe930fbd03de1deb5968e32fd7b5..bb9a928fa3a848c597d842a94fe2e49a48766cf0 100644 (file)
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                        continue;
                }
                bio->bi_bdev = page->dev->bdev;
-               bio->bi_sector = page->physical >> 9;
+               bio->bi_iter.bi_sector = page->physical >> 9;
 
                bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (btrfsic_submit_bio_wait(READ, bio))
@@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
-               bio->bi_sector = page_bad->physical >> 9;
+               bio->bi_iter.bi_sector = page_bad->physical >> 9;
 
                ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
@@ -1520,7 +1520,7 @@ again:
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_wr_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
-               bio->bi_sector = sbio->physical >> 9;
+               bio->bi_iter.bi_sector = sbio->physical >> 9;
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical_for_dev_replace ||
@@ -1926,7 +1926,7 @@ again:
                bio->bi_private = sbio;
                bio->bi_end_io = scrub_bio_end_io;
                bio->bi_bdev = sbio->dev->bdev;
-               bio->bi_sector = sbio->physical >> 9;
+               bio->bi_iter.bi_sector = sbio->physical >> 9;
                sbio->err = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical ||
@@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }
-       bio->bi_size = 0;
-       bio->bi_sector = physical_for_dev_replace >> 9;
+       bio->bi_iter.bi_size = 0;
+       bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
        ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
        if (ret != PAGE_CACHE_SIZE) {
index 92303f42baaa92d5d845edddff1f8600fc46518e..54d2685a3071f512bc8d8a5c67a60a03f179b29b 100644 (file)
@@ -5298,6 +5298,13 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        bio_put(bio);
                        bio = bbio->orig_bio;
                }
+
+               /*
+                * We have the original bio now, so increment bi_remaining to
+                * account for it in endio.
+                */
+               atomic_inc(&bio->bi_remaining);
+
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
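
This hunk is the flip side of the chained-completion accounting: btrfs saved the original bi_end_io and completes the bio itself, so the reference that bio_endio() will drop has to be re-added first. Stacking code that saves and restores bi_end_io can use bio_endio_nodec() from this series for the same purpose. A hedged sketch with hypothetical names:

#include <linux/bio.h>
#include <linux/slab.h>

struct saved_endio {
	bio_end_io_t	*end_io;
	void		*private;
};

static void stacked_end_io(struct bio *bio, int err)
{
	struct saved_endio *s = bio->bi_private;

	bio->bi_end_io  = s->end_io;	/* restore original completion */
	bio->bi_private = s->private;
	kfree(s);

	/* the restored endio expects bi_remaining not yet decremented */
	bio_endio_nodec(bio, err);
}
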
@@ -5411,7 +5418,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
        if (!q->merge_bvec_fn)
                return 1;
 
-       bvm.bi_size = bio->bi_size - prev->bv_len;
+       bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
        if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
                return 0;
        return 1;
@@ -5426,7 +5433,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
        bio->bi_private = bbio;
        btrfs_io_bio(bio)->stripe_index = dev_nr;
        bio->bi_end_io = btrfs_end_bio;
-       bio->bi_sector = physical >> 9;
+       bio->bi_iter.bi_sector = physical >> 9;
 #ifdef DEBUG
        {
                struct rcu_string *name;
@@ -5464,7 +5471,7 @@ again:
        while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
                if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
                                 bvec->bv_offset) < bvec->bv_len) {
-                       u64 len = bio->bi_size;
+                       u64 len = bio->bi_iter.bi_size;
 
                        atomic_inc(&bbio->stripes_pending);
                        submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5486,7 +5493,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
-               bio->bi_sector = logical >> 9;
+               bio->bi_iter.bi_sector = logical >> 9;
                kfree(bbio);
                bio_endio(bio, -EIO);
        }
@@ -5497,7 +5504,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 {
        struct btrfs_device *dev;
        struct bio *first_bio = bio;
-       u64 logical = (u64)bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        u64 *raid_map = NULL;
@@ -5506,7 +5513,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        int total_devs = 1;
        struct btrfs_bio *bbio = NULL;
 
-       length = bio->bi_size;
+       length = bio->bi_iter.bi_size;
        map_length = length;
 
        ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
index 6024877335caf2a9dfa6af1018c5da19b0e8a2ae..1c04ec66974e0329e57d09f85f597632b3b9a26d 100644 (file)
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
         * let it through, and the IO layer will turn it into
         * an EIO.
         */
-       if (unlikely(bio->bi_sector >= maxsector))
+       if (unlikely(bio->bi_iter.bi_sector >= maxsector))
                return;
 
-       maxsector -= bio->bi_sector;
-       bytes = bio->bi_size;
+       maxsector -= bio->bi_iter.bi_sector;
+       bytes = bio->bi_iter.bi_size;
        if (likely((bytes >> 9) <= maxsector))
                return;
 
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
        bytes = maxsector << 9;
 
        /* Truncate the bio.. */
-       bio->bi_size = bytes;
+       bio->bi_iter.bi_size = bytes;
        bio->bi_io_vec[0].bv_len = bytes;
 
        /* ..and clear the end of the buffer for reads */
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
         */
        bio = bio_alloc(GFP_NOIO, 1);
 
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_io_vec[0].bv_page = bh->b_page;
        bio->bi_io_vec[0].bv_len = bh->b_size;
        bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
        bio->bi_vcnt = 1;
-       bio->bi_size = bh->b_size;
+       bio->bi_iter.bi_size = bh->b_size;
 
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
index 0e04142d5962312fcb055738479247b2364a252e..160a5489a93936372c85683ee8cfd6da5185007b 100644 (file)
@@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
        bio->bi_bdev = bdev;
-       bio->bi_sector = first_sector;
+       bio->bi_iter.bi_sector = first_sector;
        if (dio->is_async)
                bio->bi_end_io = dio_bio_end_aio;
        else
@@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
        if (sdio->bio) {
                loff_t cur_offset = sdio->cur_page_fs_offset;
                loff_t bio_next_offset = sdio->logical_offset_in_bio +
-                       sdio->bio->bi_size;
+                       sdio->bio->bi_iter.bi_size;
 
                /*
                 * See whether this new request is contiguous with the old.
index d488f80ee32df1137e91df0aed72bef2f61b49ac..ab95508e3d4018eab92647c6d2308e98524080d1 100644 (file)
@@ -65,9 +65,9 @@ static void ext4_finish_bio(struct bio *bio)
 {
        int i;
        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct bio_vec *bvec;
 
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               struct bio_vec *bvec = &bio->bi_io_vec[i];
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
@@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
 static void ext4_end_bio(struct bio *bio, int error)
 {
        ext4_io_end_t *io_end = bio->bi_private;
-       sector_t bi_sector = bio->bi_sector;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
 
        BUG_ON(!io_end);
        bio->bi_end_io = NULL;
@@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        if (!bio)
                return -ENOMEM;
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
index 0ae558723506e1a8a96f5653444dc11f5a8feb27..2261ccdd0b5f04a37be390f1b28c8703fafa86b4 100644 (file)
 
 static void f2fs_read_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (unlikely(!uptodate)) {
+               if (!err) {
+                       SetPageUptodate(page);
+               } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
-               } else {
-                       SetPageUptodate(page);
                }
                unlock_page(page);
-       } while (bvec >= bio->bi_io_vec);
-
+       }
        bio_put(bio);
 }
 
 static void f2fs_write_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_SB(bio->bi_io_vec->bv_page->mapping->host->i_sb);
+       struct bio_vec *bvec;
+       int i;
 
-       do {
+       bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (unlikely(!uptodate)) {
+               if (unlikely(err)) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
@@ -67,7 +60,7 @@ static void f2fs_write_end_io(struct bio *bio, int err)
                }
                end_page_writeback(page);
                dec_page_count(sbi, F2FS_WRITEBACK);
-       } while (bvec >= bio->bi_io_vec);
+       }
 
        if (bio->bi_private)
                complete(bio->bi_private);
@@ -91,7 +84,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
        bio = bio_alloc(GFP_NOIO, npages);
 
        bio->bi_bdev = sbi->sb->s_bdev;
-       bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+       bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
 
        return bio;
index 58f06400b7b8dcece9597b51b05ebf9ebc092396..76693793ceddfe7f936c360a6c3494d1882a849a 100644 (file)
@@ -273,7 +273,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
                nrvecs = max(nrvecs/2, 1U);
        }
 
-       bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+       bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;
index 1e712b566d76a74435b4d2faa5417956815cec78..c6872d09561a2d53c8e57374eb700f4fb578ae78 100644 (file)
@@ -238,7 +238,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
        lock_page(page);
 
        bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_sector = sector * (sb->s_blocksize >> 9);
+       bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio_add_page(bio, page, PAGE_SIZE, 0);
 
index e9a97a0d431480616043410a51567730bebafda3..3f999649587ff8185ebd326c3672acee83542de4 100644 (file)
@@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
        bio = bio_alloc(GFP_NOIO, 1);
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = sb->s_bdev;
 
        if (!(rw & WRITE) && data)
index 360d27c488873825fed5c04f8bb2320a51a39d62..8d811e02b4b92bb26d28367c727fcbe909fa95d5 100644 (file)
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio = bio_alloc(GFP_NOFS, 1);
 
-       bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+       bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
        bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
        bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
-       bio->bi_size = LOGPSIZE;
+       bio->bi_iter.bi_size = LOGPSIZE;
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
        /*check if journaling to disk has been disabled*/
        if (log->no_integrity) {
-               bio->bi_size = 0;
+               bio->bi_iter.bi_size = 0;
                lbmIODone(bio, 0);
        } else {
                submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp)
        jfs_info("lbmStartIO\n");
 
        bio = bio_alloc(GFP_NOFS, 1);
-       bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+       bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
        bio->bi_bdev = log->bdev;
        bio->bi_io_vec[0].bv_page = bp->l_page;
        bio->bi_io_vec[0].bv_len = LOGPSIZE;
        bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
        bio->bi_vcnt = 1;
-       bio->bi_size = LOGPSIZE;
+       bio->bi_iter.bi_size = LOGPSIZE;
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
 
        /* check if journaling to disk has been disabled */
        if (log->no_integrity) {
-               bio->bi_size = 0;
+               bio->bi_iter.bi_size = 0;
                lbmIODone(bio, 0);
        } else {
                submit_bio(WRITE_SYNC, bio);
index d165cde0c68dda885c2f5bb512f48465f521c4a1..49ba7ff1bbb9a15d8939128df2021354f2db6c52 100644 (file)
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                         * count from hitting zero before we're through
                         */
                        inc_io(page);
-                       if (!bio->bi_size)
+                       if (!bio->bi_iter.bi_size)
                                goto dump_bio;
                        submit_bio(WRITE, bio);
                        nr_underway++;
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 
                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = inode->i_sb->s_bdev;
-               bio->bi_sector = pblock << (inode->i_blkbits - 9);
+               bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;
 
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
        if (bio) {
                if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
                                goto add_failed;
-               if (!bio->bi_size)
+               if (!bio->bi_iter.bi_size)
                        goto dump_bio;
 
                submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
                        bio = bio_alloc(GFP_NOFS, 1);
                        bio->bi_bdev = inode->i_sb->s_bdev;
-                       bio->bi_sector = pblock << (inode->i_blkbits - 9);
+                       bio->bi_iter.bi_sector =
+                               pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
                        len = xlen << inode->i_blkbits;
index 0f95f0d0b3133e9b3129e3807a842438e162a245..76279e11982d854c9b22312cf51b3bd3a97e256d 100644 (file)
@@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
-       bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
-       bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+       bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
+       bio.bi_iter.bi_size = PAGE_SIZE;
 
        return submit_bio_wait(rw, &bio);
 }
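
The sync_request() hunk above is the minimal synchronous pattern under the new layout: an on-stack bio with a single on-stack bio_vec, its position and length seeded through bi_iter, handed to submit_bio_wait(). A hedged, self-contained sketch with a hypothetical helper:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static int read_page_sync(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio_vec bv = {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	struct bio bio;

	bio_init(&bio);
	bio.bi_io_vec		= &bv;
	bio.bi_vcnt		= 1;
	bio.bi_max_vecs		= 1;
	bio.bi_bdev		= bdev;
	bio.bi_iter.bi_sector	= sector;
	bio.bi_iter.bi_size	= PAGE_SIZE;

	return submit_bio_wait(READ, &bio);	/* blocks until completion */
}
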
@@ -56,22 +56,18 @@ static DECLARE_WAIT_QUEUE_HEAD(wq);
 static void writeseg_end_io(struct bio *bio, int err)
 {
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
-       struct page *page;
 
        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
-       BUG_ON(bio->bi_vcnt == 0);
-       do {
-               page = bvec->bv_page;
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               end_page_writeback(page);
-               page_cache_release(page);
-       } while (bvec >= bio->bi_io_vec);
+
+       bio_for_each_segment_all(bvec, bio, i) {
+               end_page_writeback(bvec->bv_page);
+               page_cache_release(bvec->bv_page);
+       }
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
@@ -96,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
-                       bio->bi_size = i * PAGE_SIZE;
+                       bio->bi_iter.bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
-                       bio->bi_sector = ofs >> 9;
+                       bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
@@ -123,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
-       bio->bi_size = nr_pages * PAGE_SIZE;
+       bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
-       bio->bi_sector = ofs >> 9;
+       bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
@@ -188,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
-                       bio->bi_size = i * PAGE_SIZE;
+                       bio->bi_iter.bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
-                       bio->bi_sector = ofs >> 9;
+                       bio->bi_iter.bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
@@ -209,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
-       bio->bi_size = nr_pages * PAGE_SIZE;
+       bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
-       bio->bi_sector = ofs >> 9;
+       bio->bi_iter.bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
index 0face1c4d4c6bd4ea33cb60e45b8c7fad8235acf..4979ffa60aaabfd36839adec6feafcb17a876d98 100644
  */
 static void mpage_end_io(struct bio *bio, int err)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bv;
+       int i;
 
-       do {
-               struct page *page = bvec->bv_page;
+       bio_for_each_segment_all(bv, bio, i) {
+               struct page *page = bv->bv_page;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
                if (bio_data_dir(bio) == READ) {
-                       if (uptodate) {
+                       if (!err) {
                                SetPageUptodate(page);
                        } else {
                                ClearPageUptodate(page);
@@ -60,14 +58,15 @@ static void mpage_end_io(struct bio *bio, int err)
                        }
                        unlock_page(page);
                } else { /* bio_data_dir(bio) == WRITE */
-                       if (!uptodate) {
+                       if (err) {
                                SetPageError(page);
                                if (page->mapping)
                                        set_bit(AS_EIO, &page->mapping->flags);
                        }
                        end_page_writeback(page);
                }
-       } while (bvec >= bio->bi_io_vec);
+       }
+
        bio_put(bio);
 }
 
@@ -94,7 +93,7 @@ mpage_alloc(struct block_device *bdev,
 
        if (bio) {
                bio->bi_bdev = bdev;
-               bio->bi_sector = first_sector;
+               bio->bi_iter.bi_sector = first_sector;
        }
        return bio;
 }
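
mpage_end_io additionally stops re-deriving success from BIO_UPTODATE and trusts the err argument that bio_endio() already passes down. Reduced to a hypothetical read-side handler under the same assumptions:

#include <linux/bio.h>
#include <linux/pagemap.h>

static void example_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!err)
			SetPageUptodate(page);
		else
			ClearPageUptodate(page);
		unlock_page(page);
	}
	bio_put(bio);
}
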
index e242bbf729723d1d45ae0cac7370952167b026cd..56ff823ca82e0979f08355f80aaf75ae4a019393 100644
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio)
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
-                       rw == READ ? "read" : "write",
-                       bio->bi_size, (unsigned long long)bio->bi_sector);
+                       rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+                       (unsigned long long)bio->bi_iter.bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
        }
 
        if (bio) {
-               bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+               bio->bi_iter.bi_sector = isect - be->be_f_offset +
+                       be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
@@ -201,18 +202,14 @@ static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
 static void bl_end_io_read(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct bio_vec *bvec;
+       int i;
 
-       do {
-               struct page *page = bvec->bv_page;
+       if (!err)
+               bio_for_each_segment_all(bvec, bio, i)
+                       SetPageUptodate(bvec->bv_page);
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-               if (uptodate)
-                       SetPageUptodate(page);
-       } while (bvec >= bio->bi_io_vec);
-       if (!uptodate) {
+       if (err) {
                struct nfs_read_data *rdata = par->data;
                struct nfs_pgio_header *header = rdata->header;
 
@@ -383,20 +380,16 @@ static void mark_extents_written(struct pnfs_block_layout *bl,
 static void bl_end_io_write_zero(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-       do {
-               struct page *page = bvec->bv_page;
+       struct bio_vec *bvec;
+       int i;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
+       bio_for_each_segment_all(bvec, bio, i) {
                /* This is the zeroing page we added */
-               end_page_writeback(page);
-               page_cache_release(page);
-       } while (bvec >= bio->bi_io_vec);
+               end_page_writeback(bvec->bv_page);
+               page_cache_release(bvec->bv_page);
+       }
 
-       if (unlikely(!uptodate)) {
+       if (unlikely(err)) {
                struct nfs_write_data *data = par->data;
                struct nfs_pgio_header *header = data->header;
 
@@ -519,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
        isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
                (offset / SECTOR_SIZE);
 
-       bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+       bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
        bio->bi_bdev = be->be_mdev;
        bio->bi_end_io = bl_read_single_end_io;
 
index 2d8be51f90dc9257bf74cad77b719d17f781c739..dc3a9efdaab87751e47edcf9ef3a807fed4573db 100644
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
        }
        if (likely(bio)) {
                bio->bi_bdev = nilfs->ns_bdev;
-               bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+               bio->bi_iter.bi_sector =
+                       start << (nilfs->ns_blocksize_bits - 9);
        }
        return bio;
 }
index 73920ffda05b331c85ef1760d97083d1590a4a16..bf482dfed14fecf17406a6aa2d517929d6834800 100644
@@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
        }
 
        /* Must put everything in 512 byte sectors for the bio... */
-       bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+       bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
        bio->bi_bdev = reg->hr_bdev;
        bio->bi_private = wc;
        bio->bi_end_io = o2hb_bio_end_io;
index a26739451b535cf02a8016c423583f76a26bac72..db2cfb067d0b1ea88f8b64875ceb174d3ae582d2 100644
@@ -407,7 +407,7 @@ xfs_alloc_ioend_bio(
        struct bio              *bio = bio_alloc(GFP_NOIO, nvecs);
 
        ASSERT(bio->bi_private == NULL);
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        return bio;
 }
index 51757113a822abc57334bbc25f0251671fdd3266..9c061ef2b0d973c913a1baaee4a43bc27523b244 100644
@@ -1240,7 +1240,7 @@ next_chunk:
 
        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = bp->b_target->bt_bdev;
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;
 
@@ -1262,7 +1262,7 @@ next_chunk:
                total_nr_pages--;
        }
 
-       if (likely(bio->bi_size)) {
+       if (likely(bio->bi_iter.bi_size)) {
                if (xfs_buf_is_vmapped(bp)) {
                        flush_kernel_vmap_range(bp->b_addr,
                                                xfs_buf_vmap_len(bp));
index 060ff695085c596f2ddcd2166e7825290d75bd3d..70654521dab69fb03443723550e9b9f8c64a6533 100644
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
  */
-#define bio_iovec_idx(bio, idx)        (&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)         bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio)          bio_iovec((bio))->bv_page
-#define bio_offset(bio)                bio_iovec((bio))->bv_offset
-#define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_end_sector(bio)    ((bio)->bi_sector + bio_sectors((bio)))
+#define __bvec_iter_bvec(bvec, iter)   (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)                             \
+       (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)                              \
+       min((iter).bi_size,                                     \
+           __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)                           \
+       (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)                             \
+((struct bio_vec) {                                            \
+       .bv_page        = bvec_iter_page((bvec), (iter)),       \
+       .bv_len         = bvec_iter_len((bvec), (iter)),        \
+       .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
+})
+
+#define bio_iter_iovec(bio, iter)                              \
+       bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter)                               \
+       bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter)                                        \
+       bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter)                             \
+       bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio)          bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio)                bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio)         bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio)                             \
+       ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio)       ((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)    ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+       if (bio &&
+           bio->bi_iter.bi_size &&
+           !(bio->bi_rw & REQ_DISCARD))
+               return true;
+
+       return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+       if (!bio_has_data(bio))
+               return false;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               return false;
+
+       return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+               return false;
+
+       return true;
+}
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-       if (bio->bi_vcnt)
-               return bio_iovec(bio)->bv_len;
+       if (bio_has_data(bio))
+               return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
 {
-       if (bio->bi_vcnt)
+       if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);
 
        return NULL;
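
Note the type change hiding in these macros: bio_iovec() now evaluates to a struct bio_vec by value, synthesized from bi_io_vec[bi_iter.bi_idx] and clamped by bi_bvec_done and bi_size, so a partially advanced bio reports only its residual first segment. Callers switch from -> to . accordingly; a hypothetical helper:

#include <linux/bio.h>

static unsigned example_first_seg_len(struct bio *bio)
{
	if (!bio_has_data(bio))		/* e.g. discards carry no data */
		return 0;

	return bio_iovec(bio).bv_len;	/* a value, not a pointer */
}
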
@@ -97,19 +159,16 @@ static inline void *bio_data(struct bio *bio)
  * permanent PIO fall back, user is probably better off disabling highmem
  * I/O completely on that queue (see ide-dma for example)
  */
-#define __bio_kmap_atomic(bio, idx)                            \
-       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +    \
-               bio_iovec_idx((bio), (idx))->bv_offset)
+#define __bio_kmap_atomic(bio, iter)                           \
+       (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +   \
+               bio_iter_iovec((bio), (iter)).bv_offset)
 
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
+#define __bio_kunmap_atomic(addr)      kunmap_atomic(addr)
 
 /*
  * merge helpers etc
  */
 
-#define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)      bio_iovec_idx((bio), (bio)->bi_idx)
-
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)    \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,76 @@ static inline void *bio_data(struct bio *bio)
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
-       BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
-/*
- * drivers should not use the __ version unless they _really_ know what
- * they're doing
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx)                 \
-       for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
-            i < (bio)->bi_vcnt;                                        \
-            bvl++, i++)
-
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
 #define bio_for_each_segment_all(bvl, bio, i)                          \
-       for (i = 0;                                                     \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+       for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+                                    unsigned bytes)
+{
+       WARN_ONCE(bytes > iter->bi_size,
+                 "Attempted to advance past end of bvec iter\n");
+
+       while (bytes) {
+               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+               bytes -= len;
+               iter->bi_size -= len;
+               iter->bi_bvec_done += len;
+
+               if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+                       iter->bi_bvec_done = 0;
+                       iter->bi_idx++;
+               }
+       }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)                       \
+       for ((iter) = start;                                            \
+            (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
+               (iter).bi_size;                                         \
+            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+                                   unsigned bytes)
+{
+       iter->bi_sector += bytes >> 9;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               iter->bi_size -= bytes;
+       else
+               bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
 
-#define bio_for_each_segment(bvl, bio, i)                              \
-       for (i = (bio)->bi_idx;                                         \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+#define __bio_for_each_segment(bvl, bio, iter, start)                  \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
+            bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+#define bio_for_each_segment(bvl, bio, iter)                           \
+       __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+       unsigned segs = 0;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(bv, bio, iter)
+               segs++;
+
+       return segs;
+}
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
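
The payoff of the new scheme is visible in bio_for_each_segment(): iteration state lives in a caller-owned struct bvec_iter, initialized from a copy of bio->bi_iter, so walking a bio no longer mutates it. That is what makes the biovec effectively immutable and cheap cloning possible, and bio_segments() above is simply such a walk. A usage sketch with a hypothetical helper name:

#include <linux/bio.h>

static unsigned example_count_bytes(struct bio *bio)
{
	struct bio_vec bv;		/* yielded by value */
	struct bvec_iter iter;		/* caller-owned cursor */
	unsigned bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;			/* equals bio->bi_iter.bi_size */
}
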
@@ -177,16 +279,15 @@ static inline void *bio_data(struct bio *bio)
 struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */
 
-       sector_t                bip_sector;     /* virtual start sector */
+       struct bvec_iter        bip_iter;
 
+       /* kill - should just use bip_vec */
        void                    *bip_buf;       /* generated integrity data */
-       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
-       unsigned int            bip_size;
+       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
-       unsigned short          bip_idx;        /* current bip_vec index */
        unsigned                bip_owns_buf:1; /* should free bip_buf */
 
        struct work_struct      bip_work;       /* I/O completion */
@@ -196,29 +297,28 @@ struct bio_integrity_payload {
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
+extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+                            gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:                bio set to allocate from
  *
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- *   in bio2.bi_private
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
  */
-struct bio_pair {
-       struct bio                      bio1, bio2;
-       struct bio_vec                  bv1, bv2;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-       struct bio_integrity_payload    bip1, bip2;
-       struct bio_vec                  iv1, iv2;
-#endif
-       atomic_t                        cnt;
-       int                             error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
-extern void bio_pair_release(struct bio_pair *dbio);
-extern void bio_trim(struct bio *bio, int offset, int size);
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+                                        gfp_t gfp, struct bio_set *bs)
+{
+       if (sectors >= bio_sectors(bio))
+               return bio;
+
+       return bio_split(bio, sectors, gfp, bs);
+}
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
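
bio_pair_split() and its fixed two-way bio_pair machinery are gone; splitting is now an ordinary bio_split()/bio_next_split() that can carve arbitrary fractions off the front, with completion tied together by bio_chain(). A hedged sketch of the driver-side loop this enables; chunk_sectors and the surrounding make_request context are hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_split_and_submit(struct bio *bio,
				     unsigned chunk_sectors,
				     struct bio_set *bs)
{
	struct bio *split;

	do {
		split = bio_next_split(bio, chunk_sectors, GFP_NOIO, bs);
		if (split != bio)
			bio_chain(split, bio);	/* parent completes last */
		generic_make_request(split);
	} while (split != bio);
}
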
@@ -227,7 +327,8 @@ extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 
-extern void __bio_clone(struct bio *, struct bio *);
+extern void __bio_clone_fast(struct bio *, struct bio *);
+extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
@@ -254,6 +355,7 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 }
 
 extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -262,12 +364,12 @@ extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
@@ -357,47 +459,17 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 }
 #endif
 
-static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
 {
-       return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+       return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
 }
 #define __bio_kunmap_irq(buf, flags)   bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-       __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+       __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
 #define bio_kunmap_irq(buf,flags)      __bio_kunmap_irq(buf, flags)
 
-/*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-       if (bio && bio->bi_vcnt)
-               return true;
-
-       return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-       if (!bio_has_data(bio))
-               return false;
-
-       if (bio->bi_rw & REQ_WRITE_SAME)
-               return false;
-
-       return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-               return false;
-
-       return true;
-}
-
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
@@ -559,16 +631,12 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
-#define bip_vec(bip)           bip_vec_idx(bip, 0)
 
-#define __bip_for_each_vec(bvl, bip, i, start_idx)                     \
-       for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);    \
-            i < (bip)->bip_vcnt;                                       \
-            bvl++, i++)
 
-#define bip_for_each_vec(bvl, bip, i)                                  \
-       __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
+
+#define bip_for_each_vec(bvl, bip, iter)                               \
+       for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)                  \
        for_each_bio(_bio)                                              \
@@ -586,7 +654,6 @@ extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
@@ -630,12 +697,6 @@ static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        return 0;
 }
 
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
-                                      int sectors)
-{
-       return;
-}
-
 static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
 {
index ab0e9b2025b36d401443f213a646fb68fe392605..161b23105b1ec9d90f3520f08fb66d0d9be66358 100644
@@ -113,7 +113,6 @@ enum {
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
-void blk_mq_free_queue(struct request_queue *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
@@ -159,16 +158,16 @@ static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)                              \
-       for ((i) = 0, hctx = (q)->queue_hw_ctx[0];                      \
-            (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+       for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
+            ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define queue_for_each_ctx(q, ctx, i)                                  \
-       for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0);             \
-            (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+       for ((i) = 0; (i) < (q)->nr_queues &&                           \
+            ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)                                        \
-       for ((i) = 0, ctx = (hctx)->ctxs[0];                            \
-            (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+       for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
+            ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
 #define blk_ctx_sum(q, sum)                                            \
 ({                                                                     \
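
The rewritten blk-mq iteration macros fix two out-of-bounds reads: the old form fetched element 0 before testing the bound (wrong when there are zero queues) and fetched element i == count on the final increment. Moving the assignment into the loop condition, via a GCC statement expression that performs the assignment and then yields 1, evaluates the bound first. The idiom in isolation, with hypothetical names:

/* assign the cursor only after the bound check has passed */
#define example_for_each(ptr, arr, n, i)			\
	for ((i) = 0; (i) < (n) &&				\
	     ({ (ptr) = (arr)[(i)]; 1; }); (i)++)
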
index 238ef0ed62f85f18085b6446bc681d8c18d674dc..bbc3a6c88fce3410b954b6c91c407297e2f03e7f 100644
@@ -28,13 +28,22 @@ struct bio_vec {
        unsigned int    bv_offset;
 };
 
+struct bvec_iter {
+       sector_t                bi_sector;      /* device address in 512 byte
+                                                  sectors */
+       unsigned int            bi_size;        /* residual I/O count */
+
+       unsigned int            bi_idx;         /* current index into bvl_vec */
+
+       unsigned int            bi_bvec_done;   /* number of bytes completed in
+                                                  current bvec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-       sector_t                bi_sector;      /* device address in 512 byte
-                                                  sectors */
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
@@ -42,16 +51,13 @@ struct bio {
                                                 * top bits priority
                                                 */
 
-       unsigned short          bi_vcnt;        /* how many bio_vec's */
-       unsigned short          bi_idx;         /* current index into bvl_vec */
+       struct bvec_iter        bi_iter;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;
 
-       unsigned int            bi_size;        /* residual I/O count */
-
        /*
         * To keep track of the max segment size, we account for the
         * sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
+       atomic_t                bi_remaining;
+
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
@@ -74,11 +82,13 @@ struct bio {
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+       unsigned short          bi_vcnt;        /* how many bio_vec's */
+
        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */
 
-       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
+       unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
 
        atomic_t                bi_cnt;         /* pin count */
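
struct bvec_iter is deliberately tiny: sector, residual byte count, vector index, and bytes consumed of the current bvec; struct bio reshuffles around it and bi_max_vecs shrinks to unsigned short. Reading the iterator by hand, which the bvec_iter_* macros normally do for you; a sketch assuming a bio observed mid-completion:

#include <linux/bio.h>

static void example_report_position(struct bio *bio)
{
	struct bvec_iter *it = &bio->bi_iter;

	pr_info("sector %llu, %u bytes left, bvec %u (+%u done)\n",
		(unsigned long long)it->bi_sector, it->bi_size,
		it->bi_idx, it->bi_bvec_done);
}
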
 
index 1b135d49b27985d3243cdbf92d6002ce5eea8597..02cb6f0ea71d52a09c3b2147f2bbc91997904650 100644
@@ -735,7 +735,7 @@ struct rq_map_data {
 };
 
 struct req_iterator {
-       int i;
+       struct bvec_iter iter;
        struct bio *bio;
 };
 
@@ -748,10 +748,11 @@ struct req_iterator {
 
 #define rq_for_each_segment(bvl, _rq, _iter)                   \
        __rq_for_each_bio(_iter.bio, _rq)                       \
-               bio_for_each_segment(bvl, _iter.bio, _iter.i)
+               bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 
-#define rq_iter_last(rq, _iter)                                        \
-               (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter)                              \
+               (_iter.bio->bi_next == NULL &&                  \
+                bio_iter_last(bvec, _iter.iter))
 
 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 # error        "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
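
req_iterator follows suit: it carries a bvec_iter instead of a bare index, and rq_for_each_segment() hands out bio_vecs by value, so drivers written against it keep working when bios are split. A hypothetical byte-count pass over a request:

#include <linux/blkdev.h>

static unsigned example_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}
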
index 20ee8b63a96848ad1bc63fb29ce97c853502d700..d21f2dba07314c48dce2414c4be23d2191180c81 100644
@@ -1,6 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
+#include <linux/blk_types.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -119,8 +120,7 @@ struct ceph_msg_data_cursor {
 #ifdef CONFIG_BLOCK
                struct {                                /* bio */
                        struct bio      *bio;           /* bio from list */
-                       unsigned int    vector_index;   /* vector from bio */
-                       unsigned int    vector_offset;  /* bytes from vector */
+                       struct bvec_iter bvec_iter;
                };
 #endif /* CONFIG_BLOCK */
                struct {                                /* pages */
index a0f9280421eca511b00e80f51e8e65436ec47e3a..2e6dce6e5c2acf9bae626033c700ec0da012bbcb 100644
@@ -37,9 +37,9 @@ int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                         const char *bdev);
 
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param);
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param);
 
 #endif /* CMDLINEPARSEH */
index f4b0aa3126f5deae8ff8908375a9da1eca790ecf..a68cbe59e6ad190023e410cb32784b1fb6a67d2a 100644
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
        DM_IO_PAGE_LIST,/* Page list */
-       DM_IO_BVEC,     /* Bio vector */
+       DM_IO_BIO,      /* Bio vector */
        DM_IO_VMA,      /* Virtual memory area */
        DM_IO_KMEM,     /* Kernel memory */
 };
@@ -41,7 +41,7 @@ struct dm_io_memory {
 
        union {
                struct page_list *pl;
-               struct bio_vec *bvec;
+               struct bio *bio;
                void *vma;
                void *addr;
        } ptr;
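
dm-io callers now hand over a whole bio, iterated from its bi_iter, rather than a raw bvec array. A hedged sketch of filling the request under the 3.14 dm-io API; the helper name and the synchronous usage are illustrative:

#include <linux/dm-io.h>
#include <linux/fs.h>

static int example_dm_io_write(struct dm_io_client *client,
			       struct dm_io_region *where, struct bio *bio)
{
	struct dm_io_request io_req = {
		.bi_rw		= WRITE,
		.mem.type	= DM_IO_BIO,	/* was DM_IO_BVEC */
		.mem.ptr.bio	= bio,		/* was .ptr.bvec */
		.notify.fn	= NULL,		/* synchronous */
		.client		= client,
	};

	return dm_io(&io_req, 1, where, NULL);
}
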
index e2b9576d00e24772580a601a268e42bdc46e349e..095c6e4fe1e87ea02e3c6d9eeecdb66ddc1a1367 100644
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->orig_major     = d->disk->major;
                __entry->orig_minor     = d->disk->first_minor;
-               __entry->sector         = bio->bi_sector;
-               __entry->orig_sector    = bio->bi_sector - 16;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d  %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->cache_hit = hit;
                __entry->bypass = bypass;
        ),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
-               __entry->nr_sector      = bio->bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->writeback = writeback;
                __entry->bypass = bypass;
        ),
index 4c2301d2ef1aa979ea0d6594ad1b6404368b920b..e76ae19a8d6fe65705e48104bb8212ed8bddb546 100644
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev ?
                                          bio->bi_bdev->bd_dev : 0;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 
        TP_fast_assign(
                __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
-               __entry->sector         = bio ? bio->bi_sector : 0;
+               __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
 
        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
-               __entry->sector         = bio->bi_sector;
+               __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
index 3b9f28dfc8492160940d28e58acf1dc9dc6e5081..67f38faac589ad52ac5850e5af602799753b8d29 100644
@@ -629,8 +629,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
                __entry->dev            = sb->s_dev;
                __entry->rw             = rw;
                __entry->type           = type;
-               __entry->sector         = bio->bi_sector;
-               __entry->size           = bio->bi_size;
+               __entry->sector         = bio->bi_iter.bi_sector;
+               __entry->size           = bio->bi_iter.bi_size;
        ),
 
        TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
index d09dd10c5a5efc2c206a85bd31a431268e37cc7f..9a58bc2588105900d79ec27c46bdcb8374c32cbc 100644
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
        struct bio *bio;
 
        bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-       bio->bi_sector = sector;
+       bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = bdev;
        bio->bi_end_io = end_swap_bio_read;
 
index f785aef65799cdb0068f0016bb0183a3bfe312a6..b418cb0d72424ab454e66e3cde881784ce1f0fad 100644
@@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
        if (!error && !bio_flagged(bio, BIO_UPTODATE))
                error = EIO;
 
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-                       error, 0, NULL);
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);
 
-               __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                               BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+               __blk_add_trace(bt, bio->bi_iter.bi_sector,
+                               bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+                               !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
 }
@@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector_from = cpu_to_be64(from);
 
-       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-                       BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-                       sizeof(r), &r);
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, BLK_TA_REMAP,
+                       !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
index 5a7d58fb883bfa1c4917e48d251cd132c8d9baf9..523918b8c6dcbef6968c37e2fa38ee87a3b2518c 100644
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
        unsigned char *vfrom;
-       struct bio_vec *tovec, *fromvec;
-       int i;
-
-       bio_for_each_segment(tovec, to, i) {
-               fromvec = from->bi_io_vec + i;
-
-               /*
-                * not bounced
-                */
-               if (tovec->bv_page == fromvec->bv_page)
-                       continue;
-
-               /*
-                * fromvec->bv_offset and fromvec->bv_len might have been
-                * modified by the block layer, so use the original copy,
-                * bounce_copy_vec already uses tovec->bv_len
-                */
-               vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+       struct bio_vec tovec, *fromvec = from->bi_io_vec;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(tovec, to, iter) {
+               if (tovec.bv_page != fromvec->bv_page) {
+                       /*
+                        * fromvec->bv_offset and fromvec->bv_len might have
+                        * been modified by the block layer, so use the original
+                        * copy, bounce_copy_vec already uses tovec->bv_len
+                        */
+                       vfrom = page_address(fromvec->bv_page) +
+                               tovec.bv_offset;
+
+                       bounce_copy_vec(&tovec, vfrom);
+                       flush_dcache_page(tovec.bv_page);
+               }
 
-               bounce_copy_vec(tovec, vfrom);
-               flush_dcache_page(tovec->bv_page);
+               fromvec++;
        }
 }
 
@@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 {
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
-       struct bio_vec *to, *from;
+       struct bio_vec *to, from;
+       struct bvec_iter iter;
        unsigned i;
 
        if (force)
                goto bounce;
-       bio_for_each_segment(from, *bio_orig, i)
-               if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+       bio_for_each_segment(from, *bio_orig, iter)
+               if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
                        goto bounce;
 
        return;
index 7247be6114ac894523d8273743a4f168ceab3afa..7c59ef681381bb7afeef2cf5207d269e9a95c1f8 100644
@@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
-               bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-               bio->bi_sector <<= PAGE_SHIFT - 9;
+               bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+               bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
-               bio->bi_size = PAGE_SIZE;
+               bio->bi_iter.bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
@@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
@@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err)
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
-                               (unsigned long long)bio->bi_sector);
+                               (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }
 
index 2ed1304d22a7dfed5c8bc9f86d5f0f5cb1b91742..0e478a0f4204b72ed19ae49c349d632cda009e02 100644
@@ -778,13 +778,12 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
 
        bio = data->bio;
        BUG_ON(!bio);
-       BUG_ON(!bio->bi_vcnt);
 
        cursor->resid = min(length, data->bio_length);
        cursor->bio = bio;
-       cursor->vector_index = 0;
-       cursor->vector_offset = 0;
-       cursor->last_piece = length <= bio->bi_io_vec[0].bv_len;
+       cursor->bvec_iter = bio->bi_iter;
+       cursor->last_piece =
+               cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
 }
 
 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
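
The ceph data cursor stops tracking (vector_index, vector_offset) itself and instead snapshots bio->bi_iter, letting bio_advance_iter() do the bookkeeping, including the bi_bvec_done accounting for partially consumed bvecs. The underlying pattern, as a hypothetical byte-wise consumer:

#include <linux/bio.h>

static void example_consume(struct bio *bio, unsigned step)
{
	struct bvec_iter iter = bio->bi_iter;	/* private snapshot */

	while (iter.bi_size) {
		unsigned n = min(step, bio_iter_len(bio, iter));

		/* data is at bio_iter_page()/bio_iter_offset() for n bytes */
		bio_advance_iter(bio, &iter, n);
	}
}
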
@@ -793,71 +792,63 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
 {
        struct ceph_msg_data *data = cursor->data;
        struct bio *bio;
-       struct bio_vec *bio_vec;
-       unsigned int index;
+       struct bio_vec bio_vec;
 
        BUG_ON(data->type != CEPH_MSG_DATA_BIO);
 
        bio = cursor->bio;
        BUG_ON(!bio);
 
-       index = cursor->vector_index;
-       BUG_ON(index >= (unsigned int) bio->bi_vcnt);
+       bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
-       bio_vec = &bio->bi_io_vec[index];
-       BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
-       *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
+       *page_offset = (size_t) bio_vec.bv_offset;
        BUG_ON(*page_offset >= PAGE_SIZE);
        if (cursor->last_piece) /* pagelist offset is always 0 */
                *length = cursor->resid;
        else
-               *length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
+               *length = (size_t) bio_vec.bv_len;
        BUG_ON(*length > cursor->resid);
        BUG_ON(*page_offset + *length > PAGE_SIZE);
 
-       return bio_vec->bv_page;
+       return bio_vec.bv_page;
 }
 
 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio *bio;
-       struct bio_vec *bio_vec;
-       unsigned int index;
+       struct bio_vec bio_vec;
 
        BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
 
        bio = cursor->bio;
        BUG_ON(!bio);
 
-       index = cursor->vector_index;
-       BUG_ON(index >= (unsigned int) bio->bi_vcnt);
-       bio_vec = &bio->bi_io_vec[index];
+       bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
        /* Advance the cursor offset */
 
        BUG_ON(cursor->resid < bytes);
        cursor->resid -= bytes;
-       cursor->vector_offset += bytes;
-       if (cursor->vector_offset < bio_vec->bv_len)
+
+       bio_advance_iter(bio, &cursor->bvec_iter, bytes);
+
+       if (bytes < bio_vec.bv_len)
                return false;   /* more bytes to process in this segment */
-       BUG_ON(cursor->vector_offset != bio_vec->bv_len);
 
        /* Move on to the next segment, and possibly the next bio */
 
-       if (++index == (unsigned int) bio->bi_vcnt) {
+       if (!cursor->bvec_iter.bi_size) {
                bio = bio->bi_next;
-               index = 0;
+               cursor->bvec_iter = bio->bi_iter;
        }
        cursor->bio = bio;
-       cursor->vector_index = index;
-       cursor->vector_offset = 0;
 
        if (!cursor->last_piece) {
                BUG_ON(!cursor->resid);
                BUG_ON(!bio);
                /* A short read is OK, so use <= rather than == */
-               if (cursor->resid <= bio->bi_io_vec[index].bv_len)
+               if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
                        cursor->last_piece = true;
        }