block: remove support for bio remapping from ->make_request
author    Christoph Hellwig <hch@infradead.org>
          Mon, 12 Sep 2011 10:12:01 +0000 (12:12 +0200)
committer Jens Axboe <jaxboe@fusionio.com>
          Mon, 12 Sep 2011 10:12:01 +0000 (12:12 +0200)
There is very little benefit in allowing a ->make_request instance to
update the bio's device and sector and loop around it in
__generic_make_request, when we can achieve the same by calling
generic_make_request from the driver and letting the loop in
generic_make_request handle it.

Note that various drivers misused the old return value and instead
returned non-zero from ->make_request to signal errors.
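
As context for the per-driver hunks below, the conversion follows one
pattern, sketched here for a hypothetical stacking driver (the
example_dev structure and its fields are illustrative and not part of
this patch): instead of remapping the bio and returning non-zero so
that __generic_make_request resubmits it, the driver remaps the bio,
resubmits it itself via generic_make_request, and ->make_request
becomes void.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Old convention: remap, then return 1 so __generic_make_request loops. */
static int example_old_make_request(struct request_queue *q, struct bio *bio)
{
	struct example_dev *dev = q->queuedata;	/* hypothetical driver data */

	bio->bi_bdev = dev->lower_bdev;		/* remap to the backing device */
	bio->bi_sector += dev->data_offset;
	return 1;				/* caller resubmits the bio */
}

/* New convention: remap and resubmit explicitly; no return value. */
static void example_make_request(struct request_queue *q, struct bio *bio)
{
	struct example_dev *dev = q->queuedata;

	bio->bi_bdev = dev->lower_bdev;
	bio->bi_sector += dev->data_offset;
	/*
	 * Recursion is flattened by the current->bio_list handling in
	 * generic_make_request(), so stacking drivers may call it directly.
	 */
	generic_make_request(bio);
}

The md drivers in this series follow the same pattern: linear.c and
raid0.c, for example, now end their make_request functions with a
direct generic_make_request(bio) call instead of returning 1.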

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
25 files changed:
arch/m68k/emu/nfblock.c
arch/powerpc/sysdev/axonram.c
block/blk-core.c
drivers/block/aoe/aoeblk.c
drivers/block/brd.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_req.c
drivers/block/loop.c
drivers/block/pktcdvd.c
drivers/block/ps3vram.c
drivers/block/umem.c
drivers/md/dm.c
drivers/md/faulty.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/s390/block/dcssblk.c
drivers/s390/block/xpram.c
drivers/staging/zram/zram_drv.c
include/linux/blkdev.h

diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8c1c7e07f10d2ac4a400f763585f45a7d1..e3011338ab40ece61712a293f3aeb8fef207c715 100644
@@ -59,7 +59,7 @@ struct nfhd_device {
        struct gendisk *disk;
 };
 
-static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct nfhd_device *dev = queue->queuedata;
        struct bio_vec *bvec;
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
                sec += len;
        }
        bio_endio(bio, 0);
-       return 0;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f09395ae53c56f95b9c0e838939db6eb480..ba4271919062d65005c80156ac524baf796cf612 100644
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static int
+static void
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
        struct bio_vec *vec;
        unsigned int transfered;
        unsigned short idx;
-       int rc = 0;
 
        phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
        phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
        bio_for_each_segment(vec, bio, idx) {
                if (unlikely(phys_mem + vec->bv_len > phys_end)) {
                        bio_io_error(bio);
-                       rc = -ERANGE;
-                       break;
+                       return;
                }
 
                user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
                transfered += vec->bv_len;
        }
        bio_endio(bio, 0);
-
-       return rc;
 }
 
 /**
diff --git a/block/blk-core.c b/block/blk-core.c
index ab673f0b8c30bbff4e8dcce09f0824f7cdba22ee..f58e019be67b2821ea61356031b3c1406ad3dc11 100644
@@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        blk_rq_bio_prep(req->q, req, bio);
 }
 
-int blk_queue_bio(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
        const bool sync = !!(bio->bi_rw & REQ_SYNC);
        struct blk_plug *plug;
@@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio)
         * any locks.
         */
        if (attempt_plug_merge(current, q, bio))
-               goto out;
+               return;
 
        spin_lock_irq(q->queue_lock);
 
@@ -1312,8 +1312,6 @@ get_rq:
 out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
-out:
-       return 0;
 }
 EXPORT_SYMBOL_GPL(blk_queue_bio);      /* for device mapper only */
 
@@ -1441,112 +1439,85 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 static inline void __generic_make_request(struct bio *bio)
 {
        struct request_queue *q;
-       sector_t old_sector;
-       int ret, nr_sectors = bio_sectors(bio);
-       dev_t old_dev;
+       int nr_sectors = bio_sectors(bio);
        int err = -EIO;
+       char b[BDEVNAME_SIZE];
+       struct hd_struct *part;
 
        might_sleep();
 
        if (bio_check_eod(bio, nr_sectors))
                goto end_io;
 
-       /*
-        * Resolve the mapping until finished. (drivers are
-        * still free to implement/resolve their own stacking
-        * by explicitly returning 0)
-        *
-        * NOTE: we don't repeat the blk_size check for each new device.
-        * Stacking drivers are expected to know what they are doing.
-        */
-       old_sector = -1;
-       old_dev = 0;
-       do {
-               char b[BDEVNAME_SIZE];
-               struct hd_struct *part;
-
-               q = bdev_get_queue(bio->bi_bdev);
-               if (unlikely(!q)) {
-                       printk(KERN_ERR
-                              "generic_make_request: Trying to access "
-                               "nonexistent block-device %s (%Lu)\n",
-                               bdevname(bio->bi_bdev, b),
-                               (long long) bio->bi_sector);
-                       goto end_io;
-               }
-
-               if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-                            nr_sectors > queue_max_hw_sectors(q))) {
-                       printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-                              bdevname(bio->bi_bdev, b),
-                              bio_sectors(bio),
-                              queue_max_hw_sectors(q));
-                       goto end_io;
-               }
-
-               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-                       goto end_io;
-
-               part = bio->bi_bdev->bd_part;
-               if (should_fail_request(part, bio->bi_size) ||
-                   should_fail_request(&part_to_disk(part)->part0,
-                                       bio->bi_size))
-                       goto end_io;
+       q = bdev_get_queue(bio->bi_bdev);
+       if (unlikely(!q)) {
+               printk(KERN_ERR
+                      "generic_make_request: Trying to access "
+                       "nonexistent block-device %s (%Lu)\n",
+                       bdevname(bio->bi_bdev, b),
+                       (long long) bio->bi_sector);
+               goto end_io;
+       }
 
-               /*
-                * If this device has partitions, remap block n
-                * of partition p to block n+start(p) of the disk.
-                */
-               blk_partition_remap(bio);
+       if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+                    nr_sectors > queue_max_hw_sectors(q))) {
+               printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+                      bdevname(bio->bi_bdev, b),
+                      bio_sectors(bio),
+                      queue_max_hw_sectors(q));
+               goto end_io;
+       }
 
-               if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-                       goto end_io;
+       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+               goto end_io;
 
-               if (old_sector != -1)
-                       trace_block_bio_remap(q, bio, old_dev, old_sector);
+       part = bio->bi_bdev->bd_part;
+       if (should_fail_request(part, bio->bi_size) ||
+           should_fail_request(&part_to_disk(part)->part0,
+                               bio->bi_size))
+               goto end_io;
 
-               old_sector = bio->bi_sector;
-               old_dev = bio->bi_bdev->bd_dev;
+       /*
+        * If this device has partitions, remap block n
+        * of partition p to block n+start(p) of the disk.
+        */
+       blk_partition_remap(bio);
 
-               if (bio_check_eod(bio, nr_sectors))
-                       goto end_io;
+       if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+               goto end_io;
 
-               /*
-                * Filter flush bio's early so that make_request based
-                * drivers without flush support don't have to worry
-                * about them.
-                */
-               if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
-                       bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
-                       if (!nr_sectors) {
-                               err = 0;
-                               goto end_io;
-                       }
-               }
+       if (bio_check_eod(bio, nr_sectors))
+               goto end_io;
 
-               if ((bio->bi_rw & REQ_DISCARD) &&
-                   (!blk_queue_discard(q) ||
-                    ((bio->bi_rw & REQ_SECURE) &&
-                     !blk_queue_secdiscard(q)))) {
-                       err = -EOPNOTSUPP;
+       /*
+        * Filter flush bio's early so that make_request based
+        * drivers without flush support don't have to worry
+        * about them.
+        */
+       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+               bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+               if (!nr_sectors) {
+                       err = 0;
                        goto end_io;
                }
+       }
 
-               if (blk_throtl_bio(q, &bio))
-                       goto end_io;
-
-               /*
-                * If bio = NULL, bio has been throttled and will be submitted
-                * later.
-                */
-               if (!bio)
-                       break;
-
-               trace_block_bio_queue(q, bio);
+       if ((bio->bi_rw & REQ_DISCARD) &&
+           (!blk_queue_discard(q) ||
+            ((bio->bi_rw & REQ_SECURE) &&
+             !blk_queue_secdiscard(q)))) {
+               err = -EOPNOTSUPP;
+               goto end_io;
+       }
 
-               ret = q->make_request_fn(q, bio);
-       } while (ret);
+       if (blk_throtl_bio(q, &bio))
+               goto end_io;
 
+       /* if bio = NULL, bio has been throttled and will be submitted later. */
+       if (!bio)
+               return;
+       trace_block_bio_queue(q, bio);
+       q->make_request_fn(q, bio);
        return;
 
 end_io:
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 528f6318ded155f03d464f611f20240dd1a90c95..167ba0af47f54b527e3b231f7aaf92b96c37cf00 100644
@@ -159,7 +159,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
        return 0;
 }
 
-static int
+static void
 aoeblk_make_request(struct request_queue *q, struct bio *bio)
 {
        struct sk_buff_head queue;
@@ -172,25 +172,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
        if (bio == NULL) {
                printk(KERN_ERR "aoe: bio is NULL\n");
                BUG();
-               return 0;
+               return;
        }
        d = bio->bi_bdev->bd_disk->private_data;
        if (d == NULL) {
                printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
                BUG();
                bio_endio(bio, -ENXIO);
-               return 0;
+               return;
        } else if (bio->bi_io_vec == NULL) {
                printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
                BUG();
                bio_endio(bio, -ENXIO);
-               return 0;
+               return;
        }
        buf = mempool_alloc(d->bufpool, GFP_NOIO);
        if (buf == NULL) {
                printk(KERN_INFO "aoe: buf allocation failure\n");
                bio_endio(bio, -ENOMEM);
-               return 0;
+               return;
        }
        memset(buf, 0, sizeof(*buf));
        INIT_LIST_HEAD(&buf->bufs);
@@ -211,7 +211,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
                spin_unlock_irqrestore(&d->lock, flags);
                mempool_free(buf, d->bufpool);
                bio_endio(bio, -ENXIO);
-               return 0;
+               return;
        }
 
        list_add_tail(&buf->bufs, &d->bufq);
@@ -222,8 +222,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 
        spin_unlock_irqrestore(&d->lock, flags);
        aoenet_xmit(&queue);
-
-       return 0;
 }
 
 static int
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index dba1c32e1ddfc48229d93d014fedaa9c370e92e3..d22119d49e53e31073c4c5c8ab22438b7e7f9422 100644
@@ -323,7 +323,7 @@ out:
        return err;
 }
 
-static int brd_make_request(struct request_queue *q, struct bio *bio)
+static void brd_make_request(struct request_queue *q, struct bio *bio)
 {
        struct block_device *bdev = bio->bi_bdev;
        struct brd_device *brd = bdev->bd_disk->private_data;
@@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 
 out:
        bio_endio(bio, err);
-
-       return 0;
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ef2ceed3be4b2767f6b3886a48a002609597e2e8..36eee3969a98cf4435e5ce1218df123a6c865867 100644
@@ -1507,7 +1507,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
 extern int proc_details;
 
 /* drbd_req */
-extern int drbd_make_request(struct request_queue *q, struct bio *bio);
+extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3424d675b769a661857cdd41aa90a30e28c07b59..4a0f314086e522f7c6172355d3c0c3b1d757c2ec 100644
@@ -1073,7 +1073,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
        return 0;
 }
 
-int drbd_make_request(struct request_queue *q, struct bio *bio)
+void drbd_make_request(struct request_queue *q, struct bio *bio)
 {
        unsigned int s_enr, e_enr;
        struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
@@ -1081,7 +1081,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 
        if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
                bio_endio(bio, -EPERM);
-               return 0;
+               return;
        }
 
        start_time = jiffies;
@@ -1100,7 +1100,8 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 
        if (likely(s_enr == e_enr)) {
                inc_ap_bio(mdev, 1);
-               return drbd_make_request_common(mdev, bio, start_time);
+               drbd_make_request_common(mdev, bio, start_time);
+               return;
        }
 
        /* can this bio be split generically?
@@ -1148,7 +1149,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 
                bio_pair_release(bp);
        }
-       return 0;
 }
 
 /* This is called by bio_add_page().  With this function we reduce
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da78212bffec71f5dc7af025a84e4028d456..8360239d553c7ed69a6df49a0c420b32d45f119b 100644
@@ -514,7 +514,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
        return bio_list_pop(&lo->lo_bio_list);
 }
 
-static int loop_make_request(struct request_queue *q, struct bio *old_bio)
+static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
        struct loop_device *lo = q->queuedata;
        int rw = bio_rw(old_bio);
@@ -532,12 +532,11 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
        loop_add_bio(lo, old_bio);
        wake_up(&lo->lo_event);
        spin_unlock_irq(&lo->lo_lock);
-       return 0;
+       return;
 
 out:
        spin_unlock_irq(&lo->lo_lock);
        bio_io_error(old_bio);
-       return 0;
 }
 
 struct switch_request {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e133f094ab08f40bea4174b83a1253be45a56e4b..a63b0a2b78054b75f24b131cbeddf01474a7449e 100644
@@ -2444,7 +2444,7 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
        pkt_bio_finished(pd);
 }
 
-static int pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
 {
        struct pktcdvd_device *pd;
        char b[BDEVNAME_SIZE];
@@ -2473,7 +2473,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
                cloned_bio->bi_end_io = pkt_end_io_read_cloned;
                pd->stats.secs_r += bio->bi_size >> 9;
                pkt_queue_bio(pd, cloned_bio);
-               return 0;
+               return;
        }
 
        if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2509,7 +2509,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
                        pkt_make_request(q, &bp->bio1);
                        pkt_make_request(q, &bp->bio2);
                        bio_pair_release(bp);
-                       return 0;
+                       return;
                }
        }
 
@@ -2533,7 +2533,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
                                }
                                spin_unlock(&pkt->lock);
                                spin_unlock(&pd->cdrw.active_list_lock);
-                               return 0;
+                               return;
                        } else {
                                blocked_bio = 1;
                        }
@@ -2584,10 +2584,9 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
                 */
                wake_up(&pd->wqueue);
        }
-       return 0;
+       return;
 end_io:
        bio_io_error(bio);
-       return 0;
 }
 
 
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b3bdb8af89cfad5e80e8ea9d01f5134c1872fbbe..7fad7af87eb205dbf82805336d73fc37d797cfcb 100644
@@ -596,7 +596,7 @@ out:
        return next;
 }
 
-static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
 {
        struct ps3_system_bus_device *dev = q->queuedata;
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -610,13 +610,11 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio)
        spin_unlock_irq(&priv->lock);
 
        if (busy)
-               return 0;
+               return;
 
        do {
                bio = ps3vram_do_bio(dev, bio);
        } while (bio);
-
-       return 0;
 }
 
 static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 031ca720d926623e810368399359e6a4dcf1b02d..aa2712060bfbcd153e446f5adcf80a9d61377e75 100644
@@ -513,7 +513,7 @@ static void process_page(unsigned long data)
        }
 }
 
-static int mm_make_request(struct request_queue *q, struct bio *bio)
+static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
        pr_debug("mm_make_request %llu %u\n",
@@ -525,7 +525,7 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
        card->biotail = &bio->bi_next;
        spin_unlock_irq(&card->lock);
 
-       return 0;
+       return;
 }
 
 static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 78b20868bcbcb659654c869fe25a5091478a3fb4..7b986e77b75e8189c1c2e0d2d4beefbe973ecaf5 100644
@@ -1388,7 +1388,7 @@ out:
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int _dm_request(struct request_queue *q, struct bio *bio)
+static void _dm_request(struct request_queue *q, struct bio *bio)
 {
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
@@ -1409,12 +1409,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
                        queue_io(md, bio);
                else
                        bio_io_error(bio);
-               return 0;
+               return;
        }
 
        __split_and_process_bio(md, bio);
        up_read(&md->io_lock);
-       return 0;
+       return;
 }
 
 static int dm_request_based(struct mapped_device *md)
@@ -1422,14 +1422,14 @@ static int dm_request_based(struct mapped_device *md)
        return blk_queue_stackable(md->queue);
 }
 
-static int dm_request(struct request_queue *q, struct bio *bio)
+static void dm_request(struct request_queue *q, struct bio *bio)
 {
        struct mapped_device *md = q->queuedata;
 
        if (dm_request_based(md))
-               return blk_queue_bio(q, bio);
-
-       return _dm_request(q, bio);
+               blk_queue_bio(q, bio);
+       else
+               _dm_request(q, bio);
 }
 
 void dm_dispatch_request(struct request *rq)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 23078dabb6df0a741f1223fcae7f728a9b19958a..5ef304d4341c170b5d343a1c52ca5061df4a33ea 100644
@@ -169,7 +169,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
                conf->nfaults = n+1;
 }
 
-static int make_request(mddev_t *mddev, struct bio *bio)
+static void make_request(mddev_t *mddev, struct bio *bio)
 {
        conf_t *conf = mddev->private;
        int failit = 0;
@@ -181,7 +181,7 @@ static int make_request(mddev_t *mddev, struct bio *bio)
                         * just fail immediately
                         */
                        bio_endio(bio, -EIO);
-                       return 0;
+                       return;
                }
 
                if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
@@ -211,15 +211,15 @@ static int make_request(mddev_t *mddev, struct bio *bio)
        }
        if (failit) {
                struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);
+
                b->bi_bdev = conf->rdev->bdev;
                b->bi_private = bio;
                b->bi_end_io = faulty_fail;
-               generic_make_request(b);
-               return 0;
-       } else {
+               bio = b;
+       } else
                bio->bi_bdev = conf->rdev->bdev;
-               return 1;
-       }
+
+       generic_make_request(bio);
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6cd2c313e800521d033493fb65d98544566d1211..c6ee491d98e781a57a8051ea8465baa4c07e89cc 100644
@@ -264,14 +264,14 @@ static int linear_stop (mddev_t *mddev)
        return 0;
 }
 
-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static void linear_make_request (mddev_t *mddev, struct bio *bio)
 {
        dev_info_t *tmp_dev;
        sector_t start_sector;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
-               return 0;
+               return;
        }
 
        rcu_read_lock();
@@ -293,7 +293,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
                       (unsigned long long)start_sector);
                rcu_read_unlock();
                bio_io_error(bio);
-               return 0;
+               return;
        }
        if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
                     tmp_dev->end_sector)) {
@@ -307,20 +307,17 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 
                bp = bio_split(bio, end_sector - bio->bi_sector);
 
-               if (linear_make_request(mddev, &bp->bio1))
-                       generic_make_request(&bp->bio1);
-               if (linear_make_request(mddev, &bp->bio2))
-                       generic_make_request(&bp->bio2);
+               linear_make_request(mddev, &bp->bio1);
+               linear_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
-               return 0;
+               return;
        }
                    
        bio->bi_bdev = tmp_dev->rdev->bdev;
        bio->bi_sector = bio->bi_sector - start_sector
                + tmp_dev->rdev->data_offset;
        rcu_read_unlock();
-
-       return 1;
+       generic_make_request(bio);
 }
 
 static void linear_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d98b362e8f49660454add87a5e3b38..5c2178562c964c17d8fa941f81cde5dd21ec9a00 100644
@@ -330,18 +330,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static int md_make_request(struct request_queue *q, struct bio *bio)
+static void md_make_request(struct request_queue *q, struct bio *bio)
 {
        const int rw = bio_data_dir(bio);
        mddev_t *mddev = q->queuedata;
-       int rv;
        int cpu;
        unsigned int sectors;
 
        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
                bio_io_error(bio);
-               return 0;
+               return;
        }
        smp_rmb(); /* Ensure implications of  'active' are visible */
        rcu_read_lock();
@@ -366,7 +365,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
-       rv = mddev->pers->make_request(mddev, bio);
+       mddev->pers->make_request(mddev, bio);
 
        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -375,8 +374,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 
        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
-
-       return rv;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
@@ -475,8 +472,7 @@ static void md_submit_flush_data(struct work_struct *ws)
                bio_endio(bio, 0);
        else {
                bio->bi_rw &= ~REQ_FLUSH;
-               if (mddev->pers->make_request(mddev, bio))
-                       generic_make_request(bio);
+               mddev->pers->make_request(mddev, bio);
        }
 
        mddev->flush_bio = NULL;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e3011a71cc59ae31e5e96ed63488e..bd47847cf7caab6307a3c574040ce95f3fb5fc18 100644
@@ -424,7 +424,7 @@ struct mdk_personality
        int level;
        struct list_head list;
        struct module *owner;
-       int (*make_request)(mddev_t *mddev, struct bio *bio);
+       void (*make_request)(mddev_t *mddev, struct bio *bio);
        int (*run)(mddev_t *mddev);
        int (*stop)(mddev_t *mddev);
        void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288e8eaac45042eb2e1909af8f0759a..407cb56914254cf55b067f54ce06f33bfddd8c26 100644
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error)
        rdev_dec_pending(rdev, conf->mddev);
 }
 
-static int multipath_make_request(mddev_t *mddev, struct bio * bio)
+static void multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
        multipath_conf_t *conf = mddev->private;
        struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
-               return 0;
+               return;
        }
 
        mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
        if (mp_bh->path < 0) {
                bio_endio(bio, -EIO);
                mempool_free(mp_bh, conf->pool);
-               return 0;
+               return;
        }
        multipath = conf->multipaths + mp_bh->path;
 
@@ -137,7 +137,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
-       return 0;
+       return;
 }
 
 static void multipath_status (struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e86bf3682e1e29c65b61f0ca7d3b00f94533046a..4066615d61af9c504ca93bd6e397069ef0f9d581 100644
@@ -466,7 +466,7 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
        }
 }
 
-static int raid0_make_request(mddev_t *mddev, struct bio *bio)
+static void raid0_make_request(mddev_t *mddev, struct bio *bio)
 {
        unsigned int chunk_sects;
        sector_t sector_offset;
@@ -475,7 +475,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
-               return 0;
+               return;
        }
 
        chunk_sects = mddev->chunk_sectors;
@@ -495,13 +495,10 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
                else
                        bp = bio_split(bio, chunk_sects -
                                       sector_div(sector, chunk_sects));
-               if (raid0_make_request(mddev, &bp->bio1))
-                       generic_make_request(&bp->bio1);
-               if (raid0_make_request(mddev, &bp->bio2))
-                       generic_make_request(&bp->bio2);
-
+               raid0_make_request(mddev, &bp->bio1);
+               raid0_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
-               return 0;
+               return;
        }
 
        sector_offset = bio->bi_sector;
@@ -511,10 +508,9 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = sector_offset + zone->dev_start +
                tmp_dev->data_offset;
-       /*
-        * Let the main block layer submit the IO and resolve recursion:
-        */
-       return 1;
+
+       generic_make_request(bio);
+       return;
 
 bad_map:
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
@@ -523,7 +519,7 @@ bad_map:
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
        bio_io_error(bio);
-       return 0;
+       return;
 }
 
 static void raid0_status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd8954714401cb8ed8c5b455b32a9743..97f2a5f977b16c241b6cd5c1f6d7874c8bef5b42 100644
@@ -785,7 +785,7 @@ do_sync_io:
        PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(mddev_t *mddev, struct bio * bio)
 {
        conf_t *conf = mddev->private;
        mirror_info_t *mirror;
@@ -870,7 +870,7 @@ read_again:
                if (rdisk < 0) {
                        /* couldn't find anywhere to read from */
                        raid_end_bio_io(r1_bio);
-                       return 0;
+                       return;
                }
                mirror = conf->mirrors + rdisk;
 
@@ -928,7 +928,7 @@ read_again:
                        goto read_again;
                } else
                        generic_make_request(read_bio);
-               return 0;
+               return;
        }
 
        /*
@@ -1119,8 +1119,6 @@ read_again:
 
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-
-       return 0;
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c89f8213d7dcc43cd1aaf6dd5799c7..04b625e1cb602965e64a3498b282273b034b2aea 100644
@@ -825,7 +825,7 @@ static void unfreeze_array(conf_t *conf)
        spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(mddev_t *mddev, struct bio * bio)
 {
        conf_t *conf = mddev->private;
        mirror_info_t *mirror;
@@ -844,7 +844,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
-               return 0;
+               return;
        }
 
        /* If this request crosses a chunk boundary, we need to
@@ -876,10 +876,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                conf->nr_waiting++;
                spin_unlock_irq(&conf->resync_lock);
 
-               if (make_request(mddev, &bp->bio1))
-                       generic_make_request(&bp->bio1);
-               if (make_request(mddev, &bp->bio2))
-                       generic_make_request(&bp->bio2);
+               make_request(mddev, &bp->bio1);
+               make_request(mddev, &bp->bio2);
 
                spin_lock_irq(&conf->resync_lock);
                conf->nr_waiting--;
@@ -887,14 +885,14 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                spin_unlock_irq(&conf->resync_lock);
 
                bio_pair_release(bp);
-               return 0;
+               return;
        bad_map:
                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
                       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
                bio_io_error(bio);
-               return 0;
+               return;
        }
 
        md_write_start(mddev, bio);
@@ -937,7 +935,7 @@ read_again:
                slot = r10_bio->read_slot;
                if (disk < 0) {
                        raid_end_bio_io(r10_bio);
-                       return 0;
+                       return;
                }
                mirror = conf->mirrors + disk;
 
@@ -985,7 +983,7 @@ read_again:
                        goto read_again;
                } else
                        generic_make_request(read_bio);
-               return 0;
+               return;
        }
 
        /*
@@ -1157,7 +1155,6 @@ retry_write:
 
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-       return 0;
 }
 
 static void status(struct seq_file *seq, mddev_t *mddev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d7171291e1d8dcc3d7fa538ab02f4..96b7f6a1b6f2718a4de6dabaaba539246c555e14 100644
@@ -3695,7 +3695,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
        return sh;
 }
 
-static int make_request(mddev_t *mddev, struct bio * bi)
+static void make_request(mddev_t *mddev, struct bio * bi)
 {
        raid5_conf_t *conf = mddev->private;
        int dd_idx;
@@ -3708,7 +3708,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
-               return 0;
+               return;
        }
 
        md_write_start(mddev, bi);
@@ -3716,7 +3716,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        if (rw == READ &&
             mddev->reshape_position == MaxSector &&
             chunk_aligned_read(mddev,bi))
-               return 0;
+               return;
 
        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3851,8 +3851,6 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 
                bio_endio(bi, 0);
        }
-
-       return 0;
 }
 
 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9b43ae94beba1dea263e1687d38060e9a1f243d6..a5a55da2a1ac14752c426f4a0a00e4fb4f11521c 100644
@@ -27,7 +27,7 @@
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static int dcssblk_release(struct gendisk *disk, fmode_t mode);
-static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
                                 void **kaddr, unsigned long *pfn);
 
@@ -814,7 +814,7 @@ out:
        return rc;
 }
 
-static int
+static void
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
        struct dcssblk_dev_info *dev_info;
@@ -871,10 +871,9 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
                bytes_done += bvec->bv_len;
        }
        bio_endio(bio, 0);
-       return 0;
+       return;
 fail:
        bio_io_error(bio);
-       return 0;
 }
 
 static int
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 1f6a4d894e73cf6c6604551f10f862510b0444b9..98f3e4ade9248f904e6ab476b05e98525b2eda58 100644
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void)
 /*
  * Block device make request function.
  */
-static int xpram_make_request(struct request_queue *q, struct bio *bio)
+static void xpram_make_request(struct request_queue *q, struct bio *bio)
 {
        xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
        struct bio_vec *bvec;
@@ -221,10 +221,9 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
        }
        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
-       return 0;
+       return;
 fail:
        bio_io_error(bio);
-       return 0;
 }
 
 static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index d70ec1ad10de05f20b6afa8fd6c82d67be0f727d..02589cab67109c871cda806f191914d7a8adf774 100644
@@ -556,24 +556,22 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
 /*
  * Handler function for all zram I/O requests.
  */
-static int zram_make_request(struct request_queue *queue, struct bio *bio)
+static void zram_make_request(struct request_queue *queue, struct bio *bio)
 {
        struct zram *zram = queue->queuedata;
 
        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                bio_io_error(bio);
-               return 0;
+               return;
        }
 
        if (unlikely(!zram->init_done) && zram_init_device(zram)) {
                bio_io_error(bio);
-               return 0;
+               return;
        }
 
        __zram_make_request(zram, bio, bio_data_dir(bio));
-
-       return 0;
 }
 
 void zram_reset_device(struct zram *zram)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 085f95414c7fa9016f130d7917c98970100e09e0..c712efdafc3ffe44afc03edee28016c91c1de6c2 100644
@@ -193,7 +193,7 @@ struct request_pm_state
 #include <linux/elevator.h>
 
 typedef void (request_fn_proc) (struct request_queue *q);
-typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
@@ -675,7 +675,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
 
-extern int blk_queue_bio(struct request_queue *q, struct bio *bio);
+extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 /*
  * A queue has just exitted congestion.  Note this in the global counter of