Merge branch 'master' into for-2.6.31
author Jens Axboe <jens.axboe@oracle.com>
Fri, 22 May 2009 18:25:34 +0000 (20:25 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
Fri, 22 May 2009 18:25:34 +0000 (20:25 +0200)
Conflicts:
drivers/block/hd.c
drivers/block/mg_disk.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
110 files changed:
arch/arm/plat-omap/mailbox.c
arch/um/drivers/ubd_kern.c
block/Kconfig
block/as-iosched.c
block/blk-barrier.c
block/blk-core.c
block/blk-exec.c
block/blk-map.c
block/blk-merge.c
block/blk-tag.c
block/blk-timeout.c
block/blk.h
block/bsg.c
block/cfq-iosched.c
block/deadline-iosched.c
block/elevator.c
block/scsi_ioctl.c
drivers/ata/libata-scsi.c
drivers/block/DAC960.c
drivers/block/Kconfig
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/block/floppy.c
drivers/block/hd.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/nbd.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/ps3disk.c
drivers/block/sunvdc.c
drivers/block/swim.c
drivers/block/swim3.c
drivers/block/sx8.c
drivers/block/ub.c
drivers/block/viodasd.c
drivers/block/virtio_blk.c
drivers/block/xd.c
drivers/block/xen-blkfront.c
drivers/block/xsysace.c
drivers/block/z2ram.c
drivers/cdrom/gdrom.c
drivers/cdrom/viocd.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd.h
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-floppy.c
drivers/ide/ide-io.c
drivers/ide/ide-ioctls.c
drivers/ide/ide-lib.c
drivers/ide/ide-park.c
drivers/ide/ide-pm.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/pdc202xx_old.c
drivers/ide/tc86c001.c
drivers/ide/tx4939ide.c
drivers/memstick/core/mspro_block.c
drivers/message/fusion/mptsas.c
drivers/message/i2o/i2o_block.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mtd/mtd_blkdevs.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_block.c
drivers/sbus/char/jsflash.c
drivers/scsi/eata.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_host_smp.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_tgt_lib.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/sd.c
drivers/scsi/sd_dif.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/st.c
drivers/scsi/u14-34f.c
fs/bio.c
fs/block_dev.c
fs/coda/file.c
fs/exofs/osd.c
fs/pipe.c
fs/read_write.c
fs/splice.c
include/linux/bio.h
include/linux/blkdev.h
include/linux/elevator.h
include/linux/fs.h
include/linux/ide.h
include/linux/loop.h
include/linux/mg_disk.h [deleted file]
include/linux/pipe_fs_i.h
include/linux/splice.h
include/linux/virtio_blk.h
include/scsi/scsi_cmnd.h
kernel/trace/blktrace.c

index 0abfbaa59871313c5ce3857457c3407f6ca4bafc..40424edae93912067cabd21a8ce7c4c304a91428 100644
@@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
        return ret;
 }
 
+struct omap_msg_tx_data {
+       mbox_msg_t      msg;
+       void            *arg;
+};
+
+static void omap_msg_tx_end_io(struct request *rq, int error)
+{
+       kfree(rq->special);
+       __blk_put_request(rq->q, rq);
+}
+
 int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
 {
+       struct omap_msg_tx_data *tx_data;
        struct request *rq;
        struct request_queue *q = mbox->txq->queue;
-       int ret = 0;
+
+       tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
+       if (unlikely(!tx_data))
+               return -ENOMEM;
 
        rq = blk_get_request(q, WRITE, GFP_ATOMIC);
        if (unlikely(!rq)) {
-               ret = -ENOMEM;
-               goto fail;
+               kfree(tx_data);
+               return -ENOMEM;
        }
 
-       rq->data = (void *)msg;
-       blk_insert_request(q, rq, 0, arg);
+       tx_data->msg = msg;
+       tx_data->arg = arg;
+       rq->end_io = omap_msg_tx_end_io;
+       blk_insert_request(q, rq, 0, tx_data);
 
        schedule_work(&mbox->txq->work);
- fail:
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(omap_mbox_msg_send);
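[editor's note: the tx conversion above replaces the removed rq->data field with a small kmalloc'd payload stored in rq->special and freed by a per-request ->end_io hook, so the payload's lifetime exactly matches the request's. A minimal sketch of that ownership pattern, assuming a hypothetical my_payload/my_send pair on the 2.6.31-era blk_get_request()/blk_insert_request() API:

    #include <linux/blkdev.h>
    #include <linux/slab.h>

    struct my_payload {
            u32 word;
    };

    /* runs once the request completes; frees the payload, then the request */
    static void my_tx_end_io(struct request *rq, int error)
    {
            kfree(rq->special);
            __blk_put_request(rq->q, rq);
    }

    static int my_send(struct request_queue *q, u32 word)
    {
            struct my_payload *p = kmalloc(sizeof(*p), GFP_ATOMIC);
            struct request *rq;

            if (!p)
                    return -ENOMEM;

            rq = blk_get_request(q, WRITE, GFP_ATOMIC);
            if (!rq) {
                    kfree(p);
                    return -ENOMEM;
            }

            p->word = word;
            rq->end_io = my_tx_end_io;       /* end_io owns p from here on */
            blk_insert_request(q, rq, 0, p); /* stores p in rq->special */
            return 0;
    }
]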
 
@@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
        struct request_queue *q = mbox->txq->queue;
 
        while (1) {
+               struct omap_msg_tx_data *tx_data;
+
                spin_lock(q->queue_lock);
-               rq = elv_next_request(q);
+               rq = blk_fetch_request(q);
                spin_unlock(q->queue_lock);
 
                if (!rq)
                        break;
 
-               ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
+               tx_data = rq->special;
+
+               ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
                if (ret) {
                        enable_mbox_irq(mbox, IRQ_TX);
+                       spin_lock(q->queue_lock);
+                       blk_requeue_request(q, rq);
+                       spin_unlock(q->queue_lock);
                        return;
                }
 
                spin_lock(q->queue_lock);
-               if (__blk_end_request(rq, 0, 0))
-                       BUG();
+               __blk_end_request_all(rq, 0);
                spin_unlock(q->queue_lock);
        }
 }
@@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
 
        while (1) {
                spin_lock_irqsave(q->queue_lock, flags);
-               rq = elv_next_request(q);
+               rq = blk_fetch_request(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
                if (!rq)
                        break;
 
-               msg = (mbox_msg_t) rq->data;
-
-               if (blk_end_request(rq, 0, 0))
-                       BUG();
-
+               msg = (mbox_msg_t)rq->special;
+               blk_end_request_all(rq, 0);
                mbox->rxq->callback((void *)msg);
        }
 }
@@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
                        goto nomem;
 
                msg = mbox_fifo_read(mbox);
-               rq->data = (void *)msg;
 
                if (unlikely(mbox_seq_test(mbox, msg))) {
                        pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
@@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
                                mbox->err_notify();
                }
 
-               blk_insert_request(q, rq, 0, NULL);
+               blk_insert_request(q, rq, 0, (void *)msg);
                if (mbox->ops->type == OMAP_MBOX_TYPE1)
                        break;
        }
@@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 
        while (1) {
                spin_lock_irqsave(q->queue_lock, flags);
-               rq = elv_next_request(q);
+               rq = blk_fetch_request(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
 
                if (!rq)
                        break;
 
-               *p = (mbox_msg_t) rq->data;
+               *p = (mbox_msg_t)rq->special;
 
-               if (blk_end_request(rq, 0, 0))
-                       BUG();
+               blk_end_request_all(rq, 0);
 
                if (unlikely(mbox_seq_test(mbox, *p))) {
                        pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
index f934225fd8ef9c702c869c274df0c271c9a6a20b..aa9e926e13d73dca17015a5bd5f8e5eb76842c63 100644
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
 
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
-       blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
-       if(bytes < 0){
-               ubd_end_request(req, 0, -EIO);
-               return;
-       }
-       ubd_end_request(req, bytes, 0);
-}
-
 static LIST_HEAD(restart);
 
 /* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
 static void ubd_handler(void)
 {
        struct io_thread_req *req;
-       struct request *rq;
        struct ubd *ubd;
        struct list_head *list, *next_ele;
        unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
                        return;
                }
 
-               rq = req->req;
-               rq->nr_sectors -= req->length >> 9;
-               if(rq->nr_sectors == 0)
-                       ubd_finish(rq, rq->hard_nr_sectors << 9);
+               blk_end_request(req->req, 0, req->length);
                kfree(req);
        }
        reactivate_fd(thread_fd, UBD_IRQ);
@@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
 {
        struct io_thread_req *io_req;
        struct request *req;
-       int n, last_sectors;
+       sector_t sector;
+       int n;
 
        while(1){
                struct ubd *dev = q->queuedata;
                if(dev->end_sg == 0){
-                       struct request *req = elv_next_request(q);
+                       struct request *req = blk_fetch_request(q);
                        if(req == NULL)
                                return;
 
                        dev->request = req;
-                       blkdev_dequeue_request(req);
                        dev->start_sg = 0;
                        dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
                }
 
                req = dev->request;
-               last_sectors = 0;
+               sector = blk_rq_pos(req);
                while(dev->start_sg < dev->end_sg){
                        struct scatterlist *sg = &dev->sg[dev->start_sg];
 
-                       req->sector += last_sectors;
                        io_req = kmalloc(sizeof(struct io_thread_req),
                                         GFP_ATOMIC);
                        if(io_req == NULL){
@@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
                                return;
                        }
                        prepare_request(req, io_req,
-                                       (unsigned long long) req->sector << 9,
+                                       (unsigned long long)sector << 9,
                                        sg->offset, sg->length, sg_page(sg));
 
-                       last_sectors = sg->length >> 9;
+                       sector += sg->length >> 9;
                        n = os_write_file(thread_fd, &io_req,
                                          sizeof(struct io_thread_req *));
                        if(n != sizeof(struct io_thread_req *)){
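[editor's note: the key change in do_ubd_request() is that the submission offset no longer comes from mutating req->sector (which becomes the private __sector); a local cursor is seeded once from blk_rq_pos() and advanced per segment. The same idiom in isolation, with submit_segment() as a hypothetical stand-in for prepare_request() plus the write to the I/O thread:

    sector_t sector = blk_rq_pos(req);      /* seed the cursor once */

    while (dev->start_sg < dev->end_sg) {
            struct scatterlist *sg = &dev->sg[dev->start_sg];

            submit_segment(req, (unsigned long long)sector << 9,
                           sg->offset, sg->length, sg_page(sg));
            sector += sg->length >> 9;      /* advance the local cursor only */
            dev->start_sg++;
    }
]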
index e7d12782bcfb5eccb56aee532b8c27fa81706499..2c39527aa7db026dc6c6b6c20fbe55024de46100 100644
@@ -26,6 +26,7 @@ if BLOCK
 config LBD
        bool "Support for large block devices and files"
        depends on !64BIT
+       default y
        help
          Enable block devices or files of size 2TB and larger.
 
@@ -38,11 +39,13 @@ config LBD
 
          The ext4 filesystem requires that this feature be enabled in
          order to support filesystems that have the huge_file feature
-         enabled.    Otherwise, it will refuse to mount any filesystems
-         that use the huge_file feature, which is enabled by default
-         by mke2fs.ext4.   The GFS2 filesystem also requires this feature.
+         enabled.  Otherwise, it will refuse to mount in the read-write
+         mode any filesystems that use the huge_file feature, which is
+         enabled by default by mke2fs.ext4.
 
-         If unsure, say N.
+         The GFS2 filesystem also requires this feature.
+
+         If unsure, say Y.
 
 config BLK_DEV_BSG
        bool "Block layer SG support v4 (EXPERIMENTAL)"
index c48fa670d221342f223d196ab12e17d67452fe89..7a12cf6ee1d35e212a6d26007a1eb2057c189357 100644
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
        data_dir = rq_is_sync(rq1);
 
        last = ad->last_sector[data_dir];
-       s1 = rq1->sector;
-       s2 = rq2->sector;
+       s1 = blk_rq_pos(rq1);
+       s2 = blk_rq_pos(rq2);
 
        BUG_ON(data_dir != rq_is_sync(rq2));
 
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
                        as_update_thinktime(ad, aic, thinktime);
 
                        /* Calculate read -> read seek distance */
-                       if (aic->last_request_pos < rq->sector)
-                               seek_dist = rq->sector - aic->last_request_pos;
+                       if (aic->last_request_pos < blk_rq_pos(rq))
+                               seek_dist = blk_rq_pos(rq) -
+                                           aic->last_request_pos;
                        else
-                               seek_dist = aic->last_request_pos - rq->sector;
+                               seek_dist = aic->last_request_pos -
+                                           blk_rq_pos(rq);
                        as_update_seekdist(ad, aic, seek_dist);
                }
-               aic->last_request_pos = rq->sector + rq->nr_sectors;
+               aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
                set_bit(AS_TASK_IOSTARTED, &aic->state);
                spin_unlock(&aic->lock);
        }
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
 {
        unsigned long delay;    /* jiffies */
        sector_t last = ad->last_sector[ad->batch_data_dir];
-       sector_t next = rq->sector;
+       sector_t next = blk_rq_pos(rq);
        sector_t delta; /* acceptable close offset (in sectors) */
        sector_t s;
 
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
         * This has to be set in order to be correctly updated by
         * as_find_next_rq
         */
-       ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+       ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
        if (data_dir == BLK_RW_SYNC) {
                struct io_context *ioc = RQ_IOC(rq);
@@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 static void as_work_handler(struct work_struct *work)
 {
        struct as_data *ad = container_of(work, struct as_data, antic_work);
-       struct request_queue *q = ad->q;
-       unsigned long flags;
 
-       spin_lock_irqsave(q->queue_lock, flags);
-       blk_start_queueing(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_run_queue(ad->q);
 }
 
 static int as_may_queue(struct request_queue *q, int rw)
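[editor's note: every conversion in this scheduler is mechanical: direct reads of rq->sector and rq->nr_sectors become the blk_rq_pos() and blk_rq_sectors() accessors, since the underlying fields turn into the private __sector/__data_len, and the end position is blk_rq_pos(rq) + blk_rq_sectors(rq). The seek-distance computation from as_update_iohist(), restated as a small helper to show the accessor idiom:

    /* absolute distance between a remembered position and rq's start */
    static sector_t seek_distance(sector_t last, struct request *rq)
    {
            sector_t pos = blk_rq_pos(rq);  /* was rq->sector */

            return (last < pos) ? pos - last : last - pos;
    }
]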
index 20b4111fa0507a46a641faa295124afeaf5f4ea5..0d98054cdbd77c19e4bc35397aef702c8edc3e22 100644
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
-
-       if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-               BUG();
-
+       __blk_end_request_all(rq, q->orderr);
        return true;
 }
 
@@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
         * For an empty barrier, there's no actual BAR request, which
         * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
-       if (!rq->hard_nr_sectors) {
+       if (!blk_rq_sectors(rq)) {
                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                QUEUE_ORDERED_DO_POSTFLUSH);
                /*
@@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
        }
 
        /* stash away the original request */
-       elv_dequeue_request(q, rq);
+       blk_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = NULL;
 
@@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;
@@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
-                       elv_dequeue_request(q, rq);
-                       if (__blk_end_request(rq, -EOPNOTSUPP,
-                                             blk_rq_bytes(rq)))
-                               BUG();
+                       blk_dequeue_request(rq);
+                       __blk_end_request_all(rq, -EOPNOTSUPP);
                        *rqp = NULL;
                        return false;
                }
@@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it. For non-stacked drivers, this should be copied
-        * from rq->sector.
+        * from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_sector;
index c89883be87379d9454ab1af7cd68319e92795597..59c4af5231121c4c819a37b96abcc2fadbd7a7c2 100644
@@ -68,11 +68,11 @@ static void drive_stat_acct(struct request *rq, int new_io)
        int rw = rq_data_dir(rq);
        int cpu;
 
-       if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
+       if (!blk_do_io_stat(rq))
                return;
 
        cpu = part_stat_lock();
-       part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+       part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
        if (!new_io)
                part_stat_inc(cpu, part, merges[rw]);
@@ -127,13 +127,14 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;
        rq->q = q;
-       rq->sector = rq->hard_sector = (sector_t) -1;
+       rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->cmd = rq->__cmd;
        rq->cmd_len = BLK_MAX_CDB;
        rq->tag = -1;
        rq->ref_count = 1;
+       rq->start_time = jiffies;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -184,14 +185,11 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);
 
-       printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
-                                               (unsigned long long)rq->sector,
-                                               rq->nr_sectors,
-                                               rq->current_nr_sectors);
-       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
-                                               rq->bio, rq->biotail,
-                                               rq->buffer, rq->data,
-                                               rq->data_len);
+       printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
+              (unsigned long long)blk_rq_pos(rq),
+              blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
+       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
+              rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
        if (blk_pc_request(rq)) {
                printk(KERN_INFO "  cdb: ");
@@ -333,24 +331,6 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
-static void blk_invoke_request_fn(struct request_queue *q)
-{
-       if (unlikely(blk_queue_stopped(q)))
-               return;
-
-       /*
-        * one level of recursion is ok and is much faster than kicking
-        * the unplug handling
-        */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else {
-               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-               kblockd_schedule_work(q, &q->unplug_work);
-       }
-}
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -365,7 +345,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       blk_invoke_request_fn(q);
+       __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -425,12 +405,23 @@ void __blk_run_queue(struct request_queue *q)
 {
        blk_remove_plug(q);
 
+       if (unlikely(blk_queue_stopped(q)))
+               return;
+
+       if (elv_queue_empty(q))
+               return;
+
        /*
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!elv_queue_empty(q))
-               blk_invoke_request_fn(q);
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+               q->request_fn(q);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
+       } else {
+               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+               kblockd_schedule_work(q, &q->unplug_work);
+       }
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -440,9 +431,7 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Invoke request handling on this queue, if it has pending work to do.
- *    May be used to restart queueing when a request has completed. Also
- *    See @blk_start_queueing.
- *
+ *    May be used to restart queueing when a request has completed.
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -902,26 +891,58 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);
 
 /**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q:         request queue to kick into gear
+ * blk_make_request - given a bio, allocate a corresponding struct request.
+ *
+ * @bio:  The bio describing the memory mappings that will be submitted for IO.
+ *        It may be a chained-bio properly constructed by block/bio layer.
+ *
+ * blk_make_request is the parallel of generic_make_request for BLOCK_PC
+ * type commands, where the struct request needs to be further initialized by
+ * the caller. It is passed a &struct bio, which describes the memory info of
+ * the I/O transfer.
  *
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
+ * The caller of blk_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffers, and that bio_data_dir() returns
+ * the needed direction of the request (and that all bios in the passed
+ * bio-chain are properly set accordingly).
  *
- * The queue lock must be held with interrupts disabled.
+ * If called under non-sleepable conditions, mapped bio buffers must not
+ * need bouncing, by calling the appropriate masked or flagged allocator,
+ * suitable for the target device. Otherwise the call to blk_queue_bounce will
+ * BUG.
+ *
+ * WARNING: When allocating/cloning a bio-chain, careful consideration should be
+ * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
+ * anything but the first bio in the chain. Otherwise you risk waiting for IO
+ * completion of a bio that hasn't been submitted yet, thus resulting in a
+ * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
+ * of bio_alloc(), as that avoids the mempool deadlock.
+ * If possible a big IO should be split into smaller parts when allocation
+ * fails. Partial allocation should not be an error, or you risk a live-lock.
  */
-void blk_start_queueing(struct request_queue *q)
+struct request *blk_make_request(struct request_queue *q, struct bio *bio,
+                                gfp_t gfp_mask)
 {
-       if (!blk_queue_plugged(q)) {
-               if (unlikely(blk_queue_stopped(q)))
-                       return;
-               q->request_fn(q);
-       } else
-               __generic_unplug_device(q);
+       struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
+
+       if (unlikely(!rq))
+               return ERR_PTR(-ENOMEM);
+
+       for_each_bio(bio) {
+               struct bio *bounce_bio = bio;
+               int ret;
+
+               blk_queue_bounce(q, &bounce_bio);
+               ret = blk_rq_append_bio(q, rq, bounce_bio);
+               if (unlikely(ret)) {
+                       blk_put_request(rq);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       return rq;
 }
-EXPORT_SYMBOL(blk_start_queueing);
+EXPORT_SYMBOL(blk_make_request);
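[editor's note: a sketch of how a caller might drive the new helper for a BLOCK_PC command, per the description above; the CDB bytes are purely illustrative and the caller still owns command setup:

    struct request *rq;

    rq = blk_make_request(q, bio, GFP_KERNEL);  /* bio chain -> request */
    if (IS_ERR(rq))
            return PTR_ERR(rq);

    rq->cmd_type = REQ_TYPE_BLOCK_PC;           /* finish initialization */
    rq->cmd_len = 6;
    rq->cmd[0] = READ_6;                        /* illustrative CDB only */

    blk_execute_rq(q, NULL, rq, 0);             /* submit and wait */
    blk_put_request(rq);
]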
 
 /**
  * blk_requeue_request - put a request back on queue
@@ -935,6 +956,8 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+       BUG_ON(blk_queued_rq(rq));
+
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
        trace_block_rq_requeue(q, rq);
@@ -977,7 +1000,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
         * barrier
         */
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->cmd_flags |= REQ_SOFTBARRIER;
 
        rq->special = data;
 
@@ -991,7 +1013,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       blk_start_queueing(q);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1113,16 +1135,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        if (bio_failfast_driver(bio))
                req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
-       /*
-        * REQ_BARRIER implies no merging, but lets make it explicit
-        */
        if (unlikely(bio_discard(bio))) {
                req->cmd_flags |= REQ_DISCARD;
                if (bio_barrier(bio))
                        req->cmd_flags |= REQ_SOFTBARRIER;
                req->q->prepare_discard_fn(req->q, req);
        } else if (unlikely(bio_barrier(bio)))
-               req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+               req->cmd_flags |= REQ_HARDBARRIER;
 
        if (bio_sync(bio))
                req->cmd_flags |= REQ_RW_SYNC;
@@ -1132,9 +1151,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
-       req->hard_sector = req->sector = bio->bi_sector;
+       req->__sector = bio->bi_sector;
        req->ioprio = bio_prio(bio);
-       req->start_time = jiffies;
        blk_rq_bio_prep(req->q, req, bio);
 }
 
@@ -1150,14 +1168,13 @@ static inline bool queue_should_plug(struct request_queue *q)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
-       int el_ret, nr_sectors;
+       int el_ret;
+       unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        const int unplug = bio_unplug(bio);
        int rw_flags;
 
-       nr_sectors = bio_sectors(bio);
-
        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
@@ -1182,7 +1199,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
                req->biotail->bi_next = bio;
                req->biotail = bio;
-               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->__data_len += bytes;
                req->ioprio = ioprio_best(req->ioprio, prio);
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
@@ -1208,10 +1225,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
                 * not touch req->buffer either...
                 */
                req->buffer = bio_data(bio);
-               req->current_nr_sectors = bio_cur_sectors(bio);
-               req->hard_cur_sectors = req->current_nr_sectors;
-               req->sector = req->hard_sector = bio->bi_sector;
-               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->__sector = bio->bi_sector;
+               req->__data_len += bytes;
                req->ioprio = ioprio_best(req->ioprio, prio);
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
@@ -1593,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-       if (rq->nr_sectors > q->max_sectors ||
-           rq->data_len > q->max_hw_sectors << 9) {
+       if (blk_rq_sectors(rq) > q->max_sectors ||
+           blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
@@ -1651,40 +1666,15 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
-/**
- * blkdev_dequeue_request - dequeue request and start timeout timer
- * @req: request to dequeue
- *
- * Dequeue @req and start timeout timer on it.  This hands off the
- * request to the driver.
- *
- * Block internal functions which don't want to start timer should
- * call elv_dequeue_request().
- */
-void blkdev_dequeue_request(struct request *req)
-{
-       elv_dequeue_request(req->q, req);
-
-       /*
-        * We are now handing the request to the hardware, add the
-        * timeout handler.
-        */
-       blk_add_timer(req);
-}
-EXPORT_SYMBOL(blkdev_dequeue_request);
-
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-       if (!blk_do_io_stat(req))
-               return;
-
-       if (blk_fs_request(req)) {
+       if (blk_do_io_stat(req)) {
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;
 
                cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+               part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                part_stat_unlock();
        }
@@ -1692,22 +1682,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 
 static void blk_account_io_done(struct request *req)
 {
-       if (!blk_do_io_stat(req))
-               return;
-
        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing
         * request is enough.
         */
-       if (blk_fs_request(req) && req != &req->q->bar_rq) {
+       if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;
 
                cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+               part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
@@ -1719,38 +1706,218 @@ static void blk_account_io_done(struct request *req)
 }
 
 /**
- * __end_that_request_first - end I/O on a request
- * @req:      the request being processed
+ * blk_peek_request - peek at the top of a request queue
+ * @q: request queue to peek at
+ *
+ * Description:
+ *     Return the request at the top of @q.  The returned request
+ *     should be started using blk_start_request() before LLD starts
+ *     processing it.
+ *
+ * Return:
+ *     Pointer to the request at the top of @q if available.  Null
+ *     otherwise.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+struct request *blk_peek_request(struct request_queue *q)
+{
+       struct request *rq;
+       int ret;
+
+       while ((rq = __elv_next_request(q)) != NULL) {
+               if (!(rq->cmd_flags & REQ_STARTED)) {
+                       /*
+                        * This is the first time the device driver
+                        * sees this request (possibly after
+                        * requeueing).  Notify IO scheduler.
+                        */
+                       if (blk_sorted_rq(rq))
+                               elv_activate_rq(q, rq);
+
+                       /*
+                        * just mark as started even if we don't start
+                        * it, a request that has been delayed should
+                        * not be passed by new incoming requests
+                        */
+                       rq->cmd_flags |= REQ_STARTED;
+                       trace_block_rq_issue(q, rq);
+               }
+
+               if (!q->boundary_rq || q->boundary_rq == rq) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = NULL;
+               }
+
+               if (rq->cmd_flags & REQ_DONTPREP)
+                       break;
+
+               if (q->dma_drain_size && blk_rq_bytes(rq)) {
+                       /*
+                        * make sure space for the drain appears.  We
+                        * know we can do this because max_hw_segments
+                        * has been adjusted to be one fewer than the
+                        * device can handle
+                        */
+                       rq->nr_phys_segments++;
+               }
+
+               if (!q->prep_rq_fn)
+                       break;
+
+               ret = q->prep_rq_fn(q, rq);
+               if (ret == BLKPREP_OK) {
+                       break;
+               } else if (ret == BLKPREP_DEFER) {
+                       /*
+                        * the request may have been (partially) prepped.
+                        * we need to keep this request in the front to
+                        * avoid resource deadlock.  REQ_STARTED will
+                        * prevent other fs requests from passing this one.
+                        */
+                       if (q->dma_drain_size && blk_rq_bytes(rq) &&
+                           !(rq->cmd_flags & REQ_DONTPREP)) {
+                               /*
+                                * remove the space for the drain we added
+                                * so that we don't add it again
+                                */
+                               --rq->nr_phys_segments;
+                       }
+
+                       rq = NULL;
+                       break;
+               } else if (ret == BLKPREP_KILL) {
+                       rq->cmd_flags |= REQ_QUIET;
+                       __blk_end_request_all(rq, -EIO);
+               } else {
+                       printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+                       break;
+               }
+       }
+
+       return rq;
+}
+EXPORT_SYMBOL(blk_peek_request);
+
+void blk_dequeue_request(struct request *rq)
+{
+       struct request_queue *q = rq->q;
+
+       BUG_ON(list_empty(&rq->queuelist));
+       BUG_ON(ELV_ON_HASH(rq));
+
+       list_del_init(&rq->queuelist);
+
+       /*
+        * the time frame between a request being removed from the lists
+        * and it being freed is accounted as io that is in progress at
+        * the driver side.
+        */
+       if (blk_account_rq(rq))
+               q->in_flight[rq_is_sync(rq)]++;
+}
+
+/**
+ * blk_start_request - start request processing on the driver
+ * @req: request to dequeue
+ *
+ * Description:
+ *     Dequeue @req and start timeout timer on it.  This hands off the
+ *     request to the driver.
+ *
+ *     Block internal functions which don't want to start timer should
+ *     call blk_dequeue_request().
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+void blk_start_request(struct request *req)
+{
+       blk_dequeue_request(req);
+
+       /*
+        * We are now handing the request to the hardware, initialize
+        * resid_len to full count and add the timeout handler.
+        */
+       req->resid_len = blk_rq_bytes(req);
+       blk_add_timer(req);
+}
+EXPORT_SYMBOL(blk_start_request);
+
+/**
+ * blk_fetch_request - fetch a request from a request queue
+ * @q: request queue to fetch a request from
+ *
+ * Description:
+ *     Return the request at the top of @q.  The request is started on
+ *     return and LLD can start processing it immediately.
+ *
+ * Return:
+ *     Pointer to the request at the top of @q if available.  Null
+ *     otherwise.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+struct request *blk_fetch_request(struct request_queue *q)
+{
+       struct request *rq;
+
+       rq = blk_peek_request(q);
+       if (rq)
+               blk_start_request(rq);
+       return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
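[editor's note: drivers that cannot always dispatch immediately can use the two halves separately: blk_peek_request() leaves the request at the head of the queue, and blk_start_request() is only called once the hardware can take it. A sketch (can_issue()/issue() are hypothetical), with queue_lock held as required:

    struct request *rq;

    /* queue_lock must be held, e.g. inside ->request_fn */
    while ((rq = blk_peek_request(q)) != NULL) {
            if (!can_issue(dev, rq))
                    break;                  /* rq stays at the head */

            blk_start_request(rq);          /* dequeue + arm the timeout */
            issue(dev, rq);                 /* hand off to hardware */
    }
]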
+
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:              the request being processed
  * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq doesn't have leftover.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ *
+ *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ *     %false return from this function.
  *
  * Return:
- *     %0 - we are done with this request, call end_that_request_last()
- *     %1 - still buffers pending for this request
+ *     %false - this request doesn't have any more data
+ *     %true  - this request has more data
  **/
-static int __end_that_request_first(struct request *req, int error,
-                                   int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;
 
+       if (!req->bio)
+               return false;
+
        trace_block_rq_complete(req->q, req);
 
        /*
-        * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
-        * sense key with us all the way through
+        * For fs requests, rq is just a carrier of independent bios
+        * and each partial completion should be handled separately.
+        * Reset per-request error on each partial completion.
+        *
+        * TODO: tj: This is too subtle.  It would be better to let
+        * low level drivers do what they see fit.
         */
-       if (!blk_pc_request(req))
+       if (blk_fs_request(req))
                req->errors = 0;
 
        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
-                               (unsigned long long)req->sector);
+                               (unsigned long long)blk_rq_pos(req));
        }
 
        blk_account_io_completion(req, nr_bytes);
@@ -1810,8 +1977,15 @@ static int __end_that_request_first(struct request *req, int error,
        /*
         * completely done
         */
-       if (!req->bio)
-               return 0;
+       if (!req->bio) {
+               /*
+                * Reset counters so that the request stacking driver
+                * can find how many bytes remain in the request
+                * later.
+                */
+               req->__data_len = 0;
+               return false;
+       }
 
        /*
         * if the request wasn't completed, update state
@@ -1823,22 +1997,56 @@ static int __end_that_request_first(struct request *req, int error,
                bio_iovec(bio)->bv_len -= nr_bytes;
        }
 
-       blk_recalc_rq_sectors(req, total_bytes >> 9);
+       req->__data_len -= total_bytes;
+       req->buffer = bio_data(req->bio);
+
+       /* update sector only for requests with clear definition of sector */
+       if (blk_fs_request(req) || blk_discard_rq(req))
+               req->__sector += total_bytes >> 9;
+
+       /*
+        * If total number of sectors is less than the first segment
+        * size, something has gone terribly wrong.
+        */
+       if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+               printk(KERN_ERR "blk: request botched\n");
+               req->__data_len = blk_rq_cur_bytes(req);
+       }
+
+       /* recalculate the number of segments */
        blk_recalc_rq_segments(req);
-       return 1;
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
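[editor's note: for a request stacking driver, advancing the request and finishing it are now two explicit steps. A sketch, where done_bytes is whatever the lower device reported complete and the caller follows the same locking rules as __blk_end_request():

    if (!blk_update_request(rq, error, done_bytes)) {
            /* nothing left: blk_rq_bytes(rq) is now 0, actually finish it */
            __blk_end_request_all(rq, error);       /* queue lock held */
    }
    /* else rq was re-prepared for its remaining range of segments */

As the kerneldoc above notes, passing blk_rq_bytes(rq) as done_bytes guarantees the helper returns false.]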
+
+static bool blk_update_bidi_request(struct request *rq, int error,
+                                   unsigned int nr_bytes,
+                                   unsigned int bidi_bytes)
+{
+       if (blk_update_request(rq, error, nr_bytes))
+               return true;
+
+       /* Bidi request must be completed as a whole */
+       if (unlikely(blk_bidi_rq(rq)) &&
+           blk_update_request(rq->next_rq, error, bidi_bytes))
+               return true;
+
+       add_disk_randomness(rq->rq_disk);
+
+       return false;
 }
 
 /*
  * queue lock must be held
  */
-static void end_that_request_last(struct request *req, int error)
+static void blk_finish_request(struct request *req, int error)
 {
+       BUG_ON(blk_queued_rq(req));
+
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
 
-       if (blk_queued_rq(req))
-               elv_dequeue_request(req->q, req);
-
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
@@ -1857,117 +2065,62 @@ static void end_that_request_last(struct request *req, int error)
 }
 
 /**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
-       if (blk_fs_request(rq))
-               return rq->hard_nr_sectors << 9;
-
-       return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
-       if (blk_fs_request(rq))
-               return rq->current_nr_sectors << 9;
-
-       if (rq->bio)
-               return rq->bio->bi_size;
-
-       return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
-/**
- * end_request - end I/O on the current segment of the request
- * @req:       the request being processed
- * @uptodate:  error value or %0/%1 uptodate flag
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq:         the request to complete
+ * @error:      %0 for success, < %0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
  * Description:
- *     Ends I/O on the current segment of a request. If that is the only
- *     remaining segment, the request is also completed and freed.
- *
- *     This is a remnant of how older block drivers handled I/O completions.
- *     Modern drivers typically end I/O on the full request in one go, unless
- *     they have a residual value to account for. For that case this function
- *     isn't really useful, unless the residual just happens to be the
- *     full current segment. In other words, don't use this function in new
- *     code. Use blk_end_request() or __blk_end_request() to end a request.
+ *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *     Drivers that support bidi can safely call this function for any
+ *     type of request, bidi or uni.  In the latter case @bidi_bytes is
+ *     just ignored.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-void end_request(struct request *req, int uptodate)
-{
-       int error = 0;
-
-       if (uptodate <= 0)
-               error = uptodate ? uptodate : -EIO;
-
-       __blk_end_request(req, error, req->hard_cur_sectors << 9);
-}
-EXPORT_SYMBOL(end_request);
-
-static int end_that_request_data(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, int error,
                                 unsigned int nr_bytes, unsigned int bidi_bytes)
 {
-       if (rq->bio) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
+       struct request_queue *q = rq->q;
+       unsigned long flags;
 
-               /* Bidi request must be completed as a whole */
-               if (blk_bidi_rq(rq) &&
-                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                       return 1;
-       }
+       if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+               return true;
 
-       return 0;
+       spin_lock_irqsave(q->queue_lock, flags);
+       blk_finish_request(rq, error);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return false;
 }
 
 /**
- * blk_end_io - Generic end_io function to complete a request.
- * @rq:           the request being processed
- * @error:        %0 for success, < %0 for error
- * @nr_bytes:     number of bytes to complete @rq
- * @bidi_bytes:   number of bytes to complete @rq->next_rq
- * @drv_callback: function called between completion of bios in the request
- *                and completion of the request.
- *                If the callback returns non %0, this helper returns without
- *                completion of the request.
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq:         the request to complete
+ * @error:      %0 for success, < %0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- *     If @rq has leftover, sets it up for the next range of segments.
+ *     Identical to blk_end_bidi_request() except that queue lock is
+ *     assumed to be locked on entry and remains so on return.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - this request is not freed yet, it still has pending buffers.
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
-                     unsigned int bidi_bytes,
-                     int (drv_callback)(struct request *))
+static bool __blk_end_bidi_request(struct request *rq, int error,
+                                  unsigned int nr_bytes, unsigned int bidi_bytes)
 {
-       struct request_queue *q = rq->q;
-       unsigned long flags = 0UL;
-
-       if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
-               return 1;
-
-       /* Special feature for tricky drivers */
-       if (drv_callback && drv_callback(rq))
-               return 1;
-
-       add_disk_randomness(rq->rq_disk);
+       if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+               return true;
 
-       spin_lock_irqsave(q->queue_lock, flags);
-       end_that_request_last(rq, error);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_finish_request(rq, error);
 
-       return 0;
+       return false;
 }
 
 /**
@@ -1981,124 +2134,112 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
  *     If @rq has leftover, sets it up for the next range of segments.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-       return blk_end_io(rq, error, nr_bytes, 0, NULL);
+       return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL_GPL(blk_end_request);
 
 /**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
  *
  * Description:
- *     Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+ *     Completely finish @rq.
+ */
+void blk_end_request_all(struct request *rq, int error)
 {
-       if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
-               return 1;
+       bool pending;
+       unsigned int bidi_bytes = 0;
 
-       add_disk_randomness(rq->rq_disk);
+       if (unlikely(blk_bidi_rq(rq)))
+               bidi_bytes = blk_rq_bytes(rq->next_rq);
 
-       end_that_request_last(rq, error);
+       pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+       BUG_ON(pending);
+}
+EXPORT_SYMBOL_GPL(blk_end_request_all);
 
-       return 0;
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Complete the current consecutively mapped chunk from @rq.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_cur(struct request *rq, int error)
+{
+       return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL_GPL(blk_end_request_cur);
 
 /**
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
- * @rq:         the bidi request being processed
- * @error:      %0 for success, < %0 for error
- * @nr_bytes:   number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *     Must be called with queue lock held unlike blk_end_request().
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
-                        unsigned int bidi_bytes)
+bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-       return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
+       return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+EXPORT_SYMBOL_GPL(__blk_end_request);
 
 /**
- * blk_update_request - Special helper function for request stacking drivers
- * @rq:           the request being processed
- * @error:        %0 for success, < %0 for error
- * @nr_bytes:     number of bytes to complete @rq
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
- *     the request structure even if @rq doesn't have leftover.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- *     This special helper function is only for request stacking drivers
- *     (e.g. request-based dm) so that they can handle partial completion.
- *     Actual device drivers should use blk_end_request instead.
+ *     Completely finish @rq.  Must be called with queue lock held.
  */
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+void __blk_end_request_all(struct request *rq, int error)
 {
-       if (!end_that_request_data(rq, error, nr_bytes, 0)) {
-               /*
-                * These members are not updated in end_that_request_data()
-                * when all bios are completed.
-                * Update them so that the request stacking driver can find
-                * how many bytes remain in the request later.
-                */
-               rq->nr_sectors = rq->hard_nr_sectors = 0;
-               rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-       }
+       bool pending;
+       unsigned int bidi_bytes = 0;
+
+       if (unlikely(blk_bidi_rq(rq)))
+               bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+       pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+       BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(blk_update_request);
+EXPORT_SYMBOL_GPL(__blk_end_request_all);
 
 /**
- * blk_end_request_callback - Special helper function for tricky drivers
- * @rq:           the request being processed
- * @error:        %0 for success, < %0 for error
- * @nr_bytes:     number of bytes to complete
- * @drv_callback: function called between completion of bios in the request
- *                and completion of the request.
- *                If the callback returns non %0, this helper returns without
- *                completion of the request.
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- *     This special helper function is used only for existing tricky drivers.
- *     (e.g. cdrom_newpc_intr() of ide-cd)
- *     This interface will be removed when such drivers are rewritten.
- *     Don't use this interface in other places anymore.
+ *     Complete the current consecutively mapped chunk from @rq.  Must
+ *     be called with queue lock held.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - this request is not freed yet.
- *          this request still has pending buffers or
- *          the driver doesn't want to finish this request yet.
- **/
-int blk_end_request_callback(struct request *rq, int error,
-                            unsigned int nr_bytes,
-                            int (drv_callback)(struct request *))
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_cur(struct request *rq, int error)
 {
-       return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
+       return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(blk_end_request_callback);
+EXPORT_SYMBOL_GPL(__blk_end_request_cur);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
@@ -2111,11 +2252,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
-       rq->current_nr_sectors = bio_cur_sectors(bio);
-       rq->hard_cur_sectors = rq->current_nr_sectors;
-       rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
-       rq->data_len = bio->bi_size;
-
+       rq->__data_len = bio->bi_size;
        rq->bio = rq->biotail = bio;
 
        if (bio->bi_bdev)
@@ -2158,6 +2295,9 @@ EXPORT_SYMBOL(kblockd_schedule_work);
 
 int __init blk_dev_init(void)
 {
+       BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+                       sizeof(((struct request *)0)->cmd_flags));
+
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
index 6af716d1e54e038468c455e093f2d1e08719812e..49557e91f0dab58cda736b556378525dbea497b8 100644 (file)
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
        rq->rq_disk = bd_disk;
-       rq->cmd_flags |= REQ_NOMERGE;
        rq->end_io = done;
        WARN_ON(irqs_disabled());
        spin_lock_irq(q->queue_lock);
index f103729b462fdb817d3a51caf5cf33984d0e6cf6..ef2492adca7e3af16537ac854280b85821426a5e 100644 (file)
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
 
-               rq->data_len += bio->bi_size;
+               rq->__data_len += bio->bi_size;
        }
        return 0;
 }
-EXPORT_SYMBOL(blk_rq_append_bio);
 
 static int __blk_rq_unmap_user(struct bio *bio)
 {
@@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;
 
-       rq->buffer = rq->data = NULL;
+       rq->buffer = NULL;
        return 0;
 unmap_rq:
        blk_rq_unmap_user(bio);
@@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
-       rq->buffer = rq->data = NULL;
+       rq->buffer = NULL;
        return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  *
  * Description:
  *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer is used.
+ *    buffer is used. Can be called multiple times to append multiple
+ *    buffers.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
@@ -290,6 +290,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        int reading = rq_data_dir(rq) == READ;
        int do_copy = 0;
        struct bio *bio;
+       int ret;
 
        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
@@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;
 
-       blk_rq_bio_prep(q, rq, bio);
+       ret = blk_rq_append_bio(q, rq, bio);
+       if (unlikely(ret)) {
+               /* request is too big */
+               bio_put(bio);
+               return ret;
+       }
+
        blk_queue_bounce(q, &rq->bio);
-       rq->buffer = rq->data = NULL;
+       rq->buffer = NULL;
        return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
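
The description above changes because blk_rq_map_kern() now goes through
blk_rq_append_bio() instead of blk_rq_bio_prep(), so each call adds a bio
rather than resetting the request. A sketch under that assumption (the
two-buffer PC request and every name prefixed example_ are hypothetical):

        static int example_build_pc_rq(struct request_queue *q,
                                       void *hdr_buf, unsigned int hdr_len,
                                       void *data_buf, unsigned int data_len)
        {
                struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
                int ret;

                rq->cmd_type = REQ_TYPE_BLOCK_PC;
                ret = blk_rq_map_kern(q, rq, hdr_buf, hdr_len, GFP_KERNEL);
                if (!ret)       /* second call appends rather than replaces */
                        ret = blk_rq_map_kern(q, rq, data_buf, data_len,
                                              GFP_KERNEL);
                return ret;
        }
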
index 23d2a6fe34a38b2e4f662bfeab9b2ddbbc60e8c6..4974dd5767e516f7f2bd507fa10a55cb0da62bd5 100644 (file)
@@ -9,35 +9,6 @@
 
 #include "blk.h"
 
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
-       if (blk_fs_request(rq) || blk_discard_rq(rq)) {
-               rq->hard_sector += nsect;
-               rq->hard_nr_sectors -= nsect;
-
-               /*
-                * Move the I/O submission pointers ahead if required.
-                */
-               if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-                   (rq->sector <= rq->hard_sector)) {
-                       rq->sector = rq->hard_sector;
-                       rq->nr_sectors = rq->hard_nr_sectors;
-                       rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-                       rq->current_nr_sectors = rq->hard_cur_sectors;
-                       rq->buffer = bio_data(rq->bio);
-               }
-
-               /*
-                * if total number of sectors is less than the first segment
-                * size, something has gone terribly wrong
-                */
-               if (rq->nr_sectors < rq->current_nr_sectors) {
-                       printk(KERN_ERR "blk: request botched\n");
-                       rq->nr_sectors = rq->current_nr_sectors;
-               }
-       }
-}
-
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
@@ -199,8 +170,9 @@ new_segment:
 
 
        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
-           (rq->data_len & q->dma_pad_mask)) {
-               unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+           (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+               unsigned int pad_len =
+                       (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
                sg->length += pad_len;
                rq->extra_len += pad_len;
@@ -259,7 +231,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
        else
                max_sectors = q->max_sectors;
 
-       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -284,7 +256,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                max_sectors = q->max_sectors;
 
 
-       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -315,7 +287,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -345,7 +317,7 @@ static void blk_account_io_merge(struct request *req)
                int cpu;
 
                cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+               part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                part_round_stats(cpu, part);
                part_dec_in_flight(part);
@@ -366,7 +338,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        /*
         * not contiguous
         */
-       if (req->sector + req->nr_sectors != next->sector)
+       if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;
 
        if (rq_data_dir(req) != rq_data_dir(next)
@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;
 
-       req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+       req->__data_len += blk_rq_bytes(next);
 
        elv_merge_requests(q, req, next);
 
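
The substitutions in these hunks recur across the whole merge: direct
field access is replaced by the blkdev.h accessors so the fields can
become private to the block layer. In one place (sketch; the variables
exist only to show the mapping):

        static void example_rq_accessors(struct request *rq)
        {
                sector_t pos       = blk_rq_pos(rq);         /* was rq->sector */
                unsigned int nsect = blk_rq_sectors(rq);     /* was rq->nr_sectors */
                unsigned int cur   = blk_rq_cur_sectors(rq); /* was rq->current_nr_sectors */
                unsigned int bytes = blk_rq_bytes(rq);       /* was rq->data_len */

                (void)pos; (void)nsect; (void)cur; (void)bytes;
        }
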
index 3c518e3303ae34113516924f9d73a58b94a1e75c..2e5cfeb59333ecc088576324f1d6e0e88faf3bed 100644 (file)
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
-       unsigned max_depth, offset;
+       unsigned max_depth;
        int tag;
 
        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
         * to starve sync IO on behalf of flooding async IO.
         */
        max_depth = bqt->max_depth;
-       if (rq_is_sync(rq))
-               offset = 0;
-       else
-               offset = max_depth >> 2;
+       if (!rq_is_sync(rq) && max_depth > 1) {
+               max_depth -= 2;
+               if (!max_depth)
+                       max_depth = 1;
+               if (q->in_flight[0] > max_depth)
+                       return 1;
+       }
 
        do {
-               tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+               tag = find_first_zero_bit(bqt->tag_map, max_depth);
                if (tag >= max_depth)
                        return 1;
 
@@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
-       blkdev_dequeue_request(rq);
+       blk_start_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
 }
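
blkdev_dequeue_request() callers become blk_start_request() here. Drivers
that paired elv_next_request() with an explicit dequeue can use
blk_fetch_request() instead; in effect it is peek plus start in one step
(sketch of the equivalence; example_fetch is a hypothetical name):

        static struct request *example_fetch(struct request_queue *q)
        {
                struct request *rq = blk_peek_request(q);

                if (rq)
                        blk_start_request(rq); /* dequeue it, arm the timeout */
                return rq;
        }
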
index 1ec0d503cacdc95bf296776ca2dc2d1e9ac4b108..1ba7e0aca8781b55ce14fa54491da8f81505b747 100644 (file)
@@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data)
                        if (blk_mark_rq_complete(rq))
                                continue;
                        blk_rq_timed_out(rq);
-               } else {
-                       if (!next || time_after(next, rq->deadline))
-                               next = rq->deadline;
-               }
+               } else if (!next || time_after(next, rq->deadline))
+                       next = rq->deadline;
        }
 
        /*
@@ -176,16 +174,14 @@ void blk_add_timer(struct request *req)
        BUG_ON(!list_empty(&req->timeout_list));
        BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 
-       if (req->timeout)
-               req->deadline = jiffies + req->timeout;
-       else {
-               req->deadline = jiffies + q->rq_timeout;
-               /*
-                * Some LLDs, like scsi, peek at the timeout to prevent
-                * a command from being retried forever.
-                */
+       /*
+        * Some LLDs, like scsi, peek at the timeout to prevent a
+        * command from being retried forever.
+        */
+       if (!req->timeout)
                req->timeout = q->rq_timeout;
-       }
+
+       req->deadline = jiffies + req->timeout;
        list_add_tail(&req->timeout_list, &q->timeout_list);
 
        /*
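
After this restructuring, the per-request value and the queue default feed
one deadline computation: rq->timeout wins if the LLD set it before the
request was started, otherwise it is filled from q->rq_timeout so drivers
that peek at it always see a real value. A sketch (the 30s/120s values are
hypothetical; blk_queue_rq_timeout() is the existing setter from
blk-settings.c):

        static void example_timeouts(struct request_queue *q,
                                     struct request *rq)
        {
                blk_queue_rq_timeout(q, 30 * HZ); /* queue-wide default */
                rq->timeout = 120 * HZ;           /* per-request override */
        }
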
index 79c85f7c9ff50fb33fb89619a744c0f1d3e1686b..c863ec2281e05d6ca596d44a6438d61ca6eaf803 100644 (file)
@@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype;
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+                     struct bio *bio);
+void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 
 void blk_unplug_work(struct work_struct *work);
@@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
        clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 }
 
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq)                (!hlist_unhashed(&(rq)->hash))
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+       struct request *rq;
+
+       while (1) {
+               while (!list_empty(&q->queue_head)) {
+                       rq = list_entry_rq(q->queue_head.next);
+                       if (blk_do_ordered(q, &rq))
+                               return rq;
+               }
+
+               if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+                       return NULL;
+       }
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+       struct elevator_queue *e = q->elevator;
+
+       if (e->ops->elevator_activate_req_fn)
+               e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+       struct elevator_queue *e = q->elevator;
+
+       if (e->ops->elevator_deactivate_req_fn)
+               e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
 #ifdef CONFIG_FAIL_IO_TIMEOUT
 int blk_should_fake_timeout(struct request_queue *);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
@@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
 void blk_recalc_rq_segments(struct request *rq);
-void blk_recalc_rq_sectors(struct request *rq, int nsect);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
@@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu)
 #endif
 }
 
+/*
+ * Contribute to IO statistics IFF:
+ *
+ *     a) it's attached to a gendisk, and
+ *     b) the queue had IO stats enabled when this request was started, and
+ *     c) it's a file system request or a discard request
+ */
 static inline int blk_do_io_stat(struct request *rq)
 {
-       return rq->rq_disk && blk_rq_io_stat(rq);
+       return rq->rq_disk && blk_rq_io_stat(rq) &&
+               (blk_fs_request(rq) || blk_discard_rq(rq));
 }
 
 #endif
index 206060e795da4324732835bf5012afca4ac7d43f..2d746e34f4c24c1a0c70e627af78955ef9681af9 100644 (file)
@@ -445,14 +445,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
        }
 
        if (rq->next_rq) {
-               hdr->dout_resid = rq->data_len;
-               hdr->din_resid = rq->next_rq->data_len;
+               hdr->dout_resid = rq->resid_len;
+               hdr->din_resid = rq->next_rq->resid_len;
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        } else if (rq_data_dir(rq) == READ)
-               hdr->din_resid = rq->data_len;
+               hdr->din_resid = rq->resid_len;
        else
-               hdr->dout_resid = rq->data_len;
+               hdr->dout_resid = rq->resid_len;
 
        /*
         * If the request generated a negative error number, return it
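
rq->data_len previously doubled as the completion-time residual, which is
what the bsg code above relied on; the residual now lives in the new
rq->resid_len field. A sketch of the producer side in an LLD completion
path (example_lld_done and xferred are hypothetical):

        static void example_lld_done(struct request *rq, unsigned int xferred,
                                     int error)
        {
                rq->resid_len = blk_rq_bytes(rq) - xferred; /* bytes not done */
                blk_end_request_all(rq, error);
        }
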
index a55a9bd75bd1baf616a3a1b7118acaeee328759f..99ac4304d711e093867040d49ace530e832876db 100644 (file)
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
        else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
                return rq2;
 
-       s1 = rq1->sector;
-       s2 = rq2->sector;
+       s1 = blk_rq_pos(rq1);
+       s2 = blk_rq_pos(rq2);
 
        last = cfqd->last_position;
 
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
                 * Sort strictly based on sector.  Smallest to the left,
                 * largest to the right.
                 */
-               if (sector > cfqq->next_rq->sector)
+               if (sector > blk_rq_pos(cfqq->next_rq))
                        n = &(*p)->rb_right;
-               else if (sector < cfqq->next_rq->sector)
+               else if (sector < blk_rq_pos(cfqq->next_rq))
                        n = &(*p)->rb_left;
                else
                        break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                return;
 
        cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
-       __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
-                                        &parent, &p);
+       __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+                                     blk_rq_pos(cfqq->next_rq), &parent, &p);
        if (!__cfqq) {
                rb_link_node(&cfqq->p_node, parent, p);
                rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
                                                cfqd->rq_in_driver);
 
-       cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+       cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
 
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                          struct request *rq)
 {
-       if (rq->sector >= cfqd->last_position)
-               return rq->sector - cfqd->last_position;
+       if (blk_rq_pos(rq) >= cfqd->last_position)
+               return blk_rq_pos(rq) - cfqd->last_position;
        else
-               return cfqd->last_position - rq->sector;
+               return cfqd->last_position - blk_rq_pos(rq);
 }
 
 #define CIC_SEEK_THR   8 * 1024
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
        if (cfq_rq_close(cfqd, __cfqq->next_rq))
                return __cfqq;
 
-       if (__cfqq->next_rq->sector < sector)
+       if (blk_rq_pos(__cfqq->next_rq) < sector)
                node = rb_next(&__cfqq->p_node);
        else
                node = rb_prev(&__cfqq->p_node);
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 
        if (!cic->last_request_pos)
                sdist = 0;
-       else if (cic->last_request_pos < rq->sector)
-               sdist = rq->sector - cic->last_request_pos;
+       else if (cic->last_request_pos < blk_rq_pos(rq))
+               sdist = blk_rq_pos(rq) - cic->last_request_pos;
        else
-               sdist = cic->last_request_pos - rq->sector;
+               sdist = cic->last_request_pos - blk_rq_pos(rq);
 
        /*
         * Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfq_update_io_seektime(cfqd, cic, rq);
        cfq_update_idle_window(cfqd, cfqq, cic);
 
-       cic->last_request_pos = rq->sector + rq->nr_sectors;
+       cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
        if (cfqq == cfqd->active_queue) {
                /*
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
                            cfqd->busy_queues > 1) {
                                del_timer(&cfqd->idle_slice_timer);
-                               blk_start_queueing(cfqd->queue);
+                               __blk_run_queue(cfqd->queue);
                        }
                        cfq_mark_cfqq_must_dispatch(cfqq);
                }
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               blk_start_queueing(cfqd->queue);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       blk_start_queueing(q);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
index c4d991d4adef0e7fd6e0d432941b88771d7c6530..b547cbca7b23a55dd9d1f0b444bed07d297fc646 100644 (file)
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
                __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                if (__rq) {
-                       BUG_ON(sector != __rq->sector);
+                       BUG_ON(sector != blk_rq_pos(__rq));
 
                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
index 7073a9072577cdf3a0ae6e63c5ca247c2f493a5d..ebee948293ebc7c265ebb4d9670575e69d6f073e 100644 (file)
@@ -52,8 +52,7 @@ static const int elv_hash_shift = 6;
 #define ELV_HASH_FN(sec)       \
                (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES       (1 << elv_hash_shift)
-#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)                (!hlist_unhashed(&(rq)->hash))
+#define rq_hash_key(rq)                (blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -120,9 +119,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
-               if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+               if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
-               else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+               else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }
 
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e->ops->elevator_activate_req_fn)
-               e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e->ops->elevator_deactivate_req_fn)
-               e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
        hlist_del_init(&rq->hash);
@@ -387,9 +370,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);
 
-               if (rq->sector < __rq->sector)
+               if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
-               else if (rq->sector > __rq->sector)
+               else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
                else
                        return __rq;
@@ -417,9 +400,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
        while (n) {
                rq = rb_entry(n, struct request, rb_node);
 
-               if (sector < rq->sector)
+               if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
-               else if (sector > rq->sector)
+               else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
@@ -458,14 +441,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
-               if (rq->sector >= boundary) {
-                       if (pos->sector < boundary)
+               if (blk_rq_pos(rq) >= boundary) {
+                       if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
-                       if (pos->sector >= boundary)
+                       if (blk_rq_pos(pos) >= boundary)
                                break;
                }
-               if (rq->sector >= pos->sector)
+               if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }
 
@@ -563,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
-               q->in_flight--;
+               q->in_flight[rq_is_sync(rq)]--;
                if (blk_sorted_rq(rq))
                        elv_deactivate_rq(q, rq);
        }
@@ -599,7 +582,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               blk_start_queueing(q);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -643,8 +626,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               blk_remove_plug(q);
-               blk_start_queueing(q);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT:
@@ -703,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
        if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-                       - q->in_flight;
+                               - queue_in_flight(q);
 
                if (nrq >= q->unplug_thresh)
                        __generic_unplug_device(q);
@@ -759,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 }
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-       struct request *rq;
-
-       while (1) {
-               while (!list_empty(&q->queue_head)) {
-                       rq = list_entry_rq(q->queue_head.next);
-                       if (blk_do_ordered(q, &rq))
-                               return rq;
-               }
-
-               if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-                       return NULL;
-       }
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-       struct request *rq;
-       int ret;
-
-       while ((rq = __elv_next_request(q)) != NULL) {
-               if (!(rq->cmd_flags & REQ_STARTED)) {
-                       /*
-                        * This is the first time the device driver
-                        * sees this request (possibly after
-                        * requeueing).  Notify IO scheduler.
-                        */
-                       if (blk_sorted_rq(rq))
-                               elv_activate_rq(q, rq);
-
-                       /*
-                        * just mark as started even if we don't start
-                        * it, a request that has been delayed should
-                        * not be passed by new incoming requests
-                        */
-                       rq->cmd_flags |= REQ_STARTED;
-                       trace_block_rq_issue(q, rq);
-               }
-
-               if (!q->boundary_rq || q->boundary_rq == rq) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = NULL;
-               }
-
-               if (rq->cmd_flags & REQ_DONTPREP)
-                       break;
-
-               if (q->dma_drain_size && rq->data_len) {
-                       /*
-                        * make sure space for the drain appears we
-                        * know we can do this because max_hw_segments
-                        * has been adjusted to be one fewer than the
-                        * device can handle
-                        */
-                       rq->nr_phys_segments++;
-               }
-
-               if (!q->prep_rq_fn)
-                       break;
-
-               ret = q->prep_rq_fn(q, rq);
-               if (ret == BLKPREP_OK) {
-                       break;
-               } else if (ret == BLKPREP_DEFER) {
-                       /*
-                        * the request may have been (partially) prepped.
-                        * we need to keep this request in the front to
-                        * avoid resource deadlock.  REQ_STARTED will
-                        * prevent other fs requests from passing this one.
-                        */
-                       if (q->dma_drain_size && rq->data_len &&
-                           !(rq->cmd_flags & REQ_DONTPREP)) {
-                               /*
-                                * remove the space for the drain we added
-                                * so that we don't add it again
-                                */
-                               --rq->nr_phys_segments;
-                       }
-
-                       rq = NULL;
-                       break;
-               } else if (ret == BLKPREP_KILL) {
-                       rq->cmd_flags |= REQ_QUIET;
-                       __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-               } else {
-                       printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-                       break;
-               }
-       }
-
-       return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-       BUG_ON(list_empty(&rq->queuelist));
-       BUG_ON(ELV_ON_HASH(rq));
-
-       list_del_init(&rq->queuelist);
-
-       /*
-        * the time frame between a request being removed from the lists
-        * and to it is freed is accounted as io that is in progress at
-        * the driver side.
-        */
-       if (blk_account_rq(rq))
-               q->in_flight++;
-}
-
 int elv_queue_empty(struct request_queue *q)
 {
        struct elevator_queue *e = q->elevator;
@@ -939,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
-               __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+               __blk_end_request_all(rq, -EIO);
        }
 }
 EXPORT_SYMBOL(elv_abort_queue);
@@ -952,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
-               q->in_flight--;
+               q->in_flight[rq_is_sync(rq)]--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }
@@ -967,11 +838,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
                if (!list_empty(&q->queue_head))
                        next = list_entry_rq(q->queue_head.next);
 
-               if (!q->in_flight &&
+               if (!queue_in_flight(q) &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-                       blk_start_queueing(q);
+                       __blk_run_queue(q);
                }
        }
 }
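
q->in_flight is now a two-element array split by request sync-ness, which
is what allows blk_queue_start_tag() earlier in this merge to throttle
async requests on their own. The convention, in one place (sketch;
example_account is a hypothetical name):

        static unsigned int example_account(struct request_queue *q,
                                            struct request *rq)
        {
                q->in_flight[rq_is_sync(rq)]++; /* [0] async, [1] sync */
                return queue_in_flight(q);      /* sum of both counters */
        }
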
index 82a0ca2f672901a8fab6442ae8a3662bedd7e6b8..a9670dd4b5de034cf2130f2fa0f552eda1bd80c5 100644 (file)
@@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
        hdr->info = 0;
        if (hdr->masked_status || hdr->host_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
-       hdr->resid = rq->data_len;
+       hdr->resid = rq->resid_len;
        hdr->sb_len_wr = 0;
 
        if (rq->sense_len && hdr->sbp) {
@@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
-       rq->data = NULL;
-       rq->data_len = 0;
-       rq->extra_len = 0;
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        rq->cmd[0] = cmd;
        rq->cmd[4] = data;
index 342316064e9ffe88b8962910908c345593c9ef1a..d0dfeef55db58444d5eeb979ab97f1e1628112c8 100644 (file)
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
        if (likely(!blk_pc_request(rq)))
                return 0;
 
-       if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+       if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
                return 0;
 
        return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
index f22ed6cc69f286f916c9c064871ffaad77babeca..668dc234b8e22d5b09950950914398d6f1f61766 100644 (file)
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
        DAC960_Command_T *Command;
 
    while(1) {
-       Request = elv_next_request(req_q);
+       Request = blk_peek_request(req_q);
        if (!Request)
                return 1;
 
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
        }
        Command->Completion = Request->end_io_data;
        Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
-       Command->BlockNumber = Request->sector;
-       Command->BlockCount = Request->nr_sectors;
+       Command->BlockNumber = blk_rq_pos(Request);
+       Command->BlockCount = blk_rq_sectors(Request);
        Command->Request = Request;
-       blkdev_dequeue_request(Request);
+       blk_start_request(Request);
        Command->SegmentCount = blk_rq_map_sg(req_q,
                  Command->Request, Command->cmd_sglist);
        /* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
    * successfully as possible.
    */
   Command->SegmentCount = 1;
-  Command->BlockNumber = Request->sector;
+  Command->BlockNumber = blk_rq_pos(Request);
   Command->BlockCount = 1;
   DAC960_QueueReadWriteCommand(Command);
   return;
index ddea8e485cc94dbc6ed9acddfd9231a26896bfca..f42fa50d35506276722ac6b21417468f847ba6c7 100644 (file)
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
 
 config MG_DISK
        tristate "mGine mflash, gflash support"
-       depends on ARM && ATA && GPIOLIB
+       depends on ARM && GPIOLIB
        help
          mGine mFlash(gFlash) block device driver
 
index 8df436ff7068b4840bdbcf610e357797ae387ae5..9c6e5b0fe894f9e6af94a50b87a2ec2b2f04a470 100644 (file)
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
 MODULE_LICENSE("GPL");
 
 static struct request_queue *floppy_queue;
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
 
 /*
  *  Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
 
 static void redo_fd_request(void)
 {
+       struct request *rq;
        unsigned int cnt, block, track, sector;
        int drive;
        struct amiga_floppy_struct *floppy;
        char *data;
        unsigned long flags;
+       int err;
 
- repeat:
-       if (!CURRENT) {
+next_req:
+       rq = blk_fetch_request(floppy_queue);
+       if (!rq) {
                /* Nothing left to do */
                return;
        }
 
-       floppy = CURRENT->rq_disk->private_data;
+       floppy = rq->rq_disk->private_data;
        drive = floppy - unit;
 
+next_segment:
        /* Here someone could investigate to be more efficient */
-       for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { 
+       for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
 #ifdef DEBUG
                printk("fd: sector %ld + %d requested for %s\n",
-                      CURRENT->sector,cnt,
-                      (rq_data_dir(CURRENT) == READ) ? "read" : "write");
+                      (long)blk_rq_pos(rq), cnt,
+                      (rq_data_dir(rq) == READ) ? "read" : "write");
 #endif
-               block = CURRENT->sector + cnt;
+               block = blk_rq_pos(rq) + cnt;
                if ((int)block > floppy->blocks) {
-                       end_request(CURRENT, 0);
-                       goto repeat;
+                       err = -EIO;
+                       break;
                }
 
                track = block / (floppy->dtype->sects * floppy->type->sect_mult);
                sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
-               data = CURRENT->buffer + 512 * cnt;
+               data = rq->buffer + 512 * cnt;
 #ifdef DEBUG
                printk("access to track %d, sector %d, with buffer at "
                       "0x%08lx\n", track, sector, data);
 #endif
 
-               if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
-                       printk(KERN_WARNING "do_fd_request: unknown command\n");
-                       end_request(CURRENT, 0);
-                       goto repeat;
-               }
                if (get_track(drive, track) == -1) {
-                       end_request(CURRENT, 0);
-                       goto repeat;
+                       err = -EIO;
+                       break;
                }
 
-               switch (rq_data_dir(CURRENT)) {
-               case READ:
+               if (rq_data_dir(rq) == READ) {
                        memcpy(data, floppy->trackbuf + sector * 512, 512);
-                       break;
-
-               case WRITE:
+               } else {
                        memcpy(floppy->trackbuf + sector * 512, data, 512);
 
                        /* keep the drive spinning while writes are scheduled */
                        if (!fd_motor_on(drive)) {
-                               end_request(CURRENT, 0);
-                               goto repeat;
+                               err = -EIO;
+                               break;
                        }
                        /*
                         * setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
                        /* reset the timer */
                        mod_timer (flush_track_timer + drive, jiffies + 1);
                        local_irq_restore(flags);
-                       break;
                }
        }
-       CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
-       CURRENT->sector += CURRENT->current_nr_sectors;
 
-       end_request(CURRENT, 1);
-       goto repeat;
+       if (__blk_end_request_cur(rq, err))
+               goto next_segment;
+       goto next_req;
 }
 
 static void do_fd_request(struct request_queue * q)
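
The amiflop conversion above is one instance of the loop shape the new API
suggests for simple drivers: fetch a request, service one chunk, and let
__blk_end_request_cur() decide whether the same request continues
(example_do_request and handle_chunk are hypothetical):

        static int handle_chunk(struct request *rq); /* transfers one chunk */

        static void example_do_request(struct request_queue *q)
        {
                struct request *rq = blk_fetch_request(q);

                while (rq) {
                        int err = handle_chunk(rq);

                        if (!__blk_end_request_cur(rq, err))
                                rq = blk_fetch_request(q); /* rq done; next */
                }
        }
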
index 4234c11c1e4cfe34592c9830eca7d2a68400b052..f5e7180d7f47d050ed0e39a37ad748054658b52b 100644 (file)
@@ -79,9 +79,7 @@
 #undef DEBUG
 
 static struct request_queue *floppy_queue;
-
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
+static struct request *fd_request;
 
 /* Disk types: DD, HD, ED */
 static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
 static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
 static DEFINE_TIMER(fd_timer, check_change, 0, 0);
        
+static void fd_end_request_cur(int err)
+{
+       if (!__blk_end_request_cur(fd_request, err))
+               fd_request = NULL;
+}
+
 static inline void start_motor_off_timer(void)
 {
        mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
                return;
        }
 
-       if (!CURRENT)
+       if (!fd_request)
                return;
 
-       CURRENT->errors++;
-       if (CURRENT->errors >= MAX_ERRORS) {
+       fd_request->errors++;
+       if (fd_request->errors >= MAX_ERRORS) {
                printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
-               end_request(CURRENT, 0);
+               fd_end_request_cur(-EIO);
        }
-       else if (CURRENT->errors == RECALIBRATE_ERRORS) {
+       else if (fd_request->errors == RECALIBRATE_ERRORS) {
                printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
                if (SelectedDrive != -1)
                        SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
            if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
                if (ReqCmd == READ) {
                    copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
-                   if (++ReqCnt < CURRENT->current_nr_sectors) {
+                   if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
                        /* read next sector */
                        setup_req_params( drive );
                        goto repeat;
                    }
                    else {
                        /* all sectors finished */
-                       CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
-                       CURRENT->sector += CURRENT->current_nr_sectors;
-                       end_request(CURRENT, 1);
+                       fd_end_request_cur(0);
                        redo_fd_request();
                        return;
                    }
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
                }
        }
   
-       if (++ReqCnt < CURRENT->current_nr_sectors) {
+       if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
                /* read next sector */
                setup_req_params( SelectedDrive );
                do_fd_action( SelectedDrive );
        }
        else {
                /* all sectors finished */
-               CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
-               CURRENT->sector += CURRENT->current_nr_sectors;
-               end_request(CURRENT, 1);
+               fd_end_request_cur(0);
                redo_fd_request();
        }
        return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
        ReqData = ReqBuffer + 512 * ReqCnt;
 
        if (UseTrackbuffer)
-               read_track = (ReqCmd == READ && CURRENT->errors == 0);
+               read_track = (ReqCmd == READ && fd_request->errors == 0);
        else
                read_track = 0;
 
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
        int drive, type;
        struct atari_floppy_struct *floppy;
 
-       DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
-               CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
-               CURRENT ? CURRENT->sector : 0 ));
+       DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
+               fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
+               fd_request ? (long)blk_rq_pos(fd_request) : 0 ));
 
        IsFormatting = 0;
 
 repeat:
+       if (!fd_request) {
+               fd_request = blk_fetch_request(floppy_queue);
+               if (!fd_request)
+                       goto the_end;
+       }
 
-       if (!CURRENT)
-               goto the_end;
-
-       floppy = CURRENT->rq_disk->private_data;
+       floppy = fd_request->rq_disk->private_data;
        drive = floppy - unit;
        type = floppy->type;
        
        if (!UD.connected) {
                /* drive not connected */
                printk(KERN_ERR "Unknown Device: fd%d\n", drive );
-               end_request(CURRENT, 0);
+               fd_end_request_cur(-EIO);
                goto repeat;
        }
                
@@ -1430,12 +1432,12 @@ repeat:
                /* user supplied disk type */
                if (--type >= NUM_DISK_MINORS) {
                        printk(KERN_WARNING "fd%d: invalid disk format", drive );
-                       end_request(CURRENT, 0);
+                       fd_end_request_cur(-EIO);
                        goto repeat;
                }
                if (minor2disktype[type].drive_types > DriveType)  {
                        printk(KERN_WARNING "fd%d: unsupported disk format", drive );
-                       end_request(CURRENT, 0);
+                       fd_end_request_cur(-EIO);
                        goto repeat;
                }
                type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
                UD.autoprobe = 0;
        }
        
-       if (CURRENT->sector + 1 > UDT->blocks) {
-               end_request(CURRENT, 0);
+       if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
+               fd_end_request_cur(-EIO);
                goto repeat;
        }
 
@@ -1453,9 +1455,9 @@ repeat:
        del_timer( &motor_off_timer );
                
        ReqCnt = 0;
-       ReqCmd = rq_data_dir(CURRENT);
-       ReqBlock = CURRENT->sector;
-       ReqBuffer = CURRENT->buffer;
+       ReqCmd = rq_data_dir(fd_request);
+       ReqBlock = blk_rq_pos(fd_request);
+       ReqBuffer = fd_request->buffer;
        setup_req_params( drive );
        do_fd_action( drive );
 
index 4d4d5e0d3fa64af15faf87a3a78c2a3b0396ef9c..e714e7cce6f27b2a3ede8e0cccf5c2c569723acc 100644 (file)
@@ -1299,7 +1299,6 @@ static void cciss_softirq_done(struct request *rq)
 {
        CommandList_struct *cmd = rq->completion_data;
        ctlr_info_t *h = hba[cmd->ctlr];
-       unsigned int nr_bytes;
        unsigned long flags;
        u64bit temp64;
        int i, ddir;
@@ -1321,15 +1320,11 @@ static void cciss_softirq_done(struct request *rq)
        printk("Done with %p\n", rq);
 #endif                         /* CCISS_DEBUG */
 
-       /*
-        * Store the full size and set the residual count for pc requests
-        */
-       nr_bytes = blk_rq_bytes(rq);
+       /* set the residual count for pc requests */
        if (blk_pc_request(rq))
-               rq->data_len = cmd->err_info->ResidualCnt;
+               rq->resid_len = cmd->err_info->ResidualCnt;
 
-       if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
-               BUG();
+       blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
 
        spin_lock_irqsave(&h->lock, flags);
        cmd_free(h, cmd, 1);
@@ -2691,7 +2686,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
                        printk(KERN_WARNING "cciss: cmd %p has"
                               " completed with data underrun "
                               "reported\n", cmd);
-                       cmd->rq->data_len = cmd->err_info->ResidualCnt;
+                       cmd->rq->resid_len = cmd->err_info->ResidualCnt;
                }
                break;
        case CMD_DATA_OVERRUN:
@@ -2806,7 +2801,7 @@ static void do_cciss_request(struct request_queue *q)
                goto startio;
 
       queue:
-       creq = elv_next_request(q);
+       creq = blk_peek_request(q);
        if (!creq)
                goto startio;
 
@@ -2815,7 +2810,7 @@ static void do_cciss_request(struct request_queue *q)
        if ((c = cmd_alloc(h, 1)) == NULL)
                goto full;
 
-       blkdev_dequeue_request(creq);
+       blk_start_request(creq);
 
        spin_unlock_irq(q->queue_lock);
 
@@ -2840,10 +2835,10 @@ static void do_cciss_request(struct request_queue *q)
        c->Request.Timeout = 0; // Don't time out
        c->Request.CDB[0] =
            (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
-       start_blk = creq->sector;
+       start_blk = blk_rq_pos(creq);
 #ifdef CCISS_DEBUG
-       printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
-              (int)creq->nr_sectors);
+       printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n",
+              (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
 #endif                         /* CCISS_DEBUG */
 
        sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +2864,8 @@ static void do_cciss_request(struct request_queue *q)
                h->maxSG = seg;
 
 #ifdef CCISS_DEBUG
-       printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
-              creq->nr_sectors, seg);
+       printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+              blk_rq_sectors(creq), seg);
 #endif                         /* CCISS_DEBUG */
 
        c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +2877,8 @@ static void do_cciss_request(struct request_queue *q)
                        c->Request.CDB[4] = (start_blk >> 8) & 0xff;
                        c->Request.CDB[5] = start_blk & 0xff;
                        c->Request.CDB[6] = 0;  // (sect >> 24) & 0xff; MSB
-                       c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
-                       c->Request.CDB[8] = creq->nr_sectors & 0xff;
+                       c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+                       c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
                        c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
                } else {
                        u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +2893,10 @@ static void do_cciss_request(struct request_queue *q)
                        c->Request.CDB[7]= (start_blk >> 16) & 0xff;
                        c->Request.CDB[8]= (start_blk >>  8) & 0xff;
                        c->Request.CDB[9]= start_blk & 0xff;
-                       c->Request.CDB[10]= (creq->nr_sectors >>  24) & 0xff;
-                       c->Request.CDB[11]= (creq->nr_sectors >>  16) & 0xff;
-                       c->Request.CDB[12]= (creq->nr_sectors >>  8) & 0xff;
-                       c->Request.CDB[13]= creq->nr_sectors & 0xff;
+                       c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+                       c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+                       c->Request.CDB[12]= (blk_rq_sectors(creq) >>  8) & 0xff;
+                       c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
                        c->Request.CDB[14] = c->Request.CDB[15] = 0;
                }
        } else if (blk_pc_request(creq)) {
index ca268ca111598588ef52d9f7ca7e0fa87cfbde86..a02dcfc00f134e548cf15b0fbca1f515962d500f 100644 (file)
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
                goto startio;
 
 queue_next:
-       creq = elv_next_request(q);
+       creq = blk_peek_request(q);
        if (!creq)
                goto startio;
 
@@ -912,17 +912,18 @@ queue_next:
        if ((c = cmd_alloc(h,1)) == NULL)
                goto startio;
 
-       blkdev_dequeue_request(creq);
+       blk_start_request(creq);
 
        c->ctlr = h->ctlr;
        c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
        c->hdr.size = sizeof(rblk_t) >> 2;
        c->size += sizeof(rblk_t);
 
-       c->req.hdr.blk = creq->sector;
+       c->req.hdr.blk = blk_rq_pos(creq);
        c->rq = creq;
 DBGPX(
-       printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+       printk("sector=%llu, nr_sectors=%u\n",
+              (unsigned long long)blk_rq_pos(creq), blk_rq_sectors(creq));
 );
        sg_init_table(tmp_sg, SG_MAX);
        seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
                                                 tmp_sg[i].offset,
                                                 tmp_sg[i].length, dir);
        }
-DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
        c->req.hdr.sg_cnt = seg;
-       c->req.hdr.blk_cnt = creq->nr_sectors;
+       c->req.hdr.blk_cnt = blk_rq_sectors(creq);
        c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
        c->type = CMD_RWREQ;
 
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
                                cmd->req.sg[i].size, ddir);
 
        DBGPX(printk("Done with %p\n", rq););
-       if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-               BUG();
+       __blk_end_request_all(rq, error);
 }
 
 /*
index 1300df6f1642a9a84fb345c22f9f88850cd1fe29..90877fee0ee006a61785b8807feb97ede6def977 100644 (file)
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
        del_timer(&fd_timeout);
        cont = NULL;
        clear_bit(0, &fdc_busy);
-       if (elv_next_request(floppy_queue))
+       if (current_req || blk_peek_request(floppy_queue))
                do_fd_request(floppy_queue);
        spin_unlock_irqrestore(&floppy_lock, flags);
        wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
 
        /* current_count_sectors can be zero if transfer failed */
        if (error)
-               nr_sectors = req->current_nr_sectors;
+               nr_sectors = blk_rq_cur_sectors(req);
        if (__blk_end_request(req, error, nr_sectors << 9))
                return;
 
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
        if (uptodate) {
                /* maintain values for invalidation on geometry
                 * change */
-               block = current_count_sectors + req->sector;
+               block = current_count_sectors + blk_rq_pos(req);
                INFBOUND(DRS->maxblock, block);
                if (block > _floppy->sect)
                        DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
                        /* record write error information */
                        DRWE->write_errors++;
                        if (DRWE->write_errors == 1) {
-                               DRWE->first_error_sector = req->sector;
+                               DRWE->first_error_sector = blk_rq_pos(req);
                                DRWE->first_error_generation = DRS->generation;
                        }
-                       DRWE->last_error_sector = req->sector;
+                       DRWE->last_error_sector = blk_rq_pos(req);
                        DRWE->last_error_generation = DRS->generation;
                }
                spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
        max_sector = transfer_size(ssize,
                                   min(max_sector, max_sector_2),
-                                  current_req->nr_sectors);
+                                  blk_rq_sectors(current_req));
 
        if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
-           buffer_max > fsector_t + current_req->nr_sectors)
+           buffer_max > fsector_t + blk_rq_sectors(current_req))
                current_count_sectors = min_t(int, buffer_max - fsector_t,
-                                             current_req->nr_sectors);
+                                             blk_rq_sectors(current_req));
 
        remaining = current_count_sectors << 9;
 #ifdef FLOPPY_SANITY_CHECK
-       if ((remaining >> 9) > current_req->nr_sectors &&
-           CT(COMMAND) == FD_WRITE) {
+       if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
                DPRINT("in copy buffer\n");
                printk("current_count_sectors=%ld\n", current_count_sectors);
                printk("remaining=%d\n", remaining >> 9);
-               printk("current_req->nr_sectors=%ld\n",
-                      current_req->nr_sectors);
+               printk("current_req->nr_sectors=%u\n",
+                      blk_rq_sectors(current_req));
                printk("current_req->current_nr_sectors=%u\n",
-                      current_req->current_nr_sectors);
+                      blk_rq_cur_sectors(current_req));
                printk("max_sector=%d\n", max_sector);
                printk("ssize=%d\n", ssize);
        }
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
        dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
 
-       size = current_req->current_nr_sectors << 9;
+       size = blk_rq_cur_bytes(current_req);
 
        rq_for_each_segment(bv, current_req, iter) {
                if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
 
        max_sector = _floppy->sect * _floppy->head;
 
-       TRACK = (int)current_req->sector / max_sector;
-       fsector_t = (int)current_req->sector % max_sector;
+       TRACK = (int)blk_rq_pos(current_req) / max_sector;
+       fsector_t = (int)blk_rq_pos(current_req) % max_sector;
        if (_floppy->track && TRACK >= _floppy->track) {
-               if (current_req->current_nr_sectors & 1) {
+               if (blk_rq_cur_sectors(current_req) & 1) {
                        current_count_sectors = 1;
                        return 1;
                } else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
                if (fsector_t >= max_sector) {
                        current_count_sectors =
                            min_t(int, _floppy->sect - fsector_t,
-                                 current_req->nr_sectors);
+                                 blk_rq_sectors(current_req));
                        return 1;
                }
                SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
 
        in_sector_offset = (fsector_t % _floppy->sect) % ssize;
        aligned_sector_t = fsector_t - in_sector_offset;
-       max_size = current_req->nr_sectors;
+       max_size = blk_rq_sectors(current_req);
        if ((raw_cmd->track == buffer_track) &&
            (current_drive == buffer_drive) &&
            (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
                        copy_buffer(1, max_sector, buffer_max);
                        return 1;
                }
-       } else if (in_sector_offset || current_req->nr_sectors < ssize) {
+       } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
                if (CT(COMMAND) == FD_WRITE) {
-                       if (fsector_t + current_req->nr_sectors > ssize &&
-                           fsector_t + current_req->nr_sectors < ssize + ssize)
+                       if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+                           fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
                                max_size = ssize + ssize;
                        else
                                max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
                    (indirect * 2 > direct * 3 &&
                     *errors < DP->max_errors.read_track && ((!probing
                       || (DP->read_track & (1 << DRS->probed_format)))))) {
-                       max_size = current_req->nr_sectors;
+                       max_size = blk_rq_sectors(current_req);
                } else {
                        raw_cmd->kernel_data = current_req->buffer;
                        raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
            fsector_t > buffer_max ||
            fsector_t < buffer_min ||
            ((CT(COMMAND) == FD_READ ||
-             (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+             (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
             max_sector > 2 * max_buffer_sectors + buffer_min &&
             max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
            /* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
                                printk("write\n");
                        return 0;
                }
-       } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
-                  current_count_sectors > current_req->nr_sectors) {
+       } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
+                  current_count_sectors > blk_rq_sectors(current_req)) {
                DPRINT("buffer overrun in direct transfer\n");
                return 0;
        } else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
                        struct request *req;
 
                        spin_lock_irq(floppy_queue->queue_lock);
-                       req = elv_next_request(floppy_queue);
+                       req = blk_fetch_request(floppy_queue);
                        spin_unlock_irq(floppy_queue->queue_lock);
                        if (!req) {
                                do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
        if (usage_count == 0) {
                printk("warning: usage count=0, current_req=%p exiting\n",
                       current_req);
-               printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
-                      current_req->cmd_type, current_req->cmd_flags);
+               printk("sect=%ld type=%x flags=%x\n",
+                      (long)blk_rq_pos(current_req), current_req->cmd_type,
+                      current_req->cmd_flags);
                return;
        }
        if (test_bit(0, &fdc_busy)) {
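
The floppy.c hunks above are a mechanical translation from raw struct request fields to the new accessor helpers. A minimal sketch of the mapping, for orientation only (the accessors are the real helpers from <linux/blkdev.h> introduced by this series; the function itself is illustrative, not part of the patch):

/* not part of the patch: old field -> new accessor, side by side */
static void show_rq_accessors(struct request *rq)
{
	sector_t pos      = blk_rq_pos(rq);         /* was rq->sector */
	unsigned int secs = blk_rq_sectors(rq);     /* was rq->nr_sectors */
	unsigned int cur  = blk_rq_cur_sectors(rq); /* was rq->current_nr_sectors */
	unsigned int len  = blk_rq_bytes(rq);       /* was rq->nr_sectors << 9 */
	unsigned int clen = blk_rq_cur_bytes(rq);   /* was rq->current_nr_sectors << 9 */

	printk(KERN_DEBUG "pos=%llu secs=%u cur=%u bytes=%u cur_bytes=%u\n",
	       (unsigned long long)pos, secs, cur, len, clen);
}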
index baaa9e486e508d354b1818f2ec427c32691c1db9..961de56d00a946946a785a7f3508ac35d0633ff9 100644 (file)
 
 static DEFINE_SPINLOCK(hd_lock);
 static struct request_queue *hd_queue;
+static struct request *hd_req;
 
 #define MAJOR_NR HD_MAJOR
-#define QUEUE (hd_queue)
-#define CURRENT elv_next_request(hd_queue)
 
 #define TIMEOUT_VALUE  (6*HZ)
 #define        HD_DELAY        0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
        NR_HD = hdind+1;
 }
 
+static bool hd_end_request(int err, unsigned int bytes)
+{
+       if (__blk_end_request(hd_req, err, bytes))
+               return true;
+       hd_req = NULL;
+       return false;
+}
+
+static bool hd_end_request_cur(int err)
+{
+       return hd_end_request(err, blk_rq_cur_bytes(hd_req));
+}
+
 static void dump_status(const char *msg, unsigned int stat)
 {
        char *name = "hd?";
-       if (CURRENT)
-               name = CURRENT->rq_disk->disk_name;
+       if (hd_req)
+               name = hd_req->rq_disk->disk_name;
 
 #ifdef VERBOSE_ERRORS
        printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
                if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
                        printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
                                inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
-                       if (CURRENT)
-                               printk(", sector=%ld", CURRENT->sector);
+                       if (hd_req)
+                               printk(", sector=%ld", blk_rq_pos(hd_req));
                }
                printk("\n");
        }
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
  */
 static void bad_rw_intr(void)
 {
-       struct request *req = CURRENT;
+       struct request *req = hd_req;
+
        if (req != NULL) {
                struct hd_i_struct *disk = req->rq_disk->private_data;
                if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
-                       end_request(req, 0);
+                       hd_end_request_cur(-EIO);
                        disk->special_op = disk->recalibrate = 1;
                } else if (req->errors % RESET_FREQ == 0)
                        reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
        bad_rw_intr();
        hd_request();
        return;
+
 ok_to_read:
-       req = CURRENT;
+       req = hd_req;
        insw(HD_DATA, req->buffer, 256);
-       req->sector++;
-       req->buffer += 512;
-       req->errors = 0;
-       i = --req->nr_sectors;
-       --req->current_nr_sectors;
 #ifdef DEBUG
-       printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-               req->rq_disk->disk_name, req->sector, req->nr_sectors,
-               req->buffer+512);
+       printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+              req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+              blk_rq_sectors(req) - 1, req->buffer+512);
 #endif
-       if (req->current_nr_sectors <= 0)
-               end_request(req, 1);
-       if (i > 0) {
+       if (hd_end_request(0, 512)) {
                SET_HANDLER(&read_intr);
                return;
        }
+
        (void) inb_p(HD_STATUS);
 #if (HD_DELAY > 0)
        last_req = read_timer();
 #endif
-       if (elv_next_request(QUEUE))
-               hd_request();
-       return;
+       hd_request();
 }
 
 static void write_intr(void)
 {
-       struct request *req = CURRENT;
+       struct request *req = hd_req;
        int i;
        int retries = 100000;
 
@@ -492,30 +498,25 @@ static void write_intr(void)
                        continue;
                if (!OK_STATUS(i))
                        break;
-               if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+               if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
                        goto ok_to_write;
        } while (--retries > 0);
        dump_status("write_intr", i);
        bad_rw_intr();
        hd_request();
        return;
+
 ok_to_write:
-       req->sector++;
-       i = --req->nr_sectors;
-       --req->current_nr_sectors;
-       req->buffer += 512;
-       if (!i || (req->bio && req->current_nr_sectors <= 0))
-               end_request(req, 1);
-       if (i > 0) {
+       if (hd_end_request(0, 512)) {
                SET_HANDLER(&write_intr);
                outsw(HD_DATA, req->buffer, 256);
-       } else {
+               return;
+       }
+
 #if (HD_DELAY > 0)
-               last_req = read_timer();
+       last_req = read_timer();
 #endif
-               hd_request();
-       }
-       return;
+       hd_request();
 }
 
 static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
 
        do_hd = NULL;
 
-       if (!CURRENT)
+       if (!hd_req)
                return;
 
        spin_lock_irq(hd_queue->queue_lock);
        reset = 1;
-       name = CURRENT->rq_disk->disk_name;
+       name = hd_req->rq_disk->disk_name;
        printk("%s: timeout\n", name);
-       if (++CURRENT->errors >= MAX_ERRORS) {
+       if (++hd_req->errors >= MAX_ERRORS) {
 #ifdef DEBUG
                printk("%s: too many errors\n", name);
 #endif
-               end_request(CURRENT, 0);
+               hd_end_request_cur(-EIO);
        }
        hd_request();
        spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
        }
        if (disk->head > 16) {
                printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
-               end_request(req, 0);
+               hd_end_request_cur(-EIO);
        }
        disk->special_op = 0;
        return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
 repeat:
        del_timer(&device_timer);
 
-       req = CURRENT;
-       if (!req) {
-               do_hd = NULL;
-               return;
+       if (!hd_req) {
+               hd_req = blk_fetch_request(hd_queue);
+               if (!hd_req) {
+                       do_hd = NULL;
+                       return;
+               }
        }
+       req = hd_req;
 
        if (reset) {
                reset_hd();
                return;
        }
        disk = req->rq_disk->private_data;
-       block = req->sector;
-       nsect = req->nr_sectors;
+       block = blk_rq_pos(req);
+       nsect = blk_rq_sectors(req);
        if (block >= get_capacity(req->rq_disk) ||
            ((block+nsect) > get_capacity(req->rq_disk))) {
                printk("%s: bad access: block=%d, count=%d\n",
                        req->rq_disk->disk_name, block, nsect);
-               end_request(req, 0);
+               hd_end_request_cur(-EIO);
                goto repeat;
        }
 
@@ -647,7 +651,7 @@ repeat:
                        break;
                default:
                        printk("unknown hd-command\n");
-                       end_request(req, 0);
+                       hd_end_request_cur(-EIO);
                        break;
                }
        }
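
hd.c stops peeking at the queue head through the CURRENT macro on every interrupt and instead fetches a request once, carrying it in hd_req until it is fully completed. A minimal sketch of that pattern under the new API (the drv_* names are illustrative, not from the patch):

static struct request *drv_req;	/* in-flight request, like hd_req above */

/* complete `bytes` of drv_req; true while the request has more to do.
 * caller holds the queue lock, as hd.c's interrupt paths do. */
static bool drv_end_request(int err, unsigned int bytes)
{
	if (__blk_end_request(drv_req, err, bytes))
		return true;
	drv_req = NULL;		/* fully completed; the block layer freed it */
	return false;
}

static void drv_request_fn(struct request_queue *q)
{
	if (!drv_req) {
		/* blk_fetch_request() dequeues, unlike elv_next_request() */
		drv_req = blk_fetch_request(q);
		if (!drv_req)
			return;
	}
	/* issue drv_req; the IRQ handler completes it one sector at a
	 * time via drv_end_request(0, 512) and re-runs this function */
}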
index ddae80825899ae0459bff4a8109d8fc3468868c3..801f4ab83302556e9b48f1ddf139201a05e8c304 100644 (file)
@@ -511,11 +511,7 @@ out:
  */
 static void loop_add_bio(struct loop_device *lo, struct bio *bio)
 {
-       if (lo->lo_biotail) {
-               lo->lo_biotail->bi_next = bio;
-               lo->lo_biotail = bio;
-       } else
-               lo->lo_bio = lo->lo_biotail = bio;
+       bio_list_add(&lo->lo_bio_list, bio);
 }
 
 /*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
  */
 static struct bio *loop_get_bio(struct loop_device *lo)
 {
-       struct bio *bio;
-
-       if ((bio = lo->lo_bio)) {
-               if (bio == lo->lo_biotail)
-                       lo->lo_biotail = NULL;
-               lo->lo_bio = bio->bi_next;
-               bio->bi_next = NULL;
-       }
-
-       return bio;
+       return bio_list_pop(&lo->lo_bio_list);
 }
 
 static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
 
        set_user_nice(current, -20);
 
-       while (!kthread_should_stop() || lo->lo_bio) {
+       while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
 
                wait_event_interruptible(lo->lo_event,
-                               lo->lo_bio || kthread_should_stop());
+                               !bio_list_empty(&lo->lo_bio_list) ||
+                               kthread_should_stop());
 
-               if (!lo->lo_bio)
+               if (bio_list_empty(&lo->lo_bio_list))
                        continue;
                spin_lock_irq(&lo->lo_lock);
                bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
                goto out_putf;
 
-       /* new backing store needs to support loop (eg splice_read) */
-       if (!inode->i_fop->splice_read)
-               goto out_putf;
-
        /* size of the new backing store needs to be the same */
        if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
                goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        error = -EINVAL;
        if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                const struct address_space_operations *aops = mapping->a_ops;
-               /*
-                * If we can't read - sorry. If we only can't write - well,
-                * it's going to be read-only.
-                */
-               if (!file->f_op->splice_read)
-                       goto out_putf;
+
                if (aops->write_begin)
                        lo_flags |= LO_FLAGS_USE_AOPS;
                if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        lo->old_gfp_mask = mapping_gfp_mask(mapping);
        mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 
-       lo->lo_bio = lo->lo_biotail = NULL;
+       bio_list_init(&lo->lo_bio_list);
 
        /*
         * set queue make_request_fn, and add limits based on lower level
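
loop.c retires its hand-rolled lo_bio/lo_biotail chain in favour of the generic bio_list helpers. The API as used above, in a minimal sketch (bio_list_init/add/pop/empty are the real helpers from <linux/bio.h>; the wrappers are illustrative):

#include <linux/bio.h>

static struct bio_list pending;	/* illustrative queue, like lo_bio_list */

static void pending_init(void)
{
	bio_list_init(&pending);	/* head = tail = NULL */
}

static void pending_push(struct bio *bio)
{
	bio_list_add(&pending, bio);	/* append at the tail */
}

static struct bio *pending_pop(void)
{
	/* detaches the head; returns NULL on an empty list */
	return bio_list_pop(&pending);
}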
index f3898353d0a8f9dc6d328ace0e06689ae366c214..c0cd0a03f698590083759952b33a84bb3bba2c66 100644 (file)
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
-#include <linux/libata.h>
+#include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/mg_disk.h>
 
 #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
 
+/* name for block device */
+#define MG_DISK_NAME "mgd"
+/* name for platform device */
+#define MG_DEV_NAME "mg_disk"
+
+#define MG_DISK_MAJ 0
+#define MG_DISK_MAX_PART 16
+#define MG_SECTOR_SIZE 512
+#define MG_MAX_SECTS 256
+
+/* Register offsets */
+#define MG_BUFF_OFFSET                 0x8000
+#define MG_STORAGE_BUFFER_SIZE         0x200
+#define MG_REG_OFFSET                  0xC000
+#define MG_REG_FEATURE                 (MG_REG_OFFSET + 2)     /* write case */
+#define MG_REG_ERROR                   (MG_REG_OFFSET + 2)     /* read case */
+#define MG_REG_SECT_CNT                        (MG_REG_OFFSET + 4)
+#define MG_REG_SECT_NUM                        (MG_REG_OFFSET + 6)
+#define MG_REG_CYL_LOW                 (MG_REG_OFFSET + 8)
+#define MG_REG_CYL_HIGH                        (MG_REG_OFFSET + 0xA)
+#define MG_REG_DRV_HEAD                        (MG_REG_OFFSET + 0xC)
+#define MG_REG_COMMAND                 (MG_REG_OFFSET + 0xE)   /* write case */
+#define MG_REG_STATUS                  (MG_REG_OFFSET + 0xE)   /* read  case */
+#define MG_REG_DRV_CTRL                        (MG_REG_OFFSET + 0x10)
+#define MG_REG_BURST_CTRL              (MG_REG_OFFSET + 0x12)
+
+/* handy status */
+#define MG_STAT_READY  (ATA_DRDY | ATA_DSC)
+#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
+                                ATA_ERR))) == MG_STAT_READY)
+
+/* error code for others */
+#define MG_ERR_NONE            0
+#define MG_ERR_TIMEOUT         0x100
+#define MG_ERR_INIT_STAT       0x101
+#define MG_ERR_TRANSLATION     0x102
+#define MG_ERR_CTRL_RST                0x103
+#define MG_ERR_INV_STAT                0x104
+#define MG_ERR_RSTOUT          0x105
+
+#define MG_MAX_ERRORS  6       /* Max read/write errors */
+
+/* command */
+#define MG_CMD_RD 0x20
+#define MG_CMD_WR 0x30
+#define MG_CMD_SLEEP 0x99
+#define MG_CMD_WAKEUP 0xC3
+#define MG_CMD_ID 0xEC
+#define MG_CMD_WR_CONF 0x3C
+#define MG_CMD_RD_CONF 0x40
+
+/* operation mode */
+#define MG_OP_CASCADE (1 << 0)
+#define MG_OP_CASCADE_SYNC_RD (1 << 1)
+#define MG_OP_CASCADE_SYNC_WR (1 << 2)
+#define MG_OP_INTERLEAVE (1 << 3)
+
+/* synchronous */
+#define MG_BURST_LAT_4 (3 << 4)
+#define MG_BURST_LAT_5 (4 << 4)
+#define MG_BURST_LAT_6 (5 << 4)
+#define MG_BURST_LAT_7 (6 << 4)
+#define MG_BURST_LAT_8 (7 << 4)
+#define MG_BURST_LEN_4 (1 << 1)
+#define MG_BURST_LEN_8 (2 << 1)
+#define MG_BURST_LEN_16 (3 << 1)
+#define MG_BURST_LEN_32 (4 << 1)
+#define MG_BURST_LEN_CONT (0 << 1)
+
+/* timeout value (unit: ms) */
+#define MG_TMAX_CONF_TO_CMD    1
+#define MG_TMAX_WAIT_RD_DRQ    10
+#define MG_TMAX_WAIT_WR_DRQ    500
+#define MG_TMAX_RST_TO_BUSY    10
+#define MG_TMAX_HDRST_TO_RDY   500
+#define MG_TMAX_SWRST_TO_RDY   500
+#define MG_TMAX_RSTOUT         3000
+
+/* device attribution */
+/* use mflash as boot device */
+#define MG_BOOT_DEV            (1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV         (1 << 1)
+/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST        (1 << 2)
+
+#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
+
+/* names of GPIO resource */
+#define MG_RST_PIN     "mg_rst"
+/* except MG_BOOT_DEV, reset-out pin should be assigned */
+#define MG_RSTOUT_PIN  "mg_rstout"
+
+/* private driver data */
+struct mg_drv_data {
+       /* disk resource */
+       u32 use_polling;
+
+       /* device attribution */
+       u32 dev_attr;
+
+       /* internally used */
+       struct mg_host *host;
+};
+
+/* main structure for mflash driver */
+struct mg_host {
+       struct device *dev;
+
+       struct request_queue *breq;
+       struct request *req;
+       spinlock_t lock;
+       struct gendisk *gd;
+
+       struct timer_list timer;
+       void (*mg_do_intr) (struct mg_host *);
+
+       u16 id[ATA_ID_WORDS];
+
+       u16 cyls;
+       u16 heads;
+       u16 sectors;
+       u32 n_sectors;
+       u32 nres_sectors;
+
+       void __iomem *dev_base;
+       unsigned int irq;
+       unsigned int rst;
+       unsigned int rstout;
+
+       u32 major;
+       u32 error;
+};
+
+/*
+ * Debugging macro and defines
+ */
+#undef DO_MG_DEBUG
+#ifdef DO_MG_DEBUG
+#  define MG_DBG(fmt, args...) \
+       printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
+#else /* DO_MG_DEBUG */
+#  define MG_DBG(fmt, args...) do { } while (0)
+#endif /* DO_MG_DEBUG */
+
 static void mg_request(struct request_queue *);
 
+static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
+{
+       if (__blk_end_request(host->req, err, nr_bytes))
+               return true;
+
+       host->req = NULL;
+       return false;
+}
+
+static bool mg_end_request_cur(struct mg_host *host, int err)
+{
+       return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
+}
+
 static void mg_dump_status(const char *msg, unsigned int stat,
                struct mg_host *host)
 {
        char *name = MG_DISK_NAME;
-       struct request *req;
 
-       if (host->breq) {
-               req = elv_next_request(host->breq);
-               if (req)
-                       name = req->rq_disk->disk_name;
-       }
+       if (host->req)
+               name = host->req->rq_disk->disk_name;
 
        printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
-       if (stat & MG_REG_STATUS_BIT_BUSY)
+       if (stat & ATA_BUSY)
                printk("Busy ");
-       if (stat & MG_REG_STATUS_BIT_READY)
+       if (stat & ATA_DRDY)
                printk("DriveReady ");
-       if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
+       if (stat & ATA_DF)
                printk("WriteFault ");
-       if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
+       if (stat & ATA_DSC)
                printk("SeekComplete ");
-       if (stat & MG_REG_STATUS_BIT_DATA_REQ)
+       if (stat & ATA_DRQ)
                printk("DataRequest ");
-       if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
+       if (stat & ATA_CORR)
                printk("CorrectedError ");
-       if (stat & MG_REG_STATUS_BIT_ERROR)
+       if (stat & ATA_ERR)
                printk("Error ");
        printk("}\n");
-       if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
+       if ((stat & ATA_ERR) == 0) {
                host->error = 0;
        } else {
                host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
                printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
                                host->error & 0xff);
-               if (host->error & MG_REG_ERR_BBK)
+               if (host->error & ATA_BBK)
                        printk("BadSector ");
-               if (host->error & MG_REG_ERR_UNC)
+               if (host->error & ATA_UNC)
                        printk("UncorrectableError ");
-               if (host->error & MG_REG_ERR_IDNF)
+               if (host->error & ATA_IDNF)
                        printk("SectorIdNotFound ");
-               if (host->error & MG_REG_ERR_ABRT)
+               if (host->error & ATA_ABORTED)
                        printk("DriveStatusError ");
-               if (host->error & MG_REG_ERR_AMNF)
+               if (host->error & ATA_AMNF)
                        printk("AddrMarkNotFound ");
                printk("}");
-               if (host->error &
-                               (MG_REG_ERR_BBK | MG_REG_ERR_UNC |
-                                MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
-                       if (host->breq) {
-                               req = elv_next_request(host->breq);
-                               if (req)
-                                       printk(", sector=%u", (u32)req->sector);
-                       }
-
+               if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
+                       if (host->req)
+                               printk(", sector=%u",
+                                      (unsigned int)blk_rq_pos(host->req));
                }
                printk("\n");
        }
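
The driver-private MG_REG_STATUS_BIT_* and MG_REG_ERR_* constants are gone; status decoding now uses the generic ATA_* bits from <linux/ata.h>. What MG_READY_OK() above accepts, restated as a sketch (the function name is illustrative):

/* illustrative: ready means DRDY and DSC set with BUSY, DF, ERR clear */
static bool mg_status_is_ready(u32 stat)
{
	return (stat & (MG_STAT_READY | ATA_BUSY | ATA_DF | ATA_ERR))
		== MG_STAT_READY;
}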
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
 
        do {
                cur_jiffies = jiffies;
-               if (status & MG_REG_STATUS_BIT_BUSY) {
-                       if (expect == MG_REG_STATUS_BIT_BUSY)
+               if (status & ATA_BUSY) {
+                       if (expect == ATA_BUSY)
                                break;
                } else {
                        /* Check the error condition! */
-                       if (status & MG_REG_STATUS_BIT_ERROR) {
+                       if (status & ATA_ERR) {
                                mg_dump_status("mg_wait", status, host);
                                break;
                        }
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
                                if (MG_READY_OK(status))
                                        break;
 
-                       if (expect == MG_REG_STATUS_BIT_DATA_REQ)
-                               if (status & MG_REG_STATUS_BIT_DATA_REQ)
+                       if (expect == ATA_DRQ)
+                               if (status & ATA_DRQ)
                                        break;
                }
                if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+/* local copy of ata_id_string() */
+static void mg_id_string(const u16 *id, unsigned char *s,
+                        unsigned int ofs, unsigned int len)
+{
+       unsigned int c;
+
+       BUG_ON(len & 1);
+
+       while (len > 0) {
+               c = id[ofs] >> 8;
+               *s = c;
+               s++;
+
+               c = id[ofs] & 0xff;
+               *s = c;
+               s++;
+
+               ofs++;
+               len -= 2;
+       }
+}
+
+/* local copy of ata_id_c_string() */
+static void mg_id_c_string(const u16 *id, unsigned char *s,
+                          unsigned int ofs, unsigned int len)
+{
+       unsigned char *p;
+
+       mg_id_string(id, s, ofs, len - 1);
+
+       p = s + strnlen(s, len - 1);
+       while (p > s && p[-1] == ' ')
+               p--;
+       *p = '\0';
+}
+
 static int mg_get_disk_id(struct mg_host *host)
 {
        u32 i;
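
mg_id_string()/mg_id_c_string() are local copies of the libata helpers, added so the driver can drop <linux/libata.h>. ATA identify data packs strings as big-endian byte pairs inside 16-bit words; the _c_ variant additionally trims trailing spaces and NUL-terminates. A usage sketch mirroring mg_get_disk_id() below (the function is illustrative):

static void example_print_model(const u16 *id)
{
	char model[ATA_ID_PROD_LEN + 1];

	/* unswaps each word's bytes, trims trailing spaces, adds '\0' */
	mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	printk(KERN_INFO "model: %s\n", model);
}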
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
        char serial[ATA_ID_SERNO_LEN + 1];
 
        if (!prv_data->use_polling)
-               outb(MG_REG_CTRL_INTR_DISABLE,
-                               (unsigned long)host->dev_base +
-                               MG_REG_DRV_CTRL);
+               outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
 
        outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
-       err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
+       err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
        if (err)
                return err;
 
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
                host->n_sectors -= host->nres_sectors;
        }
 
-       ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
-       ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
-       ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
+       mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
+       mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+       mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
        printk(KERN_INFO "mg_disk: model: %s\n", model);
        printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
        printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
                        host->n_sectors, host->nres_sectors);
 
        if (!prv_data->use_polling)
-               outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
-                               MG_REG_DRV_CTRL);
+               outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
 
        return err;
 }
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
 
        /* hdd rst low */
        gpio_set_value(host->rst, 0);
-       err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+       err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
        if (err)
                return err;
 
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
                return err;
 
        /* soft reset on */
-       outb(MG_REG_CTRL_RESET |
-                       (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
-                        MG_REG_CTRL_INTR_ENABLE),
+       outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
                        (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
-       err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
+       err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
        if (err)
                return err;
 
        /* soft reset off */
-       outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
-                       MG_REG_CTRL_INTR_ENABLE,
+       outb(prv_data->use_polling ? ATA_NIEN : 0,
                        (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
        err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
        if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
 
 static void mg_bad_rw_intr(struct mg_host *host)
 {
-       struct request *req = elv_next_request(host->breq);
-       if (req != NULL)
-               if (++req->errors >= MG_MAX_ERRORS ||
-                               host->error == MG_ERR_TIMEOUT)
-                       end_request(req, 0);
+       if (host->req)
+               if (++host->req->errors >= MG_MAX_ERRORS ||
+                   host->error == MG_ERR_TIMEOUT)
+                       mg_end_request_cur(host, -EIO);
 }
 
 static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
                        MG_REG_CYL_LOW);
        outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
                        MG_REG_CYL_HIGH);
-       outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
+       outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
                        (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
        outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
        return MG_ERR_NONE;
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
 
 static void mg_read(struct request *req)
 {
-       u32 remains, j;
+       u32 j;
        struct mg_host *host = req->rq_disk->private_data;
 
-       remains = req->nr_sectors;
-
-       if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
-                       MG_ERR_NONE)
+       if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+                  MG_CMD_RD, NULL) != MG_ERR_NONE)
                mg_bad_rw_intr(host);
 
        MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-                       remains, req->sector, req->buffer);
+              blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+       do {
+               u16 *buff = (u16 *)req->buffer;
 
-       while (remains) {
-               if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
-                                       MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
+               if (mg_wait(host, ATA_DRQ,
+                           MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return;
                }
-               for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
-                       *(u16 *)req->buffer =
-                               inw((unsigned long)host->dev_base +
-                                               MG_BUFF_OFFSET + (j << 1));
-                       req->buffer += 2;
-               }
-
-               req->sector++;
-               req->errors = 0;
-               remains = --req->nr_sectors;
-               --req->current_nr_sectors;
-
-               if (req->current_nr_sectors <= 0) {
-                       MG_DBG("remain : %d sects\n", remains);
-                       end_request(req, 1);
-                       if (remains > 0)
-                               req = elv_next_request(host->breq);
-               }
+               for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+                       *buff++ = inw((unsigned long)host->dev_base +
+                                     MG_BUFF_OFFSET + (j << 1));
 
                outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
                                MG_REG_COMMAND);
-       }
+       } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
 static void mg_write(struct request *req)
 {
-       u32 remains, j;
+       u32 j;
        struct mg_host *host = req->rq_disk->private_data;
 
-       remains = req->nr_sectors;
-
-       if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
-                       MG_ERR_NONE) {
+       if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+                  MG_CMD_WR, NULL) != MG_ERR_NONE) {
                mg_bad_rw_intr(host);
                return;
        }
 
-
        MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-                       remains, req->sector, req->buffer);
-       while (remains) {
-               if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
-                                       MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+              blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+
+       do {
+               u16 *buff = (u16 *)req->buffer;
+
+               if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return;
                }
-               for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
-                       outw(*(u16 *)req->buffer,
-                                       (unsigned long)host->dev_base +
-                                       MG_BUFF_OFFSET + (j << 1));
-                       req->buffer += 2;
-               }
-               req->sector++;
-               remains = --req->nr_sectors;
-               --req->current_nr_sectors;
-
-               if (req->current_nr_sectors <= 0) {
-                       MG_DBG("remain : %d sects\n", remains);
-                       end_request(req, 1);
-                       if (remains > 0)
-                               req = elv_next_request(host->breq);
-               }
+               for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+                       outw(*buff++, (unsigned long)host->dev_base +
+                                     MG_BUFF_OFFSET + (j << 1));
 
                outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                                MG_REG_COMMAND);
-       }
+       } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
 static void mg_read_intr(struct mg_host *host)
 {
+       struct request *req = host->req;
        u32 i;
-       struct request *req;
+       u16 *buff;
 
        /* check status */
        do {
                i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
-               if (i & MG_REG_STATUS_BIT_BUSY)
+               if (i & ATA_BUSY)
                        break;
                if (!MG_READY_OK(i))
                        break;
-               if (i & MG_REG_STATUS_BIT_DATA_REQ)
+               if (i & ATA_DRQ)
                        goto ok_to_read;
        } while (0);
        mg_dump_status("mg_read_intr", i, host);
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
 
 ok_to_read:
        /* get current segment of request */
-       req = elv_next_request(host->breq);
+       buff = (u16 *)req->buffer;
 
        /* read 1 sector */
-       for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
-               *(u16 *)req->buffer =
-                       inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
-                                       (i << 1));
-               req->buffer += 2;
-       }
+       for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+               *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+                             (i << 1));
 
-       /* manipulate request */
        MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-                       req->sector, req->nr_sectors - 1, req->buffer);
-
-       req->sector++;
-       req->errors = 0;
-       i = --req->nr_sectors;
-       --req->current_nr_sectors;
-
-       /* let know if current segment done */
-       if (req->current_nr_sectors <= 0)
-               end_request(req, 1);
-
-       /* set handler if read remains */
-       if (i > 0) {
-               host->mg_do_intr = mg_read_intr;
-               mod_timer(&host->timer, jiffies + 3 * HZ);
-       }
+              blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
 
        /* send read confirm */
        outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-       /* goto next request */
-       if (!i)
+       if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
+               /* set handler if read remains */
+               host->mg_do_intr = mg_read_intr;
+               mod_timer(&host->timer, jiffies + 3 * HZ);
+       } else /* goto next request */
                mg_request(host->breq);
 }
 
 static void mg_write_intr(struct mg_host *host)
 {
+       struct request *req = host->req;
        u32 i, j;
        u16 *buff;
-       struct request *req;
-
-       /* get current segment of request */
-       req = elv_next_request(host->breq);
+       bool rem;
 
        /* check status */
        do {
                i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
-               if (i & MG_REG_STATUS_BIT_BUSY)
+               if (i & ATA_BUSY)
                        break;
                if (!MG_READY_OK(i))
                        break;
-               if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
+               if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
                        goto ok_to_write;
        } while (0);
        mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
        return;
 
 ok_to_write:
-       /* manipulate request */
-       req->sector++;
-       i = --req->nr_sectors;
-       --req->current_nr_sectors;
-       req->buffer += MG_SECTOR_SIZE;
-
-       /* let know if current segment or all done */
-       if (!i || (req->bio && req->current_nr_sectors <= 0))
-               end_request(req, 1);
-
-       /* write 1 sector and set handler if remains */
-       if (i > 0) {
+       if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
+               /* write 1 sector and set handler if remains */
                buff = (u16 *)req->buffer;
                for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
                        outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
                        buff++;
                }
                MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-                               req->sector, req->nr_sectors, req->buffer);
+                      blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
                host->mg_do_intr = mg_write_intr;
                mod_timer(&host->timer, jiffies + 3 * HZ);
        }
@@ -516,7 +638,7 @@ ok_to_write:
        /* send write confirm */
        outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-       if (!i)
+       if (!rem)
                mg_request(host->breq);
 }
 
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
 {
        struct mg_host *host = (struct mg_host *)data;
        char *name;
-       struct request *req;
 
        spin_lock_irq(&host->lock);
 
-       req = elv_next_request(host->breq);
-       if (!req)
+       if (!host->req)
                goto out_unlock;
 
        host->mg_do_intr = NULL;
 
-       name = req->rq_disk->disk_name;
+       name = host->req->rq_disk->disk_name;
        printk(KERN_DEBUG "%s: timeout\n", name);
 
        host->error = MG_ERR_TIMEOUT;
        mg_bad_rw_intr(host);
 
-       mg_request(host->breq);
 out_unlock:
+       mg_request(host->breq);
        spin_unlock_irq(&host->lock);
 }
 
 static void mg_request_poll(struct request_queue *q)
 {
-       struct request *req;
-       struct mg_host *host;
+       struct mg_host *host = q->queuedata;
 
-       while ((req = elv_next_request(q)) != NULL) {
-               host = req->rq_disk->private_data;
-               if (blk_fs_request(req)) {
-                       switch (rq_data_dir(req)) {
-                       case READ:
-                               mg_read(req);
-                               break;
-                       case WRITE:
-                               mg_write(req);
-                               break;
-                       default:
-                               printk(KERN_WARNING "%s:%d unknown command\n",
-                                               __func__, __LINE__);
-                               end_request(req, 0);
+       while (1) {
+               if (!host->req) {
+                       host->req = blk_fetch_request(q);
+                       if (!host->req)
                                break;
-                       }
                }
+
+               if (unlikely(!blk_fs_request(host->req))) {
+                       mg_end_request_cur(host, -EIO);
+                       continue;
+               }
+
+               if (rq_data_dir(host->req) == READ)
+                       mg_read(host->req);
+               else
+                       mg_write(host->req);
        }
 }
 
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
                break;
        case WRITE:
                /* TODO : handler */
-               outb(MG_REG_CTRL_INTR_DISABLE,
-                               (unsigned long)host->dev_base +
-                               MG_REG_DRV_CTRL);
+               outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
                if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
                                != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);
                        return host->error;
                }
                del_timer(&host->timer);
-               mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
-               outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
-                               MG_REG_DRV_CTRL);
+               mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
+               outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
                if (host->error) {
                        mg_bad_rw_intr(host);
                        return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
                outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                                MG_REG_COMMAND);
                break;
-       default:
-               printk(KERN_WARNING "%s:%d unknown command\n",
-                               __func__, __LINE__);
-               end_request(req, 0);
-               break;
        }
        return MG_ERR_NONE;
 }
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
 /* This function also called from IRQ context */
 static void mg_request(struct request_queue *q)
 {
+       struct mg_host *host = q->queuedata;
        struct request *req;
-       struct mg_host *host;
        u32 sect_num, sect_cnt;
 
        while (1) {
-               req = elv_next_request(q);
-               if (!req)
-                       return;
-
-               host = req->rq_disk->private_data;
+               if (!host->req) {
+                       host->req = blk_fetch_request(q);
+                       if (!host->req)
+                               break;
+               }
+               req = host->req;
 
                /* check unwanted request call */
                if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
 
                del_timer(&host->timer);
 
-               sect_num = req->sector;
+               sect_num = blk_rq_pos(req);
                /* deal whole segments */
-               sect_cnt = req->nr_sectors;
+               sect_cnt = blk_rq_sectors(req);
 
                /* sanity check */
                if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
                                        "%s: bad access: sector=%d, count=%d\n",
                                        req->rq_disk->disk_name,
                                        sect_num, sect_cnt);
-                       end_request(req, 0);
+                       mg_end_request_cur(host, -EIO);
                        continue;
                }
 
-               if (!blk_fs_request(req))
-                       return;
+               if (unlikely(!blk_fs_request(req))) {
+                       mg_end_request_cur(host, -EIO);
+                       continue;
+               }
 
                if (!mg_issue_req(req, host, sect_num, sect_cnt))
                        return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
                return -EIO;
 
        if (!prv_data->use_polling)
-               outb(MG_REG_CTRL_INTR_DISABLE,
-                               (unsigned long)host->dev_base +
-                               MG_REG_DRV_CTRL);
+               outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
 
        outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
        /* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
 
        if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
                if (!prv_data->use_polling)
-                       outb(MG_REG_CTRL_INTR_ENABLE,
-                                       (unsigned long)host->dev_base +
-                                       MG_REG_DRV_CTRL);
+                       outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
                return -EIO;
        }
 
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
                return -EIO;
 
        if (!prv_data->use_polling)
-               outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
-                               MG_REG_DRV_CTRL);
+               outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
 
        return 0;
 }
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
                                __func__, __LINE__);
                goto probe_err_5;
        }
+       host->breq->queuedata = host;
 
        /* mflash is random device, thanx for the noop */
        elevator_exit(host->breq->elevator);
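
With requests no longer peeked from the queue head, the request function cannot recover its mg_host from a request that may not exist yet, so the host is hung off the queue itself. The pattern, condensed (sketch only; function names are illustrative):

static int probe_sketch(struct mg_host *host)
{
	/* as in mg_probe() above: let the queue carry the host */
	host->breq->queuedata = host;
	return 0;
}

static void request_fn_sketch(struct request_queue *q)
{
	struct mg_host *host = q->queuedata;

	if (WARN_ON(!host))
		return;
	/* fetch and drive host->req as in mg_request() above */
}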
index 4d6de4f15ccb34dd4039ed4bf3e53b5993751958..5d23ffad7c77fc31d97ff1d5e3fed081dffe16c3 100644 (file)
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
                        req, error ? "failed" : "done");
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_end_request(req, error, req->nr_sectors << 9);
+       __blk_end_request_all(req, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 {
        int result, flags;
        struct nbd_request request;
-       unsigned long size = req->nr_sectors << 9;
+       unsigned long size = blk_rq_bytes(req);
 
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(nbd_cmd(req));
-       request.from = cpu_to_be64((u64) req->sector << 9);
+       request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        request.len = htonl(size);
        memcpy(request.handle, &req, sizeof(req));
 
-       dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+       dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
                        lo->disk->disk_name, req,
                        nbdcmd_to_ascii(nbd_cmd(req)),
-                       (unsigned long long)req->sector << 9,
-                       req->nr_sectors << 9);
+                       (unsigned long long)blk_rq_pos(req) << 9,
+                       blk_rq_bytes(req));
        result = sock_xmit(lo, 1, &request, sizeof(request),
                        (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
 {
        struct request *req;
        
-       while ((req = elv_next_request(q)) != NULL) {
+       while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *lo;
 
-               blkdev_dequeue_request(req);
-
                spin_unlock_irq(q->queue_lock);
 
                dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;
-               /*
-                * Set these to sane values in case server implementation
-                * fails to check the request type first and also to keep
-                * debugging output cleaner.
-                */
-               sreq.sector = 0;
-               sreq.nr_sectors = 0;
                if (!lo->sock)
                        return -EINVAL;
                nbd_send_req(lo, &sreq);
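
nbd's wire header is now built straight from the accessors: blk_rq_pos() counts 512-byte sectors, hence the << 9 to get a byte offset, and blk_rq_bytes() covers the whole request rather than just the current segment. Condensed from nbd_send_req() above (sketch only; field names per the driver):

static void fill_nbd_header(struct nbd_request *request, struct request *req)
{
	request->magic = htonl(NBD_REQUEST_MAGIC);
	request->from  = cpu_to_be64((u64)blk_rq_pos(req) << 9); /* sectors -> bytes */
	request->len   = htonl(blk_rq_bytes(req));               /* whole request */
	/* .type and .handle are filled in as in nbd_send_req() above */
}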
index e91d4b4b014fc5997ce88ef3edbf09366cb95a80..911dfd98d813cf7ad23cdc44a77aad946580bfe0 100644 (file)
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
        if (pcd_busy)
                return;
        while (1) {
-               pcd_req = elv_next_request(q);
-               if (!pcd_req)
-                       return;
+               if (!pcd_req) {
+                       pcd_req = blk_fetch_request(q);
+                       if (!pcd_req)
+                               return;
+               }
 
                if (rq_data_dir(pcd_req) == READ) {
                        struct pcd_unit *cd = pcd_req->rq_disk->private_data;
                        if (cd != pcd_current)
                                pcd_bufblk = -1;
                        pcd_current = cd;
-                       pcd_sector = pcd_req->sector;
-                       pcd_count = pcd_req->current_nr_sectors;
+                       pcd_sector = blk_rq_pos(pcd_req);
+                       pcd_count = blk_rq_cur_sectors(pcd_req);
                        pcd_buf = pcd_req->buffer;
                        pcd_busy = 1;
                        ps_set_intr(do_pcd_read, NULL, 0, nice);
                        return;
-               } else
-                       end_request(pcd_req, 0);
+               } else {
+                       __blk_end_request_all(pcd_req, -EIO);
+                       pcd_req = NULL;
+               }
        }
 }
 
-static inline void next_request(int success)
+static inline void next_request(int err)
 {
        unsigned long saved_flags;
 
        spin_lock_irqsave(&pcd_lock, saved_flags);
-       end_request(pcd_req, success);
+       if (!__blk_end_request_cur(pcd_req, err))
+               pcd_req = NULL;
        pcd_busy = 0;
        do_pcd_request(pcd_queue);
        spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
 
        if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
                pcd_bufblk = -1;
-               next_request(0);
+               next_request(-EIO);
                return;
        }
 
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
        pcd_retries = 0;
        pcd_transfer();
        if (!pcd_count) {
-               next_request(1);
+               next_request(0);
                return;
        }
 
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
                        return;
                }
                pcd_bufblk = -1;
-               next_request(0);
+               next_request(-EIO);
                return;
        }
 
index 9299455b0af678d3176a9c7a431f544583e13faa..bf5955b3d873511d25e31c162d7ee4280a6cc14c 100644 (file)
@@ -410,10 +410,12 @@ static void run_fsm(void)
                                pd_claimed = 0;
                                phase = NULL;
                                spin_lock_irqsave(&pd_lock, saved_flags);
-                               end_request(pd_req, res);
-                               pd_req = elv_next_request(pd_queue);
-                               if (!pd_req)
-                                       stop = 1;
+                               if (!__blk_end_request_cur(pd_req,
+                                               res == Ok ? 0 : -EIO)) {
+                                       pd_req = blk_fetch_request(pd_queue);
+                                       if (!pd_req)
+                                               stop = 1;
+                               }
                                spin_unlock_irqrestore(&pd_lock, saved_flags);
                                if (stop)
                                        return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
 
        pd_cmd = rq_data_dir(pd_req);
        if (pd_cmd == READ || pd_cmd == WRITE) {
-               pd_block = pd_req->sector;
-               pd_count = pd_req->current_nr_sectors;
+               pd_block = blk_rq_pos(pd_req);
+               pd_count = blk_rq_cur_sectors(pd_req);
                if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
                        return Fail;
-               pd_run = pd_req->nr_sectors;
+               pd_run = blk_rq_sectors(pd_req);
                pd_buf = pd_req->buffer;
                pd_retries = 0;
                if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
        if (pd_count)
                return 0;
        spin_lock_irqsave(&pd_lock, saved_flags);
-       end_request(pd_req, 1);
-       pd_count = pd_req->current_nr_sectors;
+       __blk_end_request_cur(pd_req, 0);
+       pd_count = blk_rq_cur_sectors(pd_req);
        pd_buf = pd_req->buffer;
        spin_unlock_irqrestore(&pd_lock, saved_flags);
        return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
 {
        if (pd_req)
                return;
-       pd_req = elv_next_request(q);
+       pd_req = blk_fetch_request(q);
        if (!pd_req)
                return;
 
index bef3b997ba3e39d11b85917b31b20f6e96a27f1c..68a90834e99388c366874be2fe4ea554e266831f 100644 (file)
@@ -750,12 +750,10 @@ static int pf_ready(void)
 
 static struct request_queue *pf_queue;
 
-static void pf_end_request(int uptodate)
+static void pf_end_request(int err)
 {
-       if (pf_req) {
-               end_request(pf_req, uptodate);
+       if (pf_req && !__blk_end_request_cur(pf_req, err))
                pf_req = NULL;
-       }
 }
 
 static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
        if (pf_busy)
                return;
 repeat:
-       pf_req = elv_next_request(q);
-       if (!pf_req)
-               return;
+       if (!pf_req) {
+               pf_req = blk_fetch_request(q);
+               if (!pf_req)
+                       return;
+       }
 
        pf_current = pf_req->rq_disk->private_data;
-       pf_block = pf_req->sector;
-       pf_run = pf_req->nr_sectors;
-       pf_count = pf_req->current_nr_sectors;
+       pf_block = blk_rq_pos(pf_req);
+       pf_run = blk_rq_sectors(pf_req);
+       pf_count = blk_rq_cur_sectors(pf_req);
 
        if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
-               pf_end_request(0);
+               pf_end_request(-EIO);
                goto repeat;
        }
 
@@ -788,7 +788,7 @@ repeat:
                pi_do_claimed(pf_current->pi, do_pf_write);
        else {
                pf_busy = 0;
-               pf_end_request(0);
+               pf_end_request(-EIO);
                goto repeat;
        }
 }
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
                return 1;
        if (!pf_count) {
                spin_lock_irqsave(&pf_spin_lock, saved_flags);
-               pf_end_request(1);
-               pf_req = elv_next_request(pf_queue);
+               pf_end_request(0);
                spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
                if (!pf_req)
                        return 1;
-               pf_count = pf_req->current_nr_sectors;
+               pf_count = blk_rq_cur_sectors(pf_req);
                pf_buf = pf_req->buffer;
        }
        return 0;
 }
 
-static inline void next_request(int success)
+static inline void next_request(int err)
 {
        unsigned long saved_flags;
 
        spin_lock_irqsave(&pf_spin_lock, saved_flags);
-       pf_end_request(success);
+       pf_end_request(err);
        pf_busy = 0;
        do_pf_request(pf_queue);
        spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
                        pi_do_claimed(pf_current->pi, do_pf_read_start);
                        return;
                }
-               next_request(0);
+               next_request(-EIO);
                return;
        }
        pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
                                pi_do_claimed(pf_current->pi, do_pf_read_start);
                                return;
                        }
-                       next_request(0);
+                       next_request(-EIO);
                        return;
                }
                pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
                        break;
        }
        pi_disconnect(pf_current->pi);
-       next_request(1);
+       next_request(0);
 }
 
 static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
                        pi_do_claimed(pf_current->pi, do_pf_write_start);
                        return;
                }
-               next_request(0);
+               next_request(-EIO);
                return;
        }
 
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
                                pi_do_claimed(pf_current->pi, do_pf_write_start);
                                return;
                        }
-                       next_request(0);
+                       next_request(-EIO);
                        return;
                }
                pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
                        pi_do_claimed(pf_current->pi, do_pf_write_start);
                        return;
                }
-               next_request(0);
+               next_request(-EIO);
                return;
        }
        pi_disconnect(pf_current->pi);
-       next_request(1);
+       next_request(0);
 }
 
 static int __init pf_init(void)
index bccc42bb9212d7f37086ac86160521f96bc6fea6..338cee4cc0ba4cf4c9222203a6cc52fe85e228cd 100644 (file)
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
        rq_for_each_segment(bv, req, iter)
                n++;
        dev_dbg(&dev->sbd.core,
-               "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
-               __func__, __LINE__, op, n, req->nr_sectors,
-               req->hard_nr_sectors);
+               "%s:%u: %s req has %u bvecs for %u sectors\n",
+               __func__, __LINE__, op, n, blk_rq_sectors(req));
 #endif
 
-       start_sector = req->sector * priv->blocking_factor;
-       sectors = req->nr_sectors * priv->blocking_factor;
+       start_sector = blk_rq_pos(req) * priv->blocking_factor;
+       sectors = blk_rq_sectors(req) * priv->blocking_factor;
        dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
                __func__, __LINE__, op, sectors, start_sector);
 
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
        if (res) {
                dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
                        __LINE__, op, res);
-               end_request(req, 0);
+               __blk_end_request_all(req, -EIO);
                return 0;
        }
 
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
        if (res) {
                dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
                        __func__, __LINE__, res);
-               end_request(req, 0);
+               __blk_end_request_all(req, -EIO);
                return 0;
        }
 
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 
        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
-       while ((req = elv_next_request(q))) {
+       while ((req = blk_fetch_request(q))) {
                if (blk_fs_request(req)) {
                        if (ps3disk_submit_request_sg(dev, req))
                                break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
                                break;
                } else {
                        blk_dump_rq_flags(req, DEVICE_NAME " bad request");
-                       end_request(req, 0);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
        }
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        struct request *req;
        int res, read, error;
        u64 tag, status;
-       unsigned long num_sectors;
        const char *op;
 
        res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
            req->cmd[0] == REQ_LB_OP_FLUSH) {
                read = 0;
-               num_sectors = req->hard_cur_sectors;
                op = "flush";
        } else {
                read = !rq_data_dir(req);
-               num_sectors = req->nr_sectors;
                op = read ? "read" : "write";
        }
        if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        }
 
        spin_lock(&priv->lock);
-       __blk_end_request(req, error, num_sectors << 9);
+       __blk_end_request_all(req, error);
        priv->req = NULL;
        ps3disk_do_request(dev, priv->queue);
        spin_unlock(&priv->lock);
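For reference, the field accesses being replaced in this and the surrounding files map onto the new accessors as follows (a summary, not itself part of the patch):

/*
 *      rq->sector              ->  blk_rq_pos(rq)
 *      rq->nr_sectors          ->  blk_rq_sectors(rq)
 *      rq->current_nr_sectors  ->  blk_rq_cur_sectors(rq)
 *      rq->data_len            ->  blk_rq_bytes(rq)
 *      rq->hard_nr_sectors     ->  blk_rq_sectors(rq)
 *      rq->hard_cur_sectors    ->  blk_rq_cur_sectors(rq)
 *
 * The hard_* variants disappear because the blk_rq_*() helpers always
 * report the block layer's own (hard) view of the request.
 */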
index 5861e33efe63589a2f974eee6a99172acc596bc9..cbfd9c0aef034ffd2778aaac37eec3fd4d33a61e 100644 (file)
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
        vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
 }
 
-static void vdc_end_request(struct request *req, int error, int num_sectors)
-{
-       __blk_end_request(req, error, num_sectors << 9);
-}
-
 static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
                        unsigned int index)
 {
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
        rqe->req = NULL;
 
-       vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
+       __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
 
        if (blk_queue_stopped(port->disk->queue))
                blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
                desc->slice = 0;
        }
        desc->status = ~0;
-       desc->offset = (req->sector << 9) / port->vdisk_block_size;
+       desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
        desc->size = len;
        desc->ncookies = err;
 
@@ -446,14 +441,13 @@ out:
 static void do_vdc_request(struct request_queue *q)
 {
        while (1) {
-               struct request *req = elv_next_request(q);
+               struct request *req = blk_fetch_request(q);
 
                if (!req)
                        break;
 
-               blkdev_dequeue_request(req);
                if (__send_request(req) < 0)
-                       vdc_end_request(req, -EIO, req->hard_nr_sectors);
+                       __blk_end_request_all(req, -EIO);
        }
 }
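vdc_end_request() only converted a sector count back to bytes, so with desc->size already a byte count the wrapper can go. The old and new completion calls line up as follows (a sketch of the equivalence, not part of the patch):

/*
 *   old: vdc_end_request(req, err, desc->size >> 9)
 *          = __blk_end_request(req, err, (desc->size >> 9) << 9)
 *   new: __blk_end_request(req, err, desc->size)
 *
 * Identical whenever desc->size is a multiple of 512; the new form
 * simply stops rounding the byte count through sector units.
 */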
 
index d22cc385693728f382bb286ae59d179816b9cc74..cf7877fb8a7d721b0ddbff0c0cced731e8c3e9fc 100644 (file)
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
                        ret = swim_read_sector(fs, side, track, sector,
                                                buffer);
                        if (try-- == 0)
-                               return -1;
+                               return -EIO;
                } while (ret != 512);
 
                buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
        struct request *req;
        struct floppy_state *fs;
 
-       while ((req = elv_next_request(q))) {
+       req = blk_fetch_request(q);
+       while (req) {
+               int err = -EIO;
 
                fs = req->rq_disk->private_data;
-               if (req->sector < 0 || req->sector >= fs->total_secs) {
-                       end_request(req, 0);
-                       continue;
-               }
-               if (req->current_nr_sectors == 0) {
-                       end_request(req, 1);
-                       continue;
-               }
-               if (!fs->disk_in) {
-                       end_request(req, 0);
-                       continue;
-               }
-               if (rq_data_dir(req) == WRITE) {
-                       if (fs->write_protected) {
-                               end_request(req, 0);
-                               continue;
-                       }
-               }
+               if (blk_rq_pos(req) >= fs->total_secs)
+                       goto done;
+               if (!fs->disk_in)
+                       goto done;
+               if (rq_data_dir(req) == WRITE && fs->write_protected)
+                       goto done;
+
                switch (rq_data_dir(req)) {
                case WRITE:
                        /* NOT IMPLEMENTED */
-                       end_request(req, 0);
                        break;
                case READ:
-                       if (floppy_read_sectors(fs, req->sector,
-                                               req->current_nr_sectors,
-                                               req->buffer)) {
-                               end_request(req, 0);
-                               continue;
-                       }
-                       req->nr_sectors -= req->current_nr_sectors;
-                       req->sector += req->current_nr_sectors;
-                       req->buffer += req->current_nr_sectors * 512;
-                       end_request(req, 1);
+                       err = floppy_read_sectors(fs, blk_rq_pos(req),
+                                                 blk_rq_cur_sectors(req),
+                                                 req->buffer);
                        break;
                }
+       done:
+               if (!__blk_end_request_cur(req, err))
+                       req = blk_fetch_request(q);
        }
 }
 
index 612965307ba009e04c8f733a870a5b4df69b4572..80df93e3cdd05f0d9c219b6d18bbf8b7a6de517d 100644 (file)
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
 static int floppy_check_change(struct gendisk *disk);
 static int floppy_revalidate(struct gendisk *disk);
 
+static bool swim3_end_request(int err, unsigned int nr_bytes)
+{
+       if (__blk_end_request(fd_req, err, nr_bytes))
+               return true;
+
+       fd_req = NULL;
+       return false;
+}
+
+static bool swim3_end_request_cur(int err)
+{
+       return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+}
+
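These helpers tie __blk_end_request() to the driver's fd_req bookkeeping: fd_req is cleared the moment the block layer reports the request fully completed. The interrupt path later in this file uses the return value to keep the state machine running, roughly:

        if (swim3_end_request(0, fs->scount << 9)) {
                /* true: fd_req still has sectors pending, keep going */
                act(fs);
        } else {
                /* false: fd_req is done and has been cleared */
                fs->state = idle;
        }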
 static void swim3_select(struct floppy_state *fs, int sel)
 {
        struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
                wake_up(&fs->wait);
                return;
        }
-       while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
+       while (fs->state == idle) {
+               if (!fd_req) {
+                       fd_req = blk_fetch_request(swim3_queue);
+                       if (!fd_req)
+                               break;
+               }
+               req = fd_req;
 #if 0
-               printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+               printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
                       req->rq_disk->disk_name, req->cmd,
-                      (long)req->sector, req->nr_sectors, req->buffer);
-               printk("           errors=%d current_nr_sectors=%ld\n",
-                      req->errors, req->current_nr_sectors);
+                      (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+               printk("           errors=%d current_nr_sectors=%u\n",
+                      req->errors, blk_rq_cur_sectors(req));
 #endif
 
-               if (req->sector < 0 || req->sector >= fs->total_secs) {
-                       end_request(req, 0);
-                       continue;
-               }
-               if (req->current_nr_sectors == 0) {
-                       end_request(req, 1);
+               if (blk_rq_pos(req) >= fs->total_secs) {
+                       swim3_end_request_cur(-EIO);
                        continue;
                }
                if (fs->ejected) {
-                       end_request(req, 0);
+                       swim3_end_request_cur(-EIO);
                        continue;
                }
 
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
                        if (fs->write_prot < 0)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
-                               end_request(req, 0);
+                               swim3_end_request_cur(-EIO);
                                continue;
                        }
                }
 
-               /* Do not remove the cast. req->sector is now a sector_t and
-                * can be 64 bits, but it will never go past 32 bits for this
-                * driver anyway, so we can safely cast it down and not have
-                * to do a 64/32 division
+               /* Do not remove the cast. blk_rq_pos(req) is now a
+                * sector_t and can be 64 bits, but it will never go
+                * past 32 bits for this driver anyway, so we can
+                * safely cast it down and not have to do a 64/32
+                * division
                 */
-               fs->req_cyl = ((long)req->sector) / fs->secpercyl;
-               x = ((long)req->sector) % fs->secpercyl;
+               fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+               x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                fs->head = x / fs->secpertrack;
                fs->req_sector = x % fs->secpertrack + 1;
                fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
        struct dbdma_cmd *cp = fs->dma_cmd;
        struct dbdma_regs __iomem *dr = fs->dma;
 
-       if (fd_req->current_nr_sectors <= 0) {
+       if (blk_rq_cur_sectors(fd_req) <= 0) {
                printk(KERN_ERR "swim3: transfer 0 sectors?\n");
                return;
        }
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
                n = 1;
        else {
                n = fs->secpertrack - fs->req_sector + 1;
-               if (n > fd_req->current_nr_sectors)
-                       n = fd_req->current_nr_sectors;
+               if (n > blk_rq_cur_sectors(fd_req))
+                       n = blk_rq_cur_sectors(fd_req);
        }
        fs->scount = n;
        swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
                case do_transfer:
                        if (fs->cur_cyl != fs->req_cyl) {
                                if (fs->retries > 5) {
-                                       end_request(fd_req, 0);
+                                       swim3_end_request_cur(-EIO);
                                        fs->state = idle;
                                        return;
                                }
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               end_request(fd_req, 0);
+               swim3_end_request_cur(-EIO);
                fs->state = idle;
                start_request(fs);
        } else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        printk(KERN_ERR "swim3: seek timeout\n");
-       end_request(fd_req, 0);
+       swim3_end_request_cur(-EIO);
        fs->state = idle;
        start_request(fs);
 }
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
                return;
        }
        printk(KERN_ERR "swim3: seek settle timeout\n");
-       end_request(fd_req, 0);
+       swim3_end_request_cur(-EIO);
        fs->state = idle;
        start_request(fs);
 }
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_regs __iomem *dr = fs->dma;
-       struct dbdma_cmd *cp = fs->dma_cmd;
-       unsigned long s;
        int n;
 
        fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
        out_8(&sw->select, RELAX);
-       if (rq_data_dir(fd_req) == WRITE)
-               ++cp;
-       if (ld_le16(&cp->xfer_status) != 0)
-               s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
-       else
-               s = 0;
-       fd_req->sector += s;
-       fd_req->current_nr_sectors -= s;
        printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-              (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
-       end_request(fd_req, 0);
+              (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+              (long)blk_rq_pos(fd_req));
+       swim3_end_request_cur(-EIO);
        fs->state = idle;
        start_request(fs);
 }
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       end_request(fd_req, 0);
+                                       swim3_end_request_cur(-EIO);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                if (intr & ERROR_INTR) {
                        n = fs->scount - 1 - resid / 512;
                        if (n > 0) {
-                               fd_req->sector += n;
-                               fd_req->current_nr_sectors -= n;
-                               fd_req->buffer += n * 512;
+                               blk_update_request(fd_req, 0, n << 9);
                                fs->req_sector += n;
                        }
                        if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        } else {
                                printk("swim3: error %sing block %ld (err=%x)\n",
                                       rq_data_dir(fd_req) == WRITE? "writ": "read",
-                                      (long)fd_req->sector, err);
-                               end_request(fd_req, 0);
+                                      (long)blk_rq_pos(fd_req), err);
+                               swim3_end_request_cur(-EIO);
                                fs->state = idle;
                        }
                } else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
                                printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
                                       fs->state, rq_data_dir(fd_req), intr, err);
-                               end_request(fd_req, 0);
+                               swim3_end_request_cur(-EIO);
                                fs->state = idle;
                                start_request(fs);
                                break;
                        }
-                       fd_req->sector += fs->scount;
-                       fd_req->current_nr_sectors -= fs->scount;
-                       fd_req->buffer += fs->scount * 512;
-                       if (fd_req->current_nr_sectors <= 0) {
-                               end_request(fd_req, 1);
-                               fs->state = idle;
-                       } else {
+                       if (swim3_end_request(0, fs->scount << 9)) {
                                fs->req_sector += fs->scount;
                                if (fs->req_sector > fs->secpertrack) {
                                        fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                        }
                                }
                                act(fs);
-                       }
+                       } else
+                               fs->state = idle;
                }
                if (fs->state == idle)
                        start_request(fs);
index ff0448e4bf036d36fc6e2a2a071be0ff13b19f99..da403b6a7f434525a694fb139d58950d65769586 100644 (file)
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
        struct request *req = crq->rq;
        int rc;
 
-       rc = __blk_end_request(req, error, blk_rq_bytes(req));
-       assert(rc == 0);
+       __blk_end_request_all(req, error);
 
        rc = carm_put_request(host, crq);
        assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
 
        while (1) {
                DPRINTK("get req\n");
-               rq = elv_next_request(q);
+               rq = blk_fetch_request(q);
                if (!rq)
                        break;
 
-               blkdev_dequeue_request(rq);
-
                crq = rq->special;
                assert(crq != NULL);
                assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
 
 queue_one_request:
        VPRINTK("get req\n");
-       rq = elv_next_request(q);
+       rq = blk_peek_request(q);
        if (!rq)
                return;
 
@@ -858,7 +855,7 @@ queue_one_request:
        }
        crq->rq = rq;
 
-       blkdev_dequeue_request(rq);
+       blk_start_request(rq);
 
        if (rq_data_dir(rq) == WRITE) {
                writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
        msg->sg_count   = n_elem;
        msg->sg_type    = SGT_32BIT;
        msg->handle     = cpu_to_le32(TAG_ENCODE(crq->tag));
-       msg->lba        = cpu_to_le32(rq->sector & 0xffffffff);
-       tmp             = (rq->sector >> 16) >> 16;
+       msg->lba        = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+       tmp             = (blk_rq_pos(rq) >> 16) >> 16;
        msg->lba_high   = cpu_to_le16( (u16) tmp );
-       msg->lba_count  = cpu_to_le16(rq->nr_sectors);
+       msg->lba_count  = cpu_to_le16(blk_rq_sectors(rq));
 
        msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
        for (i = 0; i < n_elem; i++) {
index 689cd27ac890afff8c39dc11bdac5e605e37816c..e67bbae9547d1b3ac42cbbd1a243911789dbb94b 100644 (file)
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct ub_request *urq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status,
-    unsigned int cmd_len);
+static void ub_end_rq(struct request *rq, unsigned int status);
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_request *urq, struct ub_scsi_cmd *cmd);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
        struct ub_lun *lun = q->queuedata;
        struct request *rq;
 
-       while ((rq = elv_next_request(q)) != NULL) {
+       while ((rq = blk_peek_request(q)) != NULL) {
                if (ub_request_fn_1(lun, rq) != 0) {
                        blk_stop_queue(q);
                        break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
        int n_elem;
 
        if (atomic_read(&sc->poison)) {
-               blkdev_dequeue_request(rq);
-               ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
+               blk_start_request(rq);
+               ub_end_rq(rq, DID_NO_CONNECT << 16);
                return 0;
        }
 
        if (lun->changed && !blk_pc_request(rq)) {
-               blkdev_dequeue_request(rq);
-               ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
+               blk_start_request(rq);
+               ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
                return 0;
        }
 
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
                return -1;
        memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
-       blkdev_dequeue_request(rq);
+       blk_start_request(rq);
 
        urq = &lun->urq;
        memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 
 drop:
        ub_put_cmd(lun, cmd);
-       ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
+       ub_end_rq(rq, DID_ERROR << 16);
        return 0;
 }
 
@@ -726,8 +725,8 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
         * The call to blk_queue_hardsect_size() guarantees that request
         * is aligned, but it is given in terms of 512 byte units, always.
         */
-       block = rq->sector >> lun->capacity.bshift;
-       nblks = rq->nr_sectors >> lun->capacity.bshift;
+       block = blk_rq_pos(rq) >> lun->capacity.bshift;
+       nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
 
        cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
        /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
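As the comment above notes, requests always arrive in 512-byte units while the device may use larger logical blocks; bshift bridges the two. A worked example, assuming a hypothetical device with 2048-byte blocks (bshift = 2):

        /* request: 512-byte sector 24, 8 sectors = 4096 bytes */
        block = blk_rq_pos(rq) >> lun->capacity.bshift;     /* 24 >> 2 == 6 */
        nblks = blk_rq_sectors(rq) >> lun->capacity.bshift; /*  8 >> 2 == 2 */
        cmd->len = blk_rq_bytes(rq);                        /* 4096 */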
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
        cmd->cdb[8] = nblks;
        cmd->cdb_len = 10;
 
-       cmd->len = rq->nr_sectors * 512;
+       cmd->len = blk_rq_bytes(rq);
 }
 
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 {
        struct request *rq = urq->rq;
 
-       if (rq->data_len == 0) {
+       if (blk_rq_bytes(rq) == 0) {
                cmd->dir = UB_DIR_NONE;
        } else {
                if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
        memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
        cmd->cdb_len = rq->cmd_len;
 
-       cmd->len = rq->data_len;
+       cmd->len = blk_rq_bytes(rq);
 
        /*
         * To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
        struct ub_request *urq = cmd->back;
        struct request *rq;
        unsigned int scsi_status;
-       unsigned int cmd_len;
 
        rq = urq->rq;
 
        if (cmd->error == 0) {
                if (blk_pc_request(rq)) {
-                       if (cmd->act_len >= rq->data_len)
-                               rq->data_len = 0;
+                       if (cmd->act_len >= rq->resid_len)
+                               rq->resid_len = 0;
                        else
-                               rq->data_len -= cmd->act_len;
+                               rq->resid_len -= cmd->act_len;
                        scsi_status = 0;
                } else {
                        if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
        urq->rq = NULL;
 
-       cmd_len = cmd->len;
        ub_put_cmd(lun, cmd);
-       ub_end_rq(rq, scsi_status, cmd_len);
+       ub_end_rq(rq, scsi_status);
        blk_start_queue(lun->disk->queue);
 }
 
-static void ub_end_rq(struct request *rq, unsigned int scsi_status,
-    unsigned int cmd_len)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status)
 {
        int error;
-       long rqlen;
 
        if (scsi_status == 0) {
                error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
                error = -EIO;
                rq->errors = scsi_status;
        }
-       rqlen = blk_rq_bytes(rq);    /* Oddly enough, this is the residue. */
-       if (__blk_end_request(rq, error, cmd_len)) {
-               printk(KERN_WARNING DRV_NAME
-                   ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
-                   blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
-       }
+       __blk_end_request_all(rq, error);
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
index ecccf65dce2f07a7a41a12cdfcbab7633da409c5..390d69bb7c482ad1a79a09a42192575b692b4980 100644 (file)
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
        struct viodasd_device *d;
        unsigned long flags;
 
-       start = (u64)req->sector << 9;
+       start = (u64)blk_rq_pos(req) << 9;
 
        if (rq_data_dir(req) == READ) {
                direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
         * back later.
         */
        while (num_req_outstanding < VIOMAXREQ) {
-               req = elv_next_request(q);
+               req = blk_fetch_request(q);
                if (req == NULL)
                        return;
-               /* dequeue the current request from the queue */
-               blkdev_dequeue_request(req);
                /* check that request contains a valid command */
                if (!blk_fs_request(req)) {
-                       viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+                       viodasd_end_request(req, -EIO, blk_rq_sectors(req));
                        continue;
                }
                /* Try sending the request */
                if (send_request(req) != 0)
-                       viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+                       viodasd_end_request(req, -EIO, blk_rq_sectors(req));
        }
 }
 
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
                err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
                printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
                                event->xRc, bevent->sub_result, err->msg);
-               num_sect = req->hard_nr_sectors;
+               num_sect = blk_rq_sectors(req);
        }
        qlock = req->q->queue_lock;
        spin_lock_irqsave(qlock, irq_flags);
index 5d34764c8a8726d5103e6e723eaae6ab28a6a422..511d4ae2d1764cb6816bd07cbc014e6bc55802cd 100644 (file)
@@ -37,6 +37,7 @@ struct virtblk_req
        struct list_head list;
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
+       struct virtio_scsi_inhdr in_hdr;
        u8 status;
 };
 
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
        spin_lock_irqsave(&vblk->lock, flags);
        while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
                int error;
+
                switch (vbr->status) {
                case VIRTIO_BLK_S_OK:
                        error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
                        break;
                }
 
-               __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+               if (blk_pc_request(vbr->req)) {
+                       vbr->req->resid_len = vbr->in_hdr.residual;
+                       vbr->req->sense_len = vbr->in_hdr.sense_len;
+                       vbr->req->errors = vbr->in_hdr.errors;
+               }
+
+               __blk_end_request_all(vbr->req, error);
                list_del(&vbr->list);
                mempool_free(vbr, vblk->pool);
        }
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
                   struct request *req)
 {
-       unsigned long num, out, in;
+       unsigned long num, out = 0, in = 0;
        struct virtblk_req *vbr;
 
        vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
        vbr->req = req;
        if (blk_fs_request(vbr->req)) {
                vbr->out_hdr.type = 0;
-               vbr->out_hdr.sector = vbr->req->sector;
+               vbr->out_hdr.sector = blk_rq_pos(vbr->req);
                vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
        } else if (blk_pc_request(vbr->req)) {
                vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
        if (blk_barrier_rq(vbr->req))
                vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
-       sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
-       num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
-       sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
+       sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
 
-       if (rq_data_dir(vbr->req) == WRITE) {
-               vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-               out = 1 + num;
-               in = 1;
-       } else {
-               vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-               out = 1;
-               in = 1 + num;
+       /*
+        * If this is a packet command, we need a couple of additional headers.
+        * Behind the normal outhdr we put a segment with the scsi command
+        * block, and before the normal inhdr we put the sense data and an
+        * inhdr with additional status information.
+        */
+       if (blk_pc_request(vbr->req))
+               sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
+
+       num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
+
+       if (blk_pc_request(vbr->req)) {
+               sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+               sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
+                          sizeof(vbr->in_hdr));
+       }
+
+       sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
+                  sizeof(vbr->status));
+
+       if (num) {
+               if (rq_data_dir(vbr->req) == WRITE) {
+                       vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+                       out += num;
+               } else {
+                       vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+                       in += num;
+               }
        }
 
        if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
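Given the header layout described in the comment above, the descriptor counts for a request with num data segments work out as below (a summary sketch, not part of the patch; 96 is the size of the SCSI sense buffer):

        /*
         *   fs request:     out = 1 (out_hdr)       in = 1 (status)
         *   packet command: out = 2 (out_hdr, cdb)  in = 3 (sense, in_hdr, status)
         *
         * plus num on whichever side the data moves: out for writes,
         * in for reads.
         */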
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 
 static void do_virtblk_request(struct request_queue *q)
 {
-       struct virtio_blk *vblk = NULL;
+       struct virtio_blk *vblk = q->queuedata;
        struct request *req;
        unsigned int issued = 0;
 
-       while ((req = elv_next_request(q)) != NULL) {
-               vblk = req->rq_disk->private_data;
+       while ((req = blk_peek_request(q)) != NULL) {
                BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
                /* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
                        blk_stop_queue(q);
                        break;
                }
-               blkdev_dequeue_request(req);
+               blk_start_request(req);
                issued++;
        }
 
@@ -149,8 +174,16 @@ static void do_virtblk_request(struct request_queue *q)
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned cmd, unsigned long data)
 {
-       return scsi_cmd_ioctl(bdev->bd_disk->queue,
-                             bdev->bd_disk, mode, cmd,
+       struct gendisk *disk = bdev->bd_disk;
+       struct virtio_blk *vblk = disk->private_data;
+
+       /*
+        * Only allow the generic SCSI ioctls if the host can support it.
+        */
+       if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+               return -ENOIOCTLCMD;
+
+       return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
                              (void __user *)data);
 }
 
@@ -249,6 +282,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_put_disk;
        }
 
+       vblk->disk->queue->queuedata = vblk;
        queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
 
        if (index < 26) {
@@ -356,6 +390,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
        VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
        VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+       VIRTIO_BLK_F_SCSI,
 };
 
 static struct virtio_driver virtio_blk = {
index 64b496fce98bb2a19fd17eb22ef5a192f67ae7bb..ce2429219925597edddb597ed88c09ff37c48b75 100644 (file)
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
        if (xdc_busy)
                return;
 
-       while ((req = elv_next_request(q)) != NULL) {
-               unsigned block = req->sector;
-               unsigned count = req->nr_sectors;
-               int rw = rq_data_dir(req);
+       req = blk_fetch_request(q);
+       while (req) {
+               unsigned block = blk_rq_pos(req);
+               unsigned count = blk_rq_cur_sectors(req);
                XD_INFO *disk = req->rq_disk->private_data;
-               int res = 0;
+               int res = -EIO;
                int retry;
 
-               if (!blk_fs_request(req)) {
-                       end_request(req, 0);
-                       continue;
-               }
-               if (block + count > get_capacity(req->rq_disk)) {
-                       end_request(req, 0);
-                       continue;
-               }
-               if (rw != READ && rw != WRITE) {
-                       printk("do_xd_request: unknown request\n");
-                       end_request(req, 0);
-                       continue;
-               }
+               if (!blk_fs_request(req))
+                       goto done;
+               if (block + count > get_capacity(req->rq_disk))
+                       goto done;
-               for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
+               for (retry = 0; (retry < XD_RETRIES) && res; retry++)
-                       res = xd_readwrite(rw, disk, req->buffer, block, count);
-               end_request(req, res);  /* wrap up, 0 = fail, 1 = success */
+                       res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
+                                          block, count);
+       done:
+               /* wrap up, 0 = success, -errno = fail */
+               if (!__blk_end_request_cur(req, res))
+                       req = blk_fetch_request(q);
        }
 }
 
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
                                printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
                                xd_recalibrate(drive);
                                spin_lock_irq(&xd_lock);
-                               return (0);
+                               return -EIO;
                        case 2:
                                if (sense[0] & 0x30) {
                                        printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
                                else
                                        printk(" - no valid disk address\n");
                                spin_lock_irq(&xd_lock);
-                               return (0);
+                               return -EIO;
                }
                if (xd_dma_buffer)
                        for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
                count -= temp, buffer += temp * 0x200, block += temp;
        }
        spin_lock_irq(&xd_lock);
-       return (1);
+       return 0;
 }
 
 /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
index 8f905089b72b7e49c2434d1b6abba9fa76321051..6d5950839bd04eb0e2dfad7559245cdac2382c93 100644 (file)
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
 static int get_id_from_freelist(struct blkfront_info *info)
 {
        unsigned long free = info->shadow_free;
-       BUG_ON(free > BLK_RING_SIZE);
+       BUG_ON(free >= BLK_RING_SIZE);
        info->shadow_free = info->shadow[free].req.id;
        info->shadow[free].req.id = 0x0fffffee; /* debug */
        return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
        info->shadow[id].request = (unsigned long)req;
 
        ring_req->id = id;
-       ring_req->sector_number = (blkif_sector_t)req->sector;
+       ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
        ring_req->handle = info->handle;
 
        ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
 
        queued = 0;
 
-       while ((req = elv_next_request(rq)) != NULL) {
+       while ((req = blk_peek_request(rq)) != NULL) {
                info = req->rq_disk->private_data;
-               if (!blk_fs_request(req)) {
-                       end_request(req, 0);
-                       continue;
-               }
 
                if (RING_FULL(&info->ring))
                        goto wait;
 
-               pr_debug("do_blk_req %p: cmd %p, sec %lx, "
-                        "(%u/%li) buffer:%p [%s]\n",
-                        req, req->cmd, (unsigned long)req->sector,
-                        req->current_nr_sectors,
-                        req->nr_sectors, req->buffer,
-                        rq_data_dir(req) ? "write" : "read");
+               blk_start_request(req);
 
+               if (!blk_fs_request(req)) {
+                       __blk_end_request_all(req, -EIO);
+                       continue;
+               }
+
+               pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+                        "(%u/%u) buffer:%p [%s]\n",
+                        req, req->cmd, (unsigned long)blk_rq_pos(req),
+                        blk_rq_cur_sectors(req), blk_rq_sectors(req),
+                        req->buffer, rq_data_dir(req) ? "write" : "read");
 
-               blkdev_dequeue_request(req);
                if (blkif_queue_request(req)) {
                        blk_requeue_request(rq, req);
 wait:
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
        for (i = info->ring.rsp_cons; i != rp; i++) {
                unsigned long id;
-               int ret;
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);
 
-                       ret = __blk_end_request(req, error, blk_rq_bytes(req));
-                       BUG_ON(ret);
+                       __blk_end_request_all(req, error);
                        break;
                default:
                        BUG();
index 4aecf5dc6a93fe546bd80f3a2eba31b84d674567..3a4397edab71a25b2de8216a17ce9cbc7232add8 100644 (file)
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
 {
        struct request *req;
 
-       while ((req = elv_next_request(q)) != NULL) {
+       while ((req = blk_peek_request(q)) != NULL) {
                if (blk_fs_request(req))
                        break;
-               end_request(req, 0);
+               blk_start_request(req);
+               __blk_end_request_all(req, -EIO);
        }
        return req;
 }
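ace_get_next_request() now only peeks; the FSM calls blk_start_request() once it actually commits to a request, so an unstarted request can stay on the queue across polls. The split, schematically (driver_ready() is a hypothetical stand-in for the driver's own gating):

        rq = blk_peek_request(q);       /* look at the head, don't dequeue */
        if (rq && driver_ready(rq)) {
                blk_start_request(rq);  /* dequeue; the driver owns rq now */
                /* program hardware from blk_rq_pos(rq), blk_rq_sectors(rq) */
        }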
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
                set_capacity(ace->gd, 0);
                dev_info(ace->dev, "No CF in slot\n");
 
-               /* Drop all pending requests */
-               while ((req = elv_next_request(ace->queue)) != NULL)
-                       end_request(req, 0);
+               /* Drop all in-flight and pending requests */
+               if (ace->req) {
+                       __blk_end_request_all(ace->req, -EIO);
+                       ace->req = NULL;
+               }
+               while ((req = blk_fetch_request(ace->queue)) != NULL)
+                       __blk_end_request_all(req, -EIO);
 
                /* Drop back to IDLE state and notify waiters */
                ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
                        ace->fsm_state = ACE_FSM_STATE_IDLE;
                        break;
                }
+               blk_start_request(req);
 
                /* Okay, it's a data request, set it up for transfer */
                dev_dbg(ace->dev,
-                       "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
-                       (unsigned long long) req->sector, req->hard_nr_sectors,
-                       req->current_nr_sectors, rq_data_dir(req));
+                       "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+                       (unsigned long long)blk_rq_pos(req),
+                       blk_rq_sectors(req), blk_rq_cur_sectors(req),
+                       rq_data_dir(req));
 
                ace->req = req;
                ace->data_ptr = req->buffer;
-               ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
-               ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+               ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
+               ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
 
-               count = req->hard_nr_sectors;
+               count = blk_rq_sectors(req);
                if (rq_data_dir(req)) {
                        /* Kick off write request */
                        dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
                        dev_dbg(ace->dev,
                                "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
                                ace->fsm_task, ace->fsm_iter_num,
-                               ace->req->current_nr_sectors * 16,
+                               blk_rq_cur_sectors(ace->req) * 16,
                                ace->data_count, ace->in_irq);
                        ace_fsm_yield(ace);     /* need to poll CFBSY bit */
                        break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
                        dev_dbg(ace->dev,
                                "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
                                ace->fsm_task, ace->fsm_iter_num,
-                               ace->req->current_nr_sectors * 16,
+                               blk_rq_cur_sectors(ace->req) * 16,
                                ace->data_count, ace->in_irq);
                        ace_fsm_yieldirq(ace);
                        break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
                }
 
                /* bio finished; is there another one? */
-               if (__blk_end_request(ace->req, 0,
-                                       blk_rq_cur_bytes(ace->req))) {
-                       /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
-                        *      ace->req->hard_nr_sectors,
-                        *      ace->req->current_nr_sectors);
+               if (__blk_end_request_cur(ace->req, 0)) {
+                       /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+                        *      blk_rq_sectors(ace->req),
+                        *      blk_rq_cur_sectors(ace->req));
                         */
                        ace->data_ptr = ace->req->buffer;
-                       ace->data_count = ace->req->current_nr_sectors * 16;
+                       ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
                        ace_fsm_yieldirq(ace);
                        break;
                }
index 80754cdd31190c16d9c65e4e0538b979310b42cb..4575171e5beb1bd754e0fe0ba11600b0fce4ea3f 100644 (file)
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
 static void do_z2_request(struct request_queue *q)
 {
        struct request *req;
-       while ((req = elv_next_request(q)) != NULL) {
-               unsigned long start = req->sector << 9;
-               unsigned long len  = req->current_nr_sectors << 9;
+
+       req = blk_fetch_request(q);
+       while (req) {
+               unsigned long start = blk_rq_pos(req) << 9;
+               unsigned long len  = blk_rq_cur_bytes(req);
+               int err = 0;
 
                if (start + len > z2ram_size) {
                        printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
-                               req->sector, req->current_nr_sectors);
-                       end_request(req, 0);
-                       continue;
+                               blk_rq_pos(req), blk_rq_cur_sectors(req));
+                       err = -EIO;
+                       goto done;
                }
                while (len) {
                        unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
                        start += size;
                        len -= size;
                }
-               end_request(req, 1);
+       done:
+               if (!__blk_end_request_cur(req, err))
+                       req = blk_fetch_request(q);
        }
 }
 
index 2eecb779437b8057cdef2c59d37bbae71930b9c1..1e366ad8f68069b8c15fc2d0c66fee2d09074bf7 100644 (file)
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
        list_for_each_safe(elem, next, &gdrom_deferred) {
                req = list_entry(elem, struct request, queuelist);
                spin_unlock(&gdrom_lock);
-               block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
-               block_cnt = req->nr_sectors/GD_TO_BLK;
+               block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+               block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
                ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
                ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
                ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
                * before handling ending the request */
                spin_lock(&gdrom_lock);
                list_del_init(&req->queuelist);
-               __blk_end_request(req, err, blk_rq_bytes(req));
+               __blk_end_request_all(req, err);
        }
        spin_unlock(&gdrom_lock);
        kfree(read_command);
 }
 
-static void gdrom_request_handler_dma(struct request *req)
-{
-       /* dequeue, add to list of deferred work
-       * and then schedule workqueue */
-       blkdev_dequeue_request(req);
-       list_add_tail(&req->queuelist, &gdrom_deferred);
-       schedule_work(&work);
-}
-
 static void gdrom_request(struct request_queue *rq)
 {
        struct request *req;
 
-       while ((req = elv_next_request(rq)) != NULL) {
+       while ((req = blk_fetch_request(rq)) != NULL) {
                if (!blk_fs_request(req)) {
                        printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
-                       end_request(req, 0);
+                       __blk_end_request_all(req, -EIO);
+                       continue;
                }
                if (rq_data_dir(req) != READ) {
                        printk(KERN_NOTICE "GDROM: Read only device -");
                        printk(" write request ignored\n");
-                       end_request(req, 0);
+                       __blk_end_request_all(req, -EIO);
+                       continue;
                }
-               if (req->nr_sectors)
-                       gdrom_request_handler_dma(req);
-               else
-                       end_request(req, 0);
+
+               /*
+                * Add to list of deferred work and then schedule
+                * workqueue.
+                */
+               list_add_tail(&req->queuelist, &gdrom_deferred);
+               schedule_work(&work);
        }
 }
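Because blk_fetch_request() dequeues the request, gdrom can park it on its private gdrom_deferred list by reusing req->queuelist, which the block layer no longer touches once a request is started; the worker above then completes each entry. Schematically (a summary of the code in this file, not new API):

        /* request_fn side: hand the dequeued request to the worker */
        list_add_tail(&req->queuelist, &gdrom_deferred);
        schedule_work(&work);

        /* worker side: detach and complete under gdrom_lock */
        list_del_init(&req->queuelist);
        __blk_end_request_all(req, err);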
 
index 13929356135c837743f130c2f7de4d3cd85cc4fb..ca741c21e4aaa4fa93af33e6ae8e4ef6af4b038c 100644 (file)
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
                        viopath_targetinst(viopath_hostLp),
                        (u64)req, VIOVERSION << 16,
                        ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
-                       (u64)req->sector * 512, len, 0);
+                       (u64)blk_rq_pos(req) * 512, len, 0);
        if (hvrc != HvLpEvent_Rc_Good) {
                printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
                return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
        return 0;
 }
 
-static void viocd_end_request(struct request *req, int error)
-{
-       int nsectors = req->hard_nr_sectors;
-
-       /*
-        * Make sure it's fully ended, and ensure that we process
-        * at least one sector.
-        */
-       if (blk_pc_request(req))
-               nsectors = (req->data_len + 511) >> 9;
-       if (!nsectors)
-               nsectors = 1;
-
-       if (__blk_end_request(req, error, nsectors << 9))
-               BUG();
-}
-
 static int rwreq;
 
 static void do_viocd_request(struct request_queue *q)
 {
        struct request *req;
 
-       while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
+       while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
                if (!blk_fs_request(req))
-                       viocd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                else if (send_request(req) < 0) {
                        printk(VIOCD_KERN_WARNING
                                        "unable to send message to OS/400!");
-                       viocd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                } else
                        rwreq++;
        }
@@ -531,9 +514,9 @@ return_complete:
                                        "with rc %d:0x%04X: %s\n",
                                        req, event->xRc,
                                        bevent->sub_result, err->msg);
-                       viocd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                } else
-                       viocd_end_request(req, 0);
+                       __blk_end_request_all(req, 0);
 
                /* restart handling of incoming requests */
                spin_unlock_irqrestore(&viocd_reqlock, flags);
index 7201b176d75b05814665096ab45fe372900fbe99..8a894fa37b530d2841e77f6db60c7104a660d7e8 100644 (file)
@@ -79,34 +79,6 @@ void ide_init_pc(struct ide_atapi_pc *pc)
 }
 EXPORT_SYMBOL_GPL(ide_init_pc);
 
-/*
- * Generate a new packet command request in front of the request queue, before
- * the current request, so that it will be processed immediately, on the next
- * pass through the driver.
- */
-static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
-                             struct ide_atapi_pc *pc, struct request *rq)
-{
-       blk_rq_init(NULL, rq);
-       rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->cmd_flags |= REQ_PREEMPT;
-       rq->buffer = (char *)pc;
-       rq->rq_disk = disk;
-
-       if (pc->req_xfer) {
-               rq->data = pc->buf;
-               rq->data_len = pc->req_xfer;
-       }
-
-       memcpy(rq->cmd, pc->c, 12);
-       if (drive->media == ide_tape)
-               rq->cmd[13] = REQ_IDETAPE_PC1;
-
-       drive->hwif->rq = NULL;
-
-       elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
 /*
  * Add a special packet command request to the tail of the request queue,
  * and wait for it to be serviced.
@@ -119,19 +91,21 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
 
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->buffer = (char *)pc;
+       rq->special = (char *)pc;
 
        if (pc->req_xfer) {
-               rq->data = pc->buf;
-               rq->data_len = pc->req_xfer;
+               error = blk_rq_map_kern(drive->queue, rq, pc->buf, pc->req_xfer,
+                                       GFP_NOIO);
+               if (error)
+                       goto put_req;
        }
 
        memcpy(rq->cmd, pc->c, 12);
        if (drive->media == ide_tape)
                rq->cmd[13] = REQ_IDETAPE_PC1;
        error = blk_execute_rq(drive->queue, disk, rq, 0);
+put_req:
        blk_put_request(rq);
-
        return error;
 }
 EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
@@ -191,20 +165,113 @@ void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
 }
 EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
 
+void ide_prep_sense(ide_drive_t *drive, struct request *rq)
+{
+       struct request_sense *sense = &drive->sense_data;
+       struct request *sense_rq = &drive->sense_rq;
+       unsigned int cmd_len, sense_len;
+       int err;
+
+       debug_log("%s: enter\n", __func__);
+
+       switch (drive->media) {
+       case ide_floppy:
+               cmd_len = 255;
+               sense_len = 18;
+               break;
+       case ide_tape:
+               cmd_len = 20;
+               sense_len = 20;
+               break;
+       default:
+               cmd_len = 18;
+               sense_len = 18;
+       }
+
+       BUG_ON(sense_len > sizeof(*sense));
+
+       if (blk_sense_request(rq) || drive->sense_rq_armed)
+               return;
+
+       memset(sense, 0, sizeof(*sense));
+
+       blk_rq_init(rq->q, sense_rq);
+
+       err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
+                             GFP_NOIO);
+       if (unlikely(err)) {
+               if (printk_ratelimit())
+                       printk(KERN_WARNING "%s: failed to map sense buffer\n",
+                              drive->name);
+               return;
+       }
+
+       sense_rq->rq_disk = rq->rq_disk;
+       sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
+       sense_rq->cmd[4] = cmd_len;
+       sense_rq->cmd_type = REQ_TYPE_SENSE;
+       sense_rq->cmd_flags |= REQ_PREEMPT;
+
+       if (drive->media == ide_tape)
+               sense_rq->cmd[13] = REQ_IDETAPE_PC1;
+
+       drive->sense_rq_armed = true;
+}
+EXPORT_SYMBOL_GPL(ide_prep_sense);
+
+int ide_queue_sense_rq(ide_drive_t *drive, void *special)
+{
+       /* deferred failure from ide_prep_sense() */
+       if (!drive->sense_rq_armed) {
+               printk(KERN_WARNING "%s: failed queue sense request\n",
+                      drive->name);
+               return -ENOMEM;
+       }
+
+       drive->sense_rq.special = special;
+       drive->sense_rq_armed = false;
+
+       drive->hwif->rq = NULL;
+
+       elv_add_request(drive->queue, &drive->sense_rq,
+                       ELEVATOR_INSERT_FRONT, 0);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
+
 /*
  * Called when an error was detected during the last packet command.
- * We queue a request sense packet command in the head of the request list.
+ * We queue a request sense packet command at the head of the request
+ * queue.
  */
-void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk)
+void ide_retry_pc(ide_drive_t *drive)
 {
-       struct request *rq = &drive->request_sense_rq;
+       struct request *failed_rq = drive->hwif->rq;
+       struct request *sense_rq = &drive->sense_rq;
        struct ide_atapi_pc *pc = &drive->request_sense_pc;
 
        (void)ide_read_error(drive);
-       ide_create_request_sense_cmd(drive, pc);
+
+       /* init pc from sense_rq */
+       ide_init_pc(pc);
+       memcpy(pc->c, sense_rq->cmd, 12);
+       pc->buf = bio_data(sense_rq->bio);      /* pointer to mapped address */
+       pc->req_xfer = blk_rq_bytes(sense_rq);
+
        if (drive->media == ide_tape)
                set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
-       ide_queue_pc_head(drive, disk, pc, rq);
+
+       /*
+        * Push back the failed request and put request sense on top
+        * of it.  The failed command will be retried after sense data
+        * is acquired.
+        */
+       blk_requeue_request(failed_rq->q, failed_rq);
+       drive->hwif->rq = NULL;
+       if (ide_queue_sense_rq(drive, pc)) {
+               blk_start_request(failed_rq);
+               ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+       }
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
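
Taken together, the two new exports split sense handling into a prepare phase and a queue phase. A sketch of the intended calling sequence in a driver's request path:

	/* at issue time: map drive->sense_data and arm drive->sense_rq */
	ide_prep_sense(drive, rq);

	/* ... issue the packet command ... */

	/*
	 * on CHECK CONDITION: ide_retry_pc() requeues the failed rq, pushes
	 * the armed sense request in front of it, and errors the request
	 * out if the sense rq was never armed
	 */
	ide_retry_pc(drive);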
 
@@ -246,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
                return 32768;
        else if (blk_sense_request(rq) || blk_pc_request(rq) ||
                         rq->cmd_type == REQ_TYPE_ATA_PC)
-               return rq->data_len;
+               return blk_rq_bytes(rq);
        else
                return 0;
 }
@@ -276,7 +343,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
        struct ide_cmd *cmd = &hwif->cmd;
        struct request *rq = hwif->rq;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
-       xfer_func_t *xferfunc;
        unsigned int timeout, done;
        u16 bcount;
        u8 stat, ireason, dsc = 0;
@@ -303,18 +369,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                                        drive->name, rq_data_dir(pc->rq)
                                                     ? "write" : "read");
                        pc->flags |= PC_FLAG_DMA_ERROR;
-               } else {
+               } else
                        pc->xferred = pc->req_xfer;
-                       if (drive->pc_update_buffers)
-                               drive->pc_update_buffers(drive, pc);
-               }
                debug_log("%s: DMA finished\n", drive->name);
        }
 
        /* No more interrupts */
        if ((stat & ATA_DRQ) == 0) {
                int uptodate, error;
-               unsigned int done;
 
                debug_log("Packet command completed, %d bytes transferred\n",
                          pc->xferred);
@@ -343,7 +405,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                        debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
 
                        /* Retry operation */
-                       ide_retry_pc(drive, rq->rq_disk);
+                       ide_retry_pc(drive);
 
                        /* queued, but not started */
                        return ide_stopped;
@@ -361,7 +423,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
                if (blk_special_request(rq)) {
                        rq->errors = 0;
-                       done = blk_rq_bytes(rq);
                        error = 0;
                } else {
 
@@ -370,15 +431,10 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                                        rq->errors = -EIO;
                        }
 
-                       if (drive->media == ide_tape)
-                               done = ide_rq_bytes(rq); /* FIXME */
-                       else
-                               done = blk_rq_bytes(rq);
-
                        error = uptodate ? 0 : -EIO;
                }
 
-               ide_complete_rq(drive, error, done);
+               ide_complete_rq(drive, error, blk_rq_bytes(rq));
                return ide_stopped;
        }
 
@@ -407,21 +463,11 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                return ide_do_reset(drive);
        }
 
-       xferfunc = write ? tp_ops->output_data : tp_ops->input_data;
-
-       if (drive->media == ide_floppy && pc->buf == NULL) {
-               done = min_t(unsigned int, bcount, cmd->nleft);
-               ide_pio_bytes(drive, cmd, write, done);
-       } else if (drive->media == ide_tape && pc->bh) {
-               done = drive->pc_io_buffers(drive, pc, bcount, write);
-       } else {
-               done = min_t(unsigned int, bcount, pc->req_xfer - pc->xferred);
-               xferfunc(drive, NULL, pc->cur_pos, done);
-       }
+       done = min_t(unsigned int, bcount, cmd->nleft);
+       ide_pio_bytes(drive, cmd, write, done);
 
-       /* Update the current position */
+       /* Update transferred byte count */
        pc->xferred += done;
-       pc->cur_pos += done;
 
        bcount -= done;
 
@@ -599,7 +645,6 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
 
                /* We haven't transferred any data yet */
                pc->xferred = 0;
-               pc->cur_pos = pc->buf;
 
                valid_tf = IDE_VALID_DEVICE;
                bcount = ((drive->media == ide_tape) ?
index 925eb9e245d1e7f0028dbc3cd0e698bce6b98962..1799328decfb1c79ccf86cf1eb136a6194d410d7 100644 (file)
@@ -206,54 +206,25 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
        ide_cd_log_error(drive->name, failed_command, sense);
 }
 
-static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
-                                     struct request *failed_command)
-{
-       struct cdrom_info *info         = drive->driver_data;
-       struct request *rq              = &drive->request_sense_rq;
-
-       ide_debug_log(IDE_DBG_SENSE, "enter");
-
-       if (sense == NULL)
-               sense = &info->sense_data;
-
-       /* stuff the sense request in front of our current request */
-       blk_rq_init(NULL, rq);
-       rq->cmd_type = REQ_TYPE_ATA_PC;
-       rq->rq_disk = info->disk;
-
-       rq->data = sense;
-       rq->cmd[0] = GPCMD_REQUEST_SENSE;
-       rq->cmd[4] = 18;
-       rq->data_len = 18;
-
-       rq->cmd_type = REQ_TYPE_SENSE;
-       rq->cmd_flags |= REQ_PREEMPT;
-
-       /* NOTE! Save the failed command in "rq->buffer" */
-       rq->buffer = (void *) failed_command;
-
-       if (failed_command)
-               ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
-                                            failed_command->cmd[0]);
-
-       drive->hwif->rq = NULL;
-
-       elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
 static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
 {
        /*
-        * For REQ_TYPE_SENSE, "rq->buffer" points to the original
-        * failed request
+        * For REQ_TYPE_SENSE, "rq->special" points to the original
+        * failed request.  Also, the sense data should be read
+        * directly from rq, which may differ from the original sense
+        * buffer if the data was copied during mapping.
         */
-       struct request *failed = (struct request *)rq->buffer;
-       struct cdrom_info *info = drive->driver_data;
-       void *sense = &info->sense_data;
+       struct request *failed = (struct request *)rq->special;
+       void *sense = bio_data(rq->bio);
 
        if (failed) {
                if (failed->sense) {
+                       /*
+                        * Sense is always read into drive->sense_data.
+                        * Copy back if the failed request has its
+                        * sense pointer set.
+                        */
+                       memcpy(failed->sense, sense, 18);
                        sense = failed->sense;
                        failed->sense_len = rq->sense_len;
                }
@@ -428,22 +399,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 
        /* if we got a CHECK_CONDITION status, queue a request sense command */
        if (stat & ATA_ERR)
-               cdrom_queue_request_sense(drive, NULL, NULL);
+               return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
        return 1;
 
 end_request:
        if (stat & ATA_ERR) {
-               struct request_queue *q = drive->queue;
-               unsigned long flags;
-
-               spin_lock_irqsave(q->queue_lock, flags);
-               blkdev_dequeue_request(rq);
-               spin_unlock_irqrestore(q->queue_lock, flags);
-
                hwif->rq = NULL;
-
-               cdrom_queue_request_sense(drive, rq->sense, rq);
-               return 1;
+               return ide_queue_sense_rq(drive, rq) ? 2 : 1;
        } else
                return 2;
 }
@@ -503,14 +465,8 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
         * and some drives don't send them.  Sigh.
         */
        if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
-           cmd->nleft > 0 && cmd->nleft <= 5) {
-               unsigned int ofs = cmd->nbytes - cmd->nleft;
-
-               while (cmd->nleft > 0) {
-                       *((u8 *)rq->data + ofs++) = 0;
-                       cmd->nleft--;
-               }
-       }
+           cmd->nleft > 0 && cmd->nleft <= 5)
+               cmd->nleft = 0;
 }
 
 int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
@@ -543,14 +499,18 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
                rq->cmd_flags |= cmd_flags;
                rq->timeout = timeout;
                if (buffer) {
-                       rq->data = buffer;
-                       rq->data_len = *bufflen;
+                       error = blk_rq_map_kern(drive->queue, rq, buffer,
+                                               *bufflen, GFP_NOIO);
+                       if (error) {
+                               blk_put_request(rq);
+                               return error;
+                       }
                }
 
                error = blk_execute_rq(drive->queue, info->disk, rq, 0);
 
                if (buffer)
-                       *bufflen = rq->data_len;
+                       *bufflen = rq->resid_len;
 
                flags = rq->cmd_flags;
                blk_put_request(rq);
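
Note that the *bufflen semantics carry over: the old code shrank rq->data_len toward the residual during the transfer, and rq->resid_len reports that residual directly, e.g. a command that asks for 255 bytes and receives 12 finishes with resid_len == 243.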
@@ -608,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        struct request *rq = hwif->rq;
        ide_expiry_t *expiry = NULL;
        int dma_error = 0, dma, thislen, uptodate = 0;
-       int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
+       int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
        int sense = blk_sense_request(rq);
        unsigned int timeout;
        u16 len;
@@ -738,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 
 out_end:
        if (blk_pc_request(rq) && rc == 0) {
-               unsigned int dlen = rq->data_len;
-
-               rq->data_len = 0;
-
-               if (blk_end_request(rq, 0, dlen))
-                       BUG();
-
+               rq->resid_len = 0;
+               blk_end_request_all(rq, 0);
                hwif->rq = NULL;
        } else {
                if (sense && uptodate)
@@ -762,21 +717,13 @@ out_end:
                        ide_cd_error_cmd(drive, cmd);
 
                /* make sure it's fully ended */
-               if (blk_pc_request(rq))
-                       nsectors = (rq->data_len + 511) >> 9;
-               else
-                       nsectors = rq->hard_nr_sectors;
-
-               if (nsectors == 0)
-                       nsectors = 1;
-
                if (blk_fs_request(rq) == 0) {
-                       rq->data_len -= (cmd->nbytes - cmd->nleft);
+                       rq->resid_len -= cmd->nbytes - cmd->nleft;
                        if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
-                               rq->data_len += cmd->last_xfer_len;
+                               rq->resid_len += cmd->last_xfer_len;
                }
 
-               ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+               ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
 
                if (sense && rc == 2)
                        ide_error(drive, "request sense failure", stat);
@@ -809,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
        }
 
        /* fs requests *must* be hardware frame aligned */
-       if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
-           (rq->sector & (sectors_per_frame - 1)))
+       if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
+           (blk_rq_pos(rq) & (sectors_per_frame - 1)))
                return ide_stopped;
 
        /* use DMA, if possible */
@@ -838,15 +785,10 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
        drive->dma = 0;
 
        /* sg request */
-       if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) {
+       if (rq->bio) {
                struct request_queue *q = drive->queue;
+               char *buf = bio_data(rq->bio);
                unsigned int alignment;
-               char *buf;
-
-               if (rq->bio)
-                       buf = bio_data(rq->bio);
-               else
-                       buf = rq->data;
 
                drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
 
@@ -858,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
                 */
                alignment = queue_dma_alignment(q) | q->dma_pad_mask;
                if ((unsigned long)buf & alignment
-                   || rq->data_len & q->dma_pad_mask
+                   || blk_rq_bytes(rq) & q->dma_pad_mask
                    || object_is_on_stack(buf))
                        drive->dma = 0;
        }
@@ -896,6 +838,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
                goto out_end;
        }
 
+       /* prepare sense request for this command */
+       ide_prep_sense(drive, rq);
+
        memset(&cmd, 0, sizeof(cmd));
 
        if (rq_data_dir(rq))
@@ -903,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 
        cmd.rq = rq;
 
-       if (blk_fs_request(rq) || rq->data_len) {
-               ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
-                                                        : rq->data_len);
+       if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+               ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
                ide_map_sg(drive, &cmd);
        }
 
        return ide_issue_pc(drive, &cmd);
 out_end:
-       nsectors = rq->hard_nr_sectors;
+       nsectors = blk_rq_sectors(rq);
 
        if (nsectors == 0)
                nsectors = 1;
@@ -1395,8 +1339,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
        int hard_sect = queue_hardsect_size(q);
-       long block = (long)rq->hard_sector / (hard_sect >> 9);
-       unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+       long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+       unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 
        memset(rq->cmd, 0, BLK_MAX_CDB);
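
Worked example of the accessor swap: with a 2048-byte hard sector, hard_sect >> 9 == 4, so a request starting at 512-byte sector 100 and spanning 8 sectors yields block == 25 and blocks == 2, the same values the old rq->hard_sector / rq->hard_nr_sectors arithmetic produced.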
 
index 1d97101099ce20bab8e0b319f6da278a09bd9afa..93a3cf1b0f3f8c61b974b9b16a190502f5d4d038 100644 (file)
@@ -87,10 +87,6 @@ struct cdrom_info {
 
        struct atapi_toc *toc;
 
-       /* The result of the last successful request sense command
-          on this device. */
-       struct request_sense sense_data;
-
        u8 max_speed;           /* Max speed of the drive. */
        u8 current_speed;       /* Current speed of the drive. */
 
index a9fbe2c31210cc1400dc2ce132c74211dbd40642..ad18e14043c57b5ae52bb9015efb8e7588cbcfdc 100644 (file)
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                                        sector_t block)
 {
        ide_hwif_t *hwif        = drive->hwif;
-       u16 nsectors            = (u16)rq->nr_sectors;
+       u16 nsectors            = (u16)blk_rq_sectors(rq);
        u8 lba48                = !!(drive->dev_flags & IDE_DFLAG_LBA48);
        u8 dma                  = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
        struct ide_cmd          cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
        ide_startstop_t         rc;
 
        if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
-               if (block + rq->nr_sectors > 1ULL << 28)
+               if (block + blk_rq_sectors(rq) > 1ULL << 28)
                        dma = 0;
                else
                        lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 
        ledtrig_ide_activity();
 
-       pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+       pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
                 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
-                (unsigned long long)block, rq->nr_sectors,
+                (unsigned long long)block, blk_rq_sectors(rq),
                 (unsigned long)rq->buffer);
 
        if (hwif->rw_disk)
@@ -411,7 +411,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
        cmd->protocol = ATA_PROT_NODATA;
 
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
-       rq->cmd_flags |= REQ_SOFTBARRIER;
        rq->special = cmd;
 }
 
index a0b8cab1d9a682249200fce35bc5ea5c8223079f..001f68f0bb285c62d0b42b6dbb1508d92182923a 100644 (file)
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
                                ide_finish_cmd(drive, cmd, stat);
                        else
                                ide_complete_rq(drive, 0,
-                                               cmd->rq->nr_sectors << 9);
+                                               blk_rq_sectors(cmd->rq) << 9);
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@ -510,23 +510,11 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
        /*
         * un-busy drive etc and make sure request is sane
         */
-
        rq = hwif->rq;
-       if (!rq)
-               goto out;
-
-       hwif->rq = NULL;
-
-       rq->errors = 0;
-
-       if (!rq->bio)
-               goto out;
-
-       rq->sector = rq->bio->bi_sector;
-       rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
-       rq->hard_cur_sectors = rq->current_nr_sectors;
-       rq->buffer = bio_data(rq->bio);
-out:
+       if (rq) {
+               hwif->rq = NULL;
+               rq->errors = 0;
+       }
        return ret;
 }
 
index 2b4868d95f8b0597de827c36df575cec1375d8f7..650981758f15cd0b83e05d455cb590a61401edf4 100644 (file)
@@ -134,13 +134,17 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
        drive->pc = pc;
 
        if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
+               unsigned int done = blk_rq_bytes(drive->hwif->rq);
+
                if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
                        ide_floppy_report_error(floppy, pc);
+
                /* Giving up */
                pc->error = IDE_DRV_ERROR_GENERAL;
 
                drive->failed_pc = NULL;
                drive->pc_callback(drive, 0);
+               ide_complete_rq(drive, -EIO, done);
                return ide_stopped;
        }
 
@@ -190,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 {
        struct ide_disk_obj *floppy = drive->driver_data;
        int block = sector / floppy->bs_factor;
-       int blocks = rq->nr_sectors / floppy->bs_factor;
+       int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
        int cmd = rq_data_dir(rq);
 
        ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -216,16 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
        ide_init_pc(pc);
        memcpy(pc->c, rq->cmd, sizeof(pc->c));
        pc->rq = rq;
-       if (rq->data_len && rq_data_dir(rq) == WRITE)
-               pc->flags |= PC_FLAG_WRITING;
-       pc->buf = rq->data;
-       if (rq->bio)
+       if (blk_rq_bytes(rq)) {
                pc->flags |= PC_FLAG_DMA_OK;
-       /*
-        * possibly problematic, doesn't look like ide-floppy correctly
-        * handled scattered requests if dma fails...
-        */
-       pc->req_xfer = pc->buf_size = rq->data_len;
+               if (rq_data_dir(rq) == WRITE)
+                       pc->flags |= PC_FLAG_WRITING;
+       }
+       /* pio will be performed by ide_pio_bytes() which handles sg fine */
+       pc->buf = NULL;
+       pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
 }
 
 static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -257,16 +259,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
                        goto out_end;
        }
        if (blk_fs_request(rq)) {
-               if (((long)rq->sector % floppy->bs_factor) ||
-                   (rq->nr_sectors % floppy->bs_factor)) {
+               if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
+                   (blk_rq_sectors(rq) % floppy->bs_factor)) {
                        printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
                                drive->name);
                        goto out_end;
                }
                pc = &floppy->queued_pc;
                idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
-       } else if (blk_special_request(rq)) {
-               pc = (struct ide_atapi_pc *) rq->buffer;
+       } else if (blk_special_request(rq) || blk_sense_request(rq)) {
+               pc = (struct ide_atapi_pc *)rq->special;
        } else if (blk_pc_request(rq)) {
                pc = &floppy->queued_pc;
                idefloppy_blockpc_cmd(floppy, pc, rq);
@@ -275,6 +277,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
                goto out_end;
        }
 
+       ide_prep_sense(drive, rq);
+
        memset(&cmd, 0, sizeof(cmd));
 
        if (rq_data_dir(rq))
index 35dc38d3b2c58d2f40e3a299f5b6c0832f88da82..e4e3a0e3201e5516700b60184bfc7ebe03d308bd 100644 (file)
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 unsigned int ide_rq_bytes(struct request *rq)
 {
        if (blk_pc_request(rq))
-               return rq->data_len;
+               return blk_rq_bytes(rq);
        else
-               return rq->hard_cur_sectors << 9;
+               return blk_rq_cur_sectors(rq) << 9;
 }
 EXPORT_SYMBOL_GPL(ide_rq_bytes);
 
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
         * and complete the whole request right now
         */
        if (blk_noretry_request(rq) && error <= 0)
-               nr_bytes = rq->hard_nr_sectors << 9;
+               nr_bytes = blk_rq_sectors(rq) << 9;
 
        rc = ide_end_rq(drive, rq, error, nr_bytes);
        if (rc == 0)
@@ -248,14 +248,7 @@ void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
        struct scatterlist *sg = hwif->sg_table;
        struct request *rq = cmd->rq;
 
-       if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-               sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-               cmd->sg_nents = 1;
-       } else if (!rq->bio) {
-               sg_init_one(sg, rq->data, rq->data_len);
-               cmd->sg_nents = 1;
-       } else
-               cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+       cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 }
 EXPORT_SYMBOL_GPL(ide_map_sg);
 
@@ -286,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 
        if (cmd) {
                if (cmd->protocol == ATA_PROT_PIO) {
-                       ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+                       ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
                        ide_map_sg(drive, cmd);
                }
 
@@ -371,7 +364,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
                if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
                        return execute_drive_cmd(drive, rq);
                else if (blk_pm_request(rq)) {
-                       struct request_pm_state *pm = rq->data;
+                       struct request_pm_state *pm = rq->special;
 #ifdef DEBUG_PM
                        printk("%s: start_power_step(step: %d)\n",
                                drive->name, pm->pm_step);
@@ -394,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 
                drv = *(struct ide_driver **)rq->rq_disk->private_data;
 
-               return drv->do_request(drive, rq, rq->sector);
+               return drv->do_request(drive, rq, blk_rq_pos(rq));
        }
        return do_special(drive);
 kill_rq:
@@ -484,6 +477,9 @@ void do_ide_request(struct request_queue *q)
 
        spin_unlock_irq(q->queue_lock);
 
+       /* HLD do_request() callback might sleep, make sure it's okay */
+       might_sleep();
+
        if (ide_lock_host(host, hwif))
                goto plug_device_2;
 
@@ -491,10 +487,10 @@ void do_ide_request(struct request_queue *q)
 
        if (!ide_lock_port(hwif)) {
                ide_hwif_t *prev_port;
+
+               WARN_ON_ONCE(hwif->rq);
 repeat:
                prev_port = hwif->host->cur_port;
-               hwif->rq = NULL;
-
                if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
                    time_after(drive->sleep, jiffies)) {
                        ide_unlock_port(hwif);
@@ -523,7 +519,9 @@ repeat:
                 * we know that the queue isn't empty, but this can happen
                 * if the q->prep_rq_fn() decides to kill a request
                 */
-               rq = elv_next_request(drive->queue);
+               if (!rq)
+                       rq = blk_fetch_request(drive->queue);
+
                spin_unlock_irq(q->queue_lock);
                spin_lock_irq(&hwif->lock);
 
@@ -535,7 +533,7 @@ repeat:
                /*
                 * Sanity: don't accept a request that isn't a PM request
                 * if we are currently power managed. This is very important as
-                * blk_stop_queue() doesn't prevent the elv_next_request()
+                * blk_stop_queue() doesn't prevent the blk_fetch_request()
                 * above from returning whatever is in the queue. Since we call
                 * ide_do_request() ourselves, we end up taking requests while
                 * the queue is blocked...
@@ -559,8 +557,11 @@ repeat:
                startstop = start_request(drive, rq);
                spin_lock_irq(&hwif->lock);
 
-               if (startstop == ide_stopped)
+               if (startstop == ide_stopped) {
+                       rq = hwif->rq;
+                       hwif->rq = NULL;
                        goto repeat;
+               }
        } else
                goto plug_device;
 out:
@@ -576,18 +577,24 @@ plug_device:
 plug_device_2:
        spin_lock_irq(q->queue_lock);
 
+       if (rq)
+               blk_requeue_request(q, rq);
        if (!elv_queue_empty(q))
                blk_plug_device(q);
 }
 
-static void ide_plug_device(ide_drive_t *drive)
+static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 {
        struct request_queue *q = drive->queue;
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
+
+       if (rq)
+               blk_requeue_request(q, rq);
        if (!elv_queue_empty(q))
                blk_plug_device(q);
+
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -636,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
        unsigned long   flags;
        int             wait = -1;
        int             plug_device = 0;
+       struct request  *uninitialized_var(rq_in_flight);
 
        spin_lock_irqsave(&hwif->lock, flags);
 
@@ -697,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
                spin_lock_irq(&hwif->lock);
                enable_irq(hwif->irq);
                if (startstop == ide_stopped) {
+                       rq_in_flight = hwif->rq;
+                       hwif->rq = NULL;
                        ide_unlock_port(hwif);
                        plug_device = 1;
                }
@@ -705,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
 
        if (plug_device) {
                ide_unlock_host(hwif->host);
-               ide_plug_device(drive);
+               ide_requeue_and_plug(drive, rq_in_flight);
        }
 }
 
@@ -791,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
        ide_startstop_t startstop;
        irqreturn_t irq_ret = IRQ_NONE;
        int plug_device = 0;
+       struct request *uninitialized_var(rq_in_flight);
 
        if (host->host_flags & IDE_HFLAG_SERIALIZE) {
                if (hwif != host->cur_port)
@@ -870,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
         */
        if (startstop == ide_stopped) {
                BUG_ON(hwif->handler);
+               rq_in_flight = hwif->rq;
+               hwif->rq = NULL;
                ide_unlock_port(hwif);
                plug_device = 1;
        }
@@ -879,7 +892,7 @@ out:
 out_early:
        if (plug_device) {
                ide_unlock_host(hwif->host);
-               ide_plug_device(drive);
+               ide_requeue_and_plug(drive, rq_in_flight);
        }
 
        return irq_ret;
index c1c25ebbaa1fb3ef6abac4bb720946bfd6af90c6..5991b23793f20ee33f164a2dea752804f7580310 100644 (file)
@@ -231,7 +231,6 @@ static int generic_drive_reset(ide_drive_t *drive)
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd_len = 1;
        rq->cmd[0] = REQ_DRIVE_RESET;
-       rq->cmd_flags |= REQ_SOFTBARRIER;
        if (blk_execute_rq(drive->queue, NULL, rq, 1))
                ret = rq->errors;
        blk_put_request(rq);
index 56ff8c46c7d10a4cf0eefa27bd69769ac52c7953..05b7fbc7ead54d3c41f00cc30310368dccaf70f8 100644 (file)
@@ -114,7 +114,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
 
                if (rq)
                        printk(KERN_CONT ", sector=%llu",
-                              (unsigned long long)rq->sector);
+                              (unsigned long long)blk_rq_pos(rq));
        }
        printk(KERN_CONT "\n");
 }
index 310d03f2b5b793e456305547ffd3d86e036c66ca..a914023d6d035d9e530178ff2422a7bf386cc2cd 100644 (file)
@@ -24,11 +24,8 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
                        start_queue = 1;
                spin_unlock_irq(&hwif->lock);
 
-               if (start_queue) {
-                       spin_lock_irq(q->queue_lock);
-                       blk_start_queueing(q);
-                       spin_unlock_irq(q->queue_lock);
-               }
+               if (start_queue)
+                       blk_run_queue(q);
                return;
        }
        spin_unlock_irq(&hwif->lock);
index 0d8a151c0a01da799d0c68f96b20dc2dbe1928bc..ba1488bd84307bb90850977573d2e80c9778af69 100644 (file)
@@ -7,7 +7,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        struct request_pm_state rqpm;
-       struct ide_cmd cmd;
        int ret;
 
        /* call ACPI _GTM only once */
@@ -15,11 +14,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
                ide_acpi_get_timing(hwif);
 
        memset(&rqpm, 0, sizeof(rqpm));
-       memset(&cmd, 0, sizeof(cmd));
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_PM_SUSPEND;
-       rq->special = &cmd;
-       rq->data = &rqpm;
+       rq->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_SUSPEND;
        if (mesg.event == PM_EVENT_PRETHAW)
                mesg.event = PM_EVENT_FREEZE;
@@ -41,7 +38,6 @@ int generic_ide_resume(struct device *dev)
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        struct request_pm_state rqpm;
-       struct ide_cmd cmd;
        int err;
 
        /* call ACPI _PS0 / _STM only once */
@@ -53,12 +49,10 @@ int generic_ide_resume(struct device *dev)
        ide_acpi_exec_tfs(drive);
 
        memset(&rqpm, 0, sizeof(rqpm));
-       memset(&cmd, 0, sizeof(cmd));
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_PM_RESUME;
        rq->cmd_flags |= REQ_PREEMPT;
-       rq->special = &cmd;
-       rq->data = &rqpm;
+       rq->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_RESUME;
        rqpm.pm_state = PM_EVENT_ON;
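
With rq->data gone, the whole PM round trip rides on one pointer. A condensed sketch of the resume submission (error handling trimmed; at_head == 1 is an assumption here, matching the REQ_PREEMPT intent):

	struct request_pm_state rqpm;
	struct request *rq;

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_RESUME;
	rq->cmd_flags |= REQ_PREEMPT;
	rq->special = &rqpm;	/* the one remaining side channel */
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	/* synchronous, so the on-stack rqpm stays valid throughout */
	blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);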
 
@@ -77,7 +71,7 @@ int generic_ide_resume(struct device *dev)
 
 void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
 {
-       struct request_pm_state *pm = rq->data;
+       struct request_pm_state *pm = rq->special;
 
 #ifdef DEBUG_PM
        printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -107,10 +101,8 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
 
 ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
 {
-       struct request_pm_state *pm = rq->data;
-       struct ide_cmd *cmd = rq->special;
-
-       memset(cmd, 0, sizeof(*cmd));
+       struct request_pm_state *pm = rq->special;
+       struct ide_cmd cmd = { };
 
        switch (pm->pm_step) {
        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
@@ -123,12 +115,12 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
                        return ide_stopped;
                }
                if (ata_id_flush_ext_enabled(drive->id))
-                       cmd->tf.command = ATA_CMD_FLUSH_EXT;
+                       cmd.tf.command = ATA_CMD_FLUSH_EXT;
                else
-                       cmd->tf.command = ATA_CMD_FLUSH;
+                       cmd.tf.command = ATA_CMD_FLUSH;
                goto out_do_tf;
        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
-               cmd->tf.command = ATA_CMD_STANDBYNOW1;
+               cmd.tf.command = ATA_CMD_STANDBYNOW1;
                goto out_do_tf;
        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
                ide_set_max_pio(drive);
@@ -141,7 +133,7 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
                        ide_complete_power_step(drive, rq);
                return ide_stopped;
        case IDE_PM_IDLE:               /* Resume step 2 (idle) */
-               cmd->tf.command = ATA_CMD_IDLEIMMEDIATE;
+               cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
                goto out_do_tf;
        case IDE_PM_RESTORE_DMA:        /* Resume step 3 (restore DMA) */
                /*
@@ -163,11 +155,11 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
        return ide_stopped;
 
 out_do_tf:
-       cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
-       cmd->valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
-       cmd->protocol = ATA_PROT_NODATA;
+       cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
+       cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
+       cmd.protocol = ATA_PROT_NODATA;
 
-       return do_rw_taskfile(drive, cmd);
+       return do_rw_taskfile(drive, &cmd);
 }
 
 /**
@@ -181,7 +173,7 @@ out_do_tf:
 void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 {
        struct request_queue *q = drive->queue;
-       struct request_pm_state *pm = rq->data;
+       struct request_pm_state *pm = rq->special;
        unsigned long flags;
 
        ide_complete_power_step(drive, rq);
@@ -207,7 +199,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 
 void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 {
-       struct request_pm_state *pm = rq->data;
+       struct request_pm_state *pm = rq->special;
 
        if (blk_pm_suspend_request(rq) &&
            pm->pm_step == IDE_PM_START_SUSPEND)
index 3a53e0834cf798d6bbc7c5fca982d192d1941fd3..683ff37d4079471ebe9b202ca2631b3e3164e33c 100644 (file)
@@ -131,13 +131,6 @@ enum {
        IDETAPE_DIR_WRITE = (1 << 2),
 };
 
-struct idetape_bh {
-       u32 b_size;
-       atomic_t b_count;
-       struct idetape_bh *b_reqnext;
-       char *b_data;
-};
-
 /* Tape door status */
 #define DOOR_UNLOCKED                  0
 #define DOOR_LOCKED                    1
@@ -219,18 +212,12 @@ typedef struct ide_tape_obj {
 
        /* Data buffer size chosen based on the tape's recommendation */
        int buffer_size;
-       /* merge buffer */
-       struct idetape_bh *merge_bh;
-       /* size of the merge buffer */
-       int merge_bh_size;
-       /* pointer to current buffer head within the merge buffer */
-       struct idetape_bh *bh;
-       char *b_data;
-       int b_count;
-
-       int pages_per_buffer;
-       /* Wasted space in each stage */
-       int excess_bh_size;
+       /* Staging buffer of buffer_size bytes */
+       void *buf;
+       /* The read/write cursor */
+       void *cur;
+       /* The number of valid bytes in buf */
+       size_t valid;
 
        /* Measures average tape speed */
        unsigned long avg_time;
@@ -297,84 +284,6 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
        return tape;
 }
 
-static int idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
-                                 unsigned int bcount)
-{
-       struct idetape_bh *bh = pc->bh;
-       int count;
-
-       while (bcount) {
-               if (bh == NULL)
-                       break;
-               count = min(
-                       (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
-                       bcount);
-               drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
-                                       atomic_read(&bh->b_count), count);
-               bcount -= count;
-               atomic_add(count, &bh->b_count);
-               if (atomic_read(&bh->b_count) == bh->b_size) {
-                       bh = bh->b_reqnext;
-                       if (bh)
-                               atomic_set(&bh->b_count, 0);
-               }
-       }
-
-       pc->bh = bh;
-
-       return bcount;
-}
-
-static int idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
-                                  unsigned int bcount)
-{
-       struct idetape_bh *bh = pc->bh;
-       int count;
-
-       while (bcount) {
-               if (bh == NULL)
-                       break;
-               count = min((unsigned int)pc->b_count, (unsigned int)bcount);
-               drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
-               bcount -= count;
-               pc->b_data += count;
-               pc->b_count -= count;
-               if (!pc->b_count) {
-                       bh = bh->b_reqnext;
-                       pc->bh = bh;
-                       if (bh) {
-                               pc->b_data = bh->b_data;
-                               pc->b_count = atomic_read(&bh->b_count);
-                       }
-               }
-       }
-
-       return bcount;
-}
-
-static void idetape_update_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
-       struct idetape_bh *bh = pc->bh;
-       int count;
-       unsigned int bcount = pc->xferred;
-
-       if (pc->flags & PC_FLAG_WRITING)
-               return;
-       while (bcount) {
-               if (bh == NULL) {
-                       printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
-                                       __func__);
-                       return;
-               }
-               count = min((unsigned int)bh->b_size, (unsigned int)bcount);
-               atomic_set(&bh->b_count, count);
-               if (atomic_read(&bh->b_count) == bh->b_size)
-                       bh = bh->b_reqnext;
-               bcount -= count;
-       }
-       pc->bh = bh;
-}
-
 /*
  * called on each failed packet command retry to analyze the request sense. We
  * currently do not utilize this information.
@@ -392,12 +301,10 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
                 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
 
        /* Correct pc->xferred by asking the tape.       */
-       if (pc->flags & PC_FLAG_DMA_ERROR) {
+       if (pc->flags & PC_FLAG_DMA_ERROR)
                pc->xferred = pc->req_xfer -
                        tape->blk_size *
                        get_unaligned_be32(&sense[3]);
-               idetape_update_buffers(drive, pc);
-       }
 
        /*
         * If error was the result of a zero-length read or write command,
@@ -436,29 +343,6 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
        }
 }
 
-/* Free data buffers completely. */
-static void ide_tape_kfree_buffer(idetape_tape_t *tape)
-{
-       struct idetape_bh *prev_bh, *bh = tape->merge_bh;
-
-       while (bh) {
-               u32 size = bh->b_size;
-
-               while (size) {
-                       unsigned int order = fls(size >> PAGE_SHIFT)-1;
-
-                       if (bh->b_data)
-                               free_pages((unsigned long)bh->b_data, order);
-
-                       size &= (order-1);
-                       bh->b_data += (1 << order) * PAGE_SIZE;
-               }
-               prev_bh = bh;
-               bh = bh->b_reqnext;
-               kfree(prev_bh);
-       }
-}
-
 static void ide_tape_handle_dsc(ide_drive_t *);
 
 static int ide_tape_callback(ide_drive_t *drive, int dsc)
@@ -496,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
                }
 
                tape->first_frame += blocks;
-               rq->current_nr_sectors -= blocks;
+               rq->resid_len -= blocks * tape->blk_size;
 
                if (pc->error) {
                        uptodate = 0;
@@ -558,19 +442,6 @@ static void ide_tape_handle_dsc(ide_drive_t *drive)
        idetape_postpone_request(drive);
 }
 
-static int ide_tape_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
-                               unsigned int bcount, int write)
-{
-       unsigned int bleft;
-
-       if (write)
-               bleft = idetape_output_buffers(drive, pc, bcount);
-       else
-               bleft = idetape_input_buffers(drive, pc, bcount);
-
-       return bcount - bleft;
-}
-
 /*
  * Packet Command Interface
  *
@@ -622,6 +493,8 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
 
        if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
                (pc->flags & PC_FLAG_ABORT)) {
+               unsigned int done = blk_rq_bytes(drive->hwif->rq);
+
                /*
                 * We will "abort" retrying a packet command in case legitimate
                 * error code was received (crossing a filemark, or end of the
@@ -641,8 +514,10 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
                        /* Giving up */
                        pc->error = IDE_DRV_ERROR_GENERAL;
                }
+
                drive->failed_pc = NULL;
                drive->pc_callback(drive, 0);
+               ide_complete_rq(drive, -EIO, done);
                return ide_stopped;
        }
        debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -695,7 +570,7 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
                                printk(KERN_ERR "ide-tape: %s: I/O error, ",
                                                tape->name);
                        /* Retry operation */
-                       ide_retry_pc(drive, tape->disk);
+                       ide_retry_pc(drive);
                        return ide_stopped;
                }
                pc->error = 0;
@@ -711,27 +586,22 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
                                   struct ide_atapi_pc *pc, struct request *rq,
                                   u8 opcode)
 {
-       struct idetape_bh *bh = (struct idetape_bh *)rq->special;
-       unsigned int length = rq->current_nr_sectors;
+       unsigned int length = blk_rq_sectors(rq);
 
        ide_init_pc(pc);
        put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
        pc->c[1] = 1;
-       pc->bh = bh;
        pc->buf = NULL;
        pc->buf_size = length * tape->blk_size;
        pc->req_xfer = pc->buf_size;
        if (pc->req_xfer == tape->buffer_size)
                pc->flags |= PC_FLAG_DMA_OK;
 
-       if (opcode == READ_6) {
+       if (opcode == READ_6)
                pc->c[0] = READ_6;
-               atomic_set(&bh->b_count, 0);
-       } else if (opcode == WRITE_6) {
+       else if (opcode == WRITE_6) {
                pc->c[0] = WRITE_6;
                pc->flags |= PC_FLAG_WRITING;
-               pc->b_data = bh->b_data;
-               pc->b_count = atomic_read(&bh->b_count);
        }
 
        memcpy(rq->cmd, pc->c, 12);
@@ -747,12 +617,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
        struct ide_cmd cmd;
        u8 stat;
 
-       debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu,"
-                       " current_nr_sectors: %u\n",
-                       (unsigned long long)rq->sector, rq->nr_sectors,
-                       rq->current_nr_sectors);
+       debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n"
+                 (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
 
-       if (!blk_special_request(rq)) {
+       if (!(blk_special_request(rq) || blk_sense_request(rq))) {
                /* We do not support buffer cache originated requests. */
                printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
                        "request queue (%d)\n", drive->name, rq->cmd_type);
@@ -828,7 +696,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
                goto out;
        }
        if (rq->cmd[13] & REQ_IDETAPE_PC1) {
-               pc = (struct ide_atapi_pc *) rq->buffer;
+               pc = (struct ide_atapi_pc *)rq->special;
                rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
                rq->cmd[13] |= REQ_IDETAPE_PC2;
                goto out;
@@ -840,6 +708,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
        BUG();
 
 out:
+       /* prepare sense request for this command */
+       ide_prep_sense(drive, rq);
+
        memset(&cmd, 0, sizeof(cmd));
 
        if (rq_data_dir(rq))
@@ -847,167 +718,10 @@ out:
 
        cmd.rq = rq;
 
-       return ide_tape_issue_pc(drive, &cmd, pc);
-}
-
-/*
- * The function below uses __get_free_pages to allocate a data buffer of size
- * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
- * much as possible.
- *
- * It returns a pointer to the newly allocated buffer, or NULL in case of
- * failure.
- */
-static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
-                                                 int full, int clear)
-{
-       struct idetape_bh *prev_bh, *bh, *merge_bh;
-       int pages = tape->pages_per_buffer;
-       unsigned int order, b_allocd;
-       char *b_data = NULL;
-
-       merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
-       bh = merge_bh;
-       if (bh == NULL)
-               goto abort;
-
-       order = fls(pages) - 1;
-       bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
-       if (!bh->b_data)
-               goto abort;
-       b_allocd = (1 << order) * PAGE_SIZE;
-       pages &= (order-1);
-
-       if (clear)
-               memset(bh->b_data, 0, b_allocd);
-       bh->b_reqnext = NULL;
-       bh->b_size = b_allocd;
-       atomic_set(&bh->b_count, full ? bh->b_size : 0);
-
-       while (pages) {
-               order = fls(pages) - 1;
-               b_data = (char *) __get_free_pages(GFP_KERNEL, order);
-               if (!b_data)
-                       goto abort;
-               b_allocd = (1 << order) * PAGE_SIZE;
-
-               if (clear)
-                       memset(b_data, 0, b_allocd);
-
-               /* newly allocated page frames below buffer header or ...*/
-               if (bh->b_data == b_data + b_allocd) {
-                       bh->b_size += b_allocd;
-                       bh->b_data -= b_allocd;
-                       if (full)
-                               atomic_add(b_allocd, &bh->b_count);
-                       continue;
-               }
-               /* they are above the header */
-               if (b_data == bh->b_data + bh->b_size) {
-                       bh->b_size += b_allocd;
-                       if (full)
-                               atomic_add(b_allocd, &bh->b_count);
-                       continue;
-               }
-               prev_bh = bh;
-               bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
-               if (!bh) {
-                       free_pages((unsigned long) b_data, order);
-                       goto abort;
-               }
-               bh->b_reqnext = NULL;
-               bh->b_data = b_data;
-               bh->b_size = b_allocd;
-               atomic_set(&bh->b_count, full ? bh->b_size : 0);
-               prev_bh->b_reqnext = bh;
-
-               pages &= (order-1);
-       }
-
-       bh->b_size -= tape->excess_bh_size;
-       if (full)
-               atomic_sub(tape->excess_bh_size, &bh->b_count);
-       return merge_bh;
-abort:
-       ide_tape_kfree_buffer(tape);
-       return NULL;
-}
+       ide_init_sg_cmd(&cmd, pc->req_xfer);
+       ide_map_sg(drive, &cmd);
 
-static int idetape_copy_stage_from_user(idetape_tape_t *tape,
-                                       const char __user *buf, int n)
-{
-       struct idetape_bh *bh = tape->bh;
-       int count;
-       int ret = 0;
-
-       while (n) {
-               if (bh == NULL) {
-                       printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
-                                       __func__);
-                       return 1;
-               }
-               count = min((unsigned int)
-                               (bh->b_size - atomic_read(&bh->b_count)),
-                               (unsigned int)n);
-               if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
-                               count))
-                       ret = 1;
-               n -= count;
-               atomic_add(count, &bh->b_count);
-               buf += count;
-               if (atomic_read(&bh->b_count) == bh->b_size) {
-                       bh = bh->b_reqnext;
-                       if (bh)
-                               atomic_set(&bh->b_count, 0);
-               }
-       }
-       tape->bh = bh;
-       return ret;
-}
-
-static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
-                                     int n)
-{
-       struct idetape_bh *bh = tape->bh;
-       int count;
-       int ret = 0;
-
-       while (n) {
-               if (bh == NULL) {
-                       printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
-                                       __func__);
-                       return 1;
-               }
-               count = min(tape->b_count, n);
-               if  (copy_to_user(buf, tape->b_data, count))
-                       ret = 1;
-               n -= count;
-               tape->b_data += count;
-               tape->b_count -= count;
-               buf += count;
-               if (!tape->b_count) {
-                       bh = bh->b_reqnext;
-                       tape->bh = bh;
-                       if (bh) {
-                               tape->b_data = bh->b_data;
-                               tape->b_count = atomic_read(&bh->b_count);
-                       }
-               }
-       }
-       return ret;
-}
-
-static void idetape_init_merge_buffer(idetape_tape_t *tape)
-{
-       struct idetape_bh *bh = tape->merge_bh;
-       tape->bh = tape->merge_bh;
-
-       if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
-               atomic_set(&bh->b_count, 0);
-       else {
-               tape->b_data = bh->b_data;
-               tape->b_count = atomic_read(&bh->b_count);
-       }
+       return ide_tape_issue_pc(drive, &cmd, pc);
 }
 
 /*
@@ -1107,10 +821,10 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
                return;
 
        clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
-       tape->merge_bh_size = 0;
-       if (tape->merge_bh != NULL) {
-               ide_tape_kfree_buffer(tape);
-               tape->merge_bh = NULL;
+       tape->valid = 0;
+       if (tape->buf != NULL) {
+               kfree(tape->buf);
+               tape->buf = NULL;
        }
 
        tape->chrdev_dir = IDETAPE_DIR_NONE;
@@ -1164,36 +878,43 @@ static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
  * Generate a read/write request for the block device interface and wait for it
  * to be serviced.
  */
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
-                                struct idetape_bh *bh)
+static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 {
        idetape_tape_t *tape = drive->driver_data;
        struct request *rq;
-       int ret, errors;
+       int ret;
 
        debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
+       BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
+       BUG_ON(size < 0 || size % tape->blk_size);
 
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
-       rq->special = (void *)bh;
-       rq->sector = tape->first_frame;
-       rq->nr_sectors = blocks;
-       rq->current_nr_sectors = blocks;
-       blk_execute_rq(drive->queue, tape->disk, rq, 0);
 
-       errors = rq->errors;
-       ret = tape->blk_size * (blocks - rq->current_nr_sectors);
-       blk_put_request(rq);
+       if (size) {
+               ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
+                                     __GFP_WAIT);
+               if (ret)
+                       goto out_put;
+       }
 
-       if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
-               return 0;
+       blk_execute_rq(drive->queue, tape->disk, rq, 0);
 
-       if (tape->merge_bh)
-               idetape_init_merge_buffer(tape);
-       if (errors == IDE_DRV_ERROR_GENERAL)
-               return -EIO;
+       /* calculate the number of transferred bytes and update buffer state */
+       size -= rq->resid_len;
+       tape->cur = tape->buf;
+       if (cmd == REQ_IDETAPE_READ)
+               tape->valid = size;
+       else
+               tape->valid = 0;
+
+       ret = size;
+       if (rq->errors == IDE_DRV_ERROR_GENERAL)
+               ret = -EIO;
+out_put:
+       blk_put_request(rq);
        return ret;
 }
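The rewritten idetape_queue_rw_tail() above is the pattern this series moves drivers to: map a flat kernel buffer into the request with blk_rq_map_kern(), execute it synchronously, and derive the transferred byte count from the new rq->resid_len field. A minimal sketch of that pattern, outside the patch (issue_kern_rq() is a hypothetical helper; queue, disk and buffer are caller-supplied):

static int issue_kern_rq(struct request_queue *q, struct gendisk *disk,
                         void *buf, unsigned int len)
{
        struct request *rq;
        int ret = 0;

        rq = blk_get_request(q, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_SPECIAL;

        if (len) {
                /* attach the kernel buffer; the block layer wraps it
                 * in a bio, bouncing if necessary */
                ret = blk_rq_map_kern(q, rq, buf, len, __GFP_WAIT);
                if (ret)
                        goto out_put;
        }

        blk_execute_rq(q, disk, rq, 0);

        /* bytes actually moved = requested - residual */
        ret = len - rq->resid_len;
out_put:
        blk_put_request(rq);
        return ret;
}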
 
@@ -1230,153 +951,87 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
        pc->flags |= PC_FLAG_WAIT_FOR_DSC;
 }
 
-/* Queue up a character device originated write request. */
-static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
-{
-       idetape_tape_t *tape = drive->driver_data;
-
-       debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
-
-       return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
-                                    blocks, tape->merge_bh);
-}
-
 static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
 {
        idetape_tape_t *tape = drive->driver_data;
-       int blocks, min;
-       struct idetape_bh *bh;
 
        if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
                printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
                                " but we are not writing.\n");
                return;
        }
-       if (tape->merge_bh_size > tape->buffer_size) {
-               printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
-               tape->merge_bh_size = tape->buffer_size;
-       }
-       if (tape->merge_bh_size) {
-               blocks = tape->merge_bh_size / tape->blk_size;
-               if (tape->merge_bh_size % tape->blk_size) {
-                       unsigned int i;
-
-                       blocks++;
-                       i = tape->blk_size - tape->merge_bh_size %
-                               tape->blk_size;
-                       bh = tape->bh->b_reqnext;
-                       while (bh) {
-                               atomic_set(&bh->b_count, 0);
-                               bh = bh->b_reqnext;
-                       }
-                       bh = tape->bh;
-                       while (i) {
-                               if (bh == NULL) {
-                                       printk(KERN_INFO "ide-tape: bug,"
-                                                        " bh NULL\n");
-                                       break;
-                               }
-                               min = min(i, (unsigned int)(bh->b_size -
-                                               atomic_read(&bh->b_count)));
-                               memset(bh->b_data + atomic_read(&bh->b_count),
-                                               0, min);
-                               atomic_add(min, &bh->b_count);
-                               i -= min;
-                               bh = bh->b_reqnext;
-                       }
-               }
-               (void) idetape_add_chrdev_write_request(drive, blocks);
-               tape->merge_bh_size = 0;
-       }
-       if (tape->merge_bh != NULL) {
-               ide_tape_kfree_buffer(tape);
-               tape->merge_bh = NULL;
+       if (tape->buf) {
+               size_t aligned = roundup(tape->valid, tape->blk_size);
+
+               memset(tape->cur, 0, aligned - tape->valid);
+               idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
+               kfree(tape->buf);
+               tape->buf = NULL;
        }
        tape->chrdev_dir = IDETAPE_DIR_NONE;
 }
 
-static int idetape_init_read(ide_drive_t *drive)
+static int idetape_init_rw(ide_drive_t *drive, int dir)
 {
        idetape_tape_t *tape = drive->driver_data;
-       int bytes_read;
+       int rc;
 
-       /* Initialize read operation */
-       if (tape->chrdev_dir != IDETAPE_DIR_READ) {
-               if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
-                       ide_tape_flush_merge_buffer(drive);
-                       idetape_flush_tape_buffers(drive);
-               }
-               if (tape->merge_bh || tape->merge_bh_size) {
-                       printk(KERN_ERR "ide-tape: merge_bh_size should be"
-                                        " 0 now\n");
-                       tape->merge_bh_size = 0;
-               }
-               tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
-               if (!tape->merge_bh)
-                       return -ENOMEM;
-               tape->chrdev_dir = IDETAPE_DIR_READ;
+       BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
 
-               /*
-                * Issue a read 0 command to ensure that DSC handshake is
-                * switched from completion mode to buffer available mode.
-                * No point in issuing this if DSC overlap isn't supported, some
-                * drives (Seagate STT3401A) will return an error.
-                */
-               if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
-                       bytes_read = idetape_queue_rw_tail(drive,
-                                                       REQ_IDETAPE_READ, 0,
-                                                       tape->merge_bh);
-                       if (bytes_read < 0) {
-                               ide_tape_kfree_buffer(tape);
-                               tape->merge_bh = NULL;
-                               tape->chrdev_dir = IDETAPE_DIR_NONE;
-                               return bytes_read;
-                       }
-               }
-       }
+       if (tape->chrdev_dir == dir)
+               return 0;
 
-       return 0;
-}
+       if (tape->chrdev_dir == IDETAPE_DIR_READ)
+               ide_tape_discard_merge_buffer(drive, 1);
+       else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
+               ide_tape_flush_merge_buffer(drive);
+               idetape_flush_tape_buffers(drive);
+       }
 
-/* called from idetape_chrdev_read() to service a chrdev read request. */
-static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
-{
-       idetape_tape_t *tape = drive->driver_data;
+       if (tape->buf || tape->valid) {
+               printk(KERN_ERR "ide-tape: valid should be 0 now\n");
+               tape->valid = 0;
+       }
 
-       debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
+       tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
+       if (!tape->buf)
+               return -ENOMEM;
+       tape->chrdev_dir = dir;
+       tape->cur = tape->buf;
 
-       /* If we are at a filemark, return a read length of 0 */
-       if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
-               return 0;
-
-       idetape_init_read(drive);
+       /*
+        * Issue a zero-byte read/write command to ensure that the DSC
+        * handshake is switched from completion mode to buffer available
+        * mode.  No point in issuing this if DSC overlap isn't supported;
+        * some drives (Seagate STT3401A) will return an error.
+        */
+       if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
+               int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
+                                                 : REQ_IDETAPE_WRITE;
+
+               rc = idetape_queue_rw_tail(drive, cmd, 0);
+               if (rc < 0) {
+                       kfree(tape->buf);
+                       tape->buf = NULL;
+                       tape->chrdev_dir = IDETAPE_DIR_NONE;
+                       return rc;
+               }
+       }
 
-       return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
-                                    tape->merge_bh);
+       return 0;
 }
 
 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
 {
        idetape_tape_t *tape = drive->driver_data;
-       struct idetape_bh *bh;
-       int blocks;
+
+       memset(tape->buf, 0, tape->buffer_size);
 
        while (bcount) {
-               unsigned int count;
+               unsigned int count = min(tape->buffer_size, bcount);
 
-               bh = tape->merge_bh;
-               count = min(tape->buffer_size, bcount);
+               idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
                bcount -= count;
-               blocks = count / tape->blk_size;
-               while (count) {
-                       atomic_set(&bh->b_count,
-                                  min(count, (unsigned int)bh->b_size));
-                       memset(bh->b_data, 0, atomic_read(&bh->b_count));
-                       count -= atomic_read(&bh->b_count);
-                       bh = bh->b_reqnext;
-               }
-               idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
-                                     tape->merge_bh);
        }
 }
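The flush path above rounds the buffered byte count up to a block boundary with roundup() and zero-fills the gap before queueing the write. roundup(x, y) yields the smallest multiple of y that is >= x; with a 512-byte block size (values chosen for illustration):

        roundup(0, 512)   == 0      /* already aligned */
        roundup(100, 512) == 512    /* bytes 100..511 get zeroed first */
        roundup(512, 512) == 512    /* already aligned */
        roundup(513, 512) == 1024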
 
@@ -1456,7 +1111,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
        }
 
        if (tape->chrdev_dir == IDETAPE_DIR_READ) {
-               tape->merge_bh_size = 0;
+               tape->valid = 0;
                if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
                        ++count;
                ide_tape_discard_merge_buffer(drive, 0);
@@ -1505,9 +1160,9 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
 {
        struct ide_tape_obj *tape = file->private_data;
        ide_drive_t *drive = tape->drive;
-       ssize_t bytes_read, temp, actually_read = 0, rc;
+       size_t done = 0;
        ssize_t ret = 0;
-       u16 ctl = *(u16 *)&tape->caps[12];
+       int rc;
 
        debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
 
@@ -1517,49 +1172,43 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
                            (count % tape->blk_size) == 0)
                                tape->user_bs_factor = count / tape->blk_size;
        }
-       rc = idetape_init_read(drive);
+
+       rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
        if (rc < 0)
                return rc;
-       if (count == 0)
-               return (0);
-       if (tape->merge_bh_size) {
-               actually_read = min((unsigned int)(tape->merge_bh_size),
-                                   (unsigned int)count);
-               if (idetape_copy_stage_to_user(tape, buf, actually_read))
-                       ret = -EFAULT;
-               buf += actually_read;
-               tape->merge_bh_size -= actually_read;
-               count -= actually_read;
-       }
-       while (count >= tape->buffer_size) {
-               bytes_read = idetape_add_chrdev_read_request(drive, ctl);
-               if (bytes_read <= 0)
-                       goto finish;
-               if (idetape_copy_stage_to_user(tape, buf, bytes_read))
-                       ret = -EFAULT;
-               buf += bytes_read;
-               count -= bytes_read;
-               actually_read += bytes_read;
-       }
-       if (count) {
-               bytes_read = idetape_add_chrdev_read_request(drive, ctl);
-               if (bytes_read <= 0)
-                       goto finish;
-               temp = min((unsigned long)count, (unsigned long)bytes_read);
-               if (idetape_copy_stage_to_user(tape, buf, temp))
+
+       while (done < count) {
+               size_t todo;
+
+               /* refill if staging buffer is empty */
+               if (!tape->valid) {
+                       /* If we are at a filemark, nothing more to read */
+                       if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
+                               break;
+                       /* read */
+                       if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
+                                                 tape->buffer_size) <= 0)
+                               break;
+               }
+
+               /* copy out */
+               todo = min_t(size_t, count - done, tape->valid);
+               if (copy_to_user(buf + done, tape->cur, todo))
                        ret = -EFAULT;
-               actually_read += temp;
-               tape->merge_bh_size = bytes_read-temp;
+
+               tape->cur += todo;
+               tape->valid -= todo;
+               done += todo;
        }
-finish:
-       if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
+
+       if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
                debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
 
                idetape_space_over_filemarks(drive, MTFSF, 1);
                return 0;
        }
 
-       return ret ? ret : actually_read;
+       return ret ? ret : done;
 }
 
 static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
@@ -1567,9 +1216,9 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
 {
        struct ide_tape_obj *tape = file->private_data;
        ide_drive_t *drive = tape->drive;
-       ssize_t actually_written = 0;
+       size_t done = 0;
        ssize_t ret = 0;
-       u16 ctl = *(u16 *)&tape->caps[12];
+       int rc;
 
        /* The drive is write protected. */
        if (tape->write_prot)
@@ -1578,80 +1227,31 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
        debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
 
        /* Initialize write operation */
-       if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
-               if (tape->chrdev_dir == IDETAPE_DIR_READ)
-                       ide_tape_discard_merge_buffer(drive, 1);
-               if (tape->merge_bh || tape->merge_bh_size) {
-                       printk(KERN_ERR "ide-tape: merge_bh_size "
-                               "should be 0 now\n");
-                       tape->merge_bh_size = 0;
-               }
-               tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
-               if (!tape->merge_bh)
-                       return -ENOMEM;
-               tape->chrdev_dir = IDETAPE_DIR_WRITE;
-               idetape_init_merge_buffer(tape);
+       rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
+       if (rc < 0)
+               return rc;
 
-               /*
-                * Issue a write 0 command to ensure that DSC handshake is
-                * switched from completion mode to buffer available mode. No
-                * point in issuing this if DSC overlap isn't supported, some
-                * drives (Seagate STT3401A) will return an error.
-                */
-               if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
-                       ssize_t retval = idetape_queue_rw_tail(drive,
-                                                       REQ_IDETAPE_WRITE, 0,
-                                                       tape->merge_bh);
-                       if (retval < 0) {
-                               ide_tape_kfree_buffer(tape);
-                               tape->merge_bh = NULL;
-                               tape->chrdev_dir = IDETAPE_DIR_NONE;
-                               return retval;
-                       }
-               }
-       }
-       if (count == 0)
-               return (0);
-       if (tape->merge_bh_size) {
-               if (tape->merge_bh_size >= tape->buffer_size) {
-                       printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
-                       tape->merge_bh_size = 0;
-               }
-               actually_written = min((unsigned int)
-                               (tape->buffer_size - tape->merge_bh_size),
-                               (unsigned int)count);
-               if (idetape_copy_stage_from_user(tape, buf, actually_written))
-                               ret = -EFAULT;
-               buf += actually_written;
-               tape->merge_bh_size += actually_written;
-               count -= actually_written;
-
-               if (tape->merge_bh_size == tape->buffer_size) {
-                       ssize_t retval;
-                       tape->merge_bh_size = 0;
-                       retval = idetape_add_chrdev_write_request(drive, ctl);
-                       if (retval <= 0)
-                               return (retval);
-               }
-       }
-       while (count >= tape->buffer_size) {
-               ssize_t retval;
-               if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
-                       ret = -EFAULT;
-               buf += tape->buffer_size;
-               count -= tape->buffer_size;
-               retval = idetape_add_chrdev_write_request(drive, ctl);
-               actually_written += tape->buffer_size;
-               if (retval <= 0)
-                       return (retval);
-       }
-       if (count) {
-               actually_written += count;
-               if (idetape_copy_stage_from_user(tape, buf, count))
+       while (done < count) {
+               size_t todo;
+
+               /* flush if staging buffer is full */
+               if (tape->valid == tape->buffer_size &&
+                   idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+                                         tape->buffer_size) <= 0)
+                       /* flush failed: report progress so far, or -EIO */
+                       return done ? done : -EIO;
+
+               /* copy in */
+               todo = min_t(size_t, count - done,
+                            tape->buffer_size - tape->valid);
+               if (copy_from_user(tape->cur, buf + done, todo))
                        ret = -EFAULT;
-               tape->merge_bh_size += count;
+
+               tape->cur += todo;
+               tape->valid += todo;
+               done += todo;
        }
-       return ret ? ret : actually_written;
+
+       return ret ? ret : done;
 }
 
 static int idetape_write_filemark(ide_drive_t *drive)
@@ -1812,7 +1412,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
                idetape_flush_tape_buffers(drive);
        }
        if (cmd == MTIOCGET || cmd == MTIOCPOS) {
-               block_offset = tape->merge_bh_size /
+               block_offset = tape->valid /
                        (tape->blk_size * tape->user_bs_factor);
                position = idetape_read_position(drive);
                if (position < 0)
@@ -1960,12 +1560,12 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
        idetape_tape_t *tape = drive->driver_data;
 
        ide_tape_flush_merge_buffer(drive);
-       tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
-       if (tape->merge_bh != NULL) {
+       tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
+       if (tape->buf != NULL) {
                idetape_pad_zeros(drive, tape->blk_size *
                                (tape->user_bs_factor - 1));
-               ide_tape_kfree_buffer(tape);
-               tape->merge_bh = NULL;
+               kfree(tape->buf);
+               tape->buf = NULL;
        }
        idetape_write_filemark(drive);
        idetape_flush_tape_buffers(drive);
@@ -2159,8 +1759,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
        u16 *ctl = (u16 *)&tape->caps[12];
 
        drive->pc_callback       = ide_tape_callback;
-       drive->pc_update_buffers = idetape_update_buffers;
-       drive->pc_io_buffers     = ide_tape_io_buffers;
 
        drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
 
@@ -2191,11 +1789,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
                tape->buffer_size = *ctl * tape->blk_size;
        }
        buffer_size = tape->buffer_size;
-       tape->pages_per_buffer = buffer_size / PAGE_SIZE;
-       if (buffer_size % PAGE_SIZE) {
-               tape->pages_per_buffer++;
-               tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
-       }
 
        /* select the "best" DSC read/write polling freq */
        speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
@@ -2238,7 +1831,7 @@ static void ide_tape_release(struct device *dev)
        ide_drive_t *drive = tape->drive;
        struct gendisk *g = tape->disk;
 
-       BUG_ON(tape->merge_bh_size);
+       BUG_ON(tape->valid);
 
        drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
        drive->driver_data = NULL;
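Taken together, the ide-tape hunks replace the idetape_bh chain with a single flat kmalloc'ed staging buffer tracked by three fields. A condensed sketch of the invariant the read and write loops above maintain (struct trimmed to the relevant members; the name tape_staging is editorial, the fields are those of idetape_tape_t in the patch):

struct tape_staging {
        char    *buf;   /* staging area, buffer_size bytes */
        char    *cur;   /* read: next byte to copy to user space;
                           write: next byte to fill from user space */
        size_t  valid;  /* read: bytes left to copy out;
                           write: bytes accumulated and awaiting flush */
};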
index 4aa6223c11bea0ee138fec32dfd29fbf7facd30e..a0c3e1b2f73c20b6b005eb9ce2b35ea61bbb42f1 100644 (file)
@@ -385,7 +385,7 @@ out_end:
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                ide_finish_cmd(drive, cmd, stat);
        else
-               ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
+               ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
        return ide_stopped;
 out_err:
        ide_error_cmd(drive, cmd);
@@ -424,7 +424,9 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 
        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
-       rq->buffer = buf;
+
+       if (cmd->tf_flags & IDE_TFLAG_WRITE)
+               rq->cmd_flags |= REQ_RW;
 
        /*
         * (ks) We transfer currently only whole sectors.
@@ -432,18 +434,20 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
         * if we would find a solution to transfer any size.
         * To support special commands like READ LONG.
         */
-       rq->hard_nr_sectors = rq->nr_sectors = nsect;
-       rq->hard_cur_sectors = rq->current_nr_sectors = nsect;
-
-       if (cmd->tf_flags & IDE_TFLAG_WRITE)
-               rq->cmd_flags |= REQ_RW;
+       if (nsect) {
+               error = blk_rq_map_kern(drive->queue, rq, buf,
+                                       nsect * SECTOR_SIZE, __GFP_WAIT);
+               if (error)
+                       goto put_req;
+       }
 
        rq->special = cmd;
        cmd->rq = rq;
 
        error = blk_execute_rq(drive->queue, NULL, rq, 0);
-       blk_put_request(rq);
 
+put_req:
+       blk_put_request(rq);
        return error;
 }
 
index 248a54bd2386f346e155c19a74f1c635215170a3..c2a16a8f486dea1525d75a2ac6a037d2fe96bc04 100644 (file)
@@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
                u8 clock = inb(high_16 + 0x11);
 
                outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
-               word_count = (rq->nr_sectors << 8);
+               word_count = (blk_rq_sectors(rq) << 8);
                word_count = (rq_data_dir(rq) == READ) ?
                                        word_count | 0x05000000 :
                                        word_count | 0x06000000;
index b4cf42dc8a6fcf4cd155ae0bd3f7ad9868a10106..05a93d6baecc60edf3ba587ab26c0947907915aa 100644 (file)
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
        ide_hwif_t *hwif        = drive->hwif;
        unsigned long sc_base   = hwif->config_data;
        unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
-       unsigned long nsectors  = hwif->rq->nr_sectors;
+       unsigned long nsectors  = blk_rq_sectors(hwif->rq);
 
        /*
         * We have to manually load the sector count and size into
index 564422d239766a97debc65ff45546d0bbfa9095b..5ca76224f6d11a731b585f98a35ea8e2661c4e5e 100644 (file)
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
        tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
                         TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
 
-       tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
+       tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
 
        return 0;
 }
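The pdc202xx_old, tc86c001 and tx4939ide hunks above are mechanical conversions to the request accessors this series introduces in <linux/blkdev.h>; the mapping applied throughout the patch is:

        rq->sector              ->  blk_rq_pos(rq)          /* start sector */
        rq->nr_sectors          ->  blk_rq_sectors(rq)      /* total sectors */
        rq->current_nr_sectors  ->  blk_rq_cur_sectors(rq)  /* current segment */
        rq->data_len            ->  blk_rq_bytes(rq)        /* total bytes */
                                    blk_rq_cur_bytes(rq)    /* current segment bytes */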
index de143deb06f0bcbb1dc1da9e738e88cffd431491..c0bebc6a2f2ccbb1cab1298f0fdfd396316928b0 100644 (file)
@@ -672,15 +672,14 @@ try_again:
                                               msb->req_sg);
 
                if (!msb->seg_count) {
-                       chunk = __blk_end_request(msb->block_req, -ENOMEM,
-                                       blk_rq_cur_bytes(msb->block_req));
+                       chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
                        continue;
                }
 
-               t_sec = msb->block_req->sector << 9;
+               t_sec = blk_rq_pos(msb->block_req) << 9;
                sector_div(t_sec, msb->page_size);
 
-               count = msb->block_req->nr_sectors << 9;
+               count = blk_rq_bytes(msb->block_req);
                count /= msb->page_size;
 
                param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
                return 0;
        }
 
-       dev_dbg(&card->dev, "elv_next\n");
-       msb->block_req = elv_next_request(msb->queue);
+       dev_dbg(&card->dev, "blk_fetch\n");
+       msb->block_req = blk_fetch_request(msb->queue);
        if (!msb->block_req) {
                dev_dbg(&card->dev, "issue end\n");
                return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
                                        t_len *= msb->page_size;
                        }
                } else
-                       t_len = msb->block_req->nr_sectors << 9;
+                       t_len = blk_rq_bytes(msb->block_req);
 
                dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
 
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
                return;
 
        if (msb->eject) {
-               while ((req = elv_next_request(q)) != NULL)
-                       __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
+               while ((req = blk_fetch_request(q)) != NULL)
+                       __blk_end_request_all(req, -ENODEV);
 
                return;
        }
index a9019f081b971b888464166c407a4c8aabb9da30..79f5433359f9b3fc1b346cb78b5d11dcd60d15de 100644 (file)
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        /* do we need to support multiple segments? */
        if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
                printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-                   ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
-                   rsp->bio->bi_vcnt, rsp->data_len);
+                   ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+                   rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        smpreq = (SmpPassthroughRequest_t *)mf;
        memset(smpreq, 0, sizeof(*smpreq));
 
-       smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+       smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
 
        if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                       MPI_SGE_FLAGS_END_OF_BUFFER |
                       MPI_SGE_FLAGS_DIRECTION |
                       mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
-       flagsLength |= (req->data_len - 4);
+       flagsLength |= (blk_rq_bytes(req) - 4);
 
        dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
-                                     req->data_len, PCI_DMA_BIDIRECTIONAL);
+                                     blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_out)
                goto put_mf;
        mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
        /* response */
        flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
-       flagsLength |= rsp->data_len + 4;
+       flagsLength |= blk_rq_bytes(rsp) + 4;
        dma_addr_in =  pci_map_single(ioc->pcidev, bio_data(rsp->bio),
-                                     rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+                                     blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_in)
                goto unmap;
        mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
                memcpy(req->sense, smprep, sizeof(*smprep));
                req->sense_len = sizeof(*smprep);
-               req->data_len = 0;
-               rsp->data_len -= smprep->ResponseDataLength;
+               req->resid_len = 0;
+               rsp->resid_len -= smprep->ResponseDataLength;
        } else {
                printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
                    ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
 unmap:
        if (dma_addr_out)
-               pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+               pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
                                 PCI_DMA_BIDIRECTIONAL);
        if (dma_addr_in)
-               pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+               pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
                                 PCI_DMA_BIDIRECTIONAL);
 put_mf:
        if (mf)
index a443e136dc41591136aecc1ec9e863ce43b2e35e..6573ef4408f1dc66141acbc4c6038fd84b08acbd 100644 (file)
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
        struct request_queue *q = req->q;
        unsigned long flags;
 
-       if (blk_end_request(req, error, nr_bytes)) {
-               int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
-               if (blk_pc_request(req))
-                       leftover = req->data_len;
-
+       if (blk_end_request(req, error, nr_bytes))
                if (error)
-                       blk_end_request(req, -EIO, leftover);
-       }
+                       blk_end_request_all(req, -EIO);
 
        spin_lock_irqsave(q->queue_lock, flags);
 
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
                        break;
 
                case CACHE_SMARTFETCH:
-                       if (req->nr_sectors > 16)
+                       if (blk_rq_sectors(req) > 16)
                                ctl_flags = 0x201F0008;
                        else
                                ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
                        ctl_flags = 0x001F0010;
                        break;
                case CACHE_SMARTBACK:
-                       if (req->nr_sectors > 16)
+                       if (blk_rq_sectors(req) > 16)
                                ctl_flags = 0x001F0004;
                        else
                                ctl_flags = 0x001F0010;
                        break;
                case CACHE_SMARTTHROUGH:
-                       if (req->nr_sectors > 16)
+                       if (blk_rq_sectors(req) > 16)
                                ctl_flags = 0x001F0004;
                        else
                                ctl_flags = 0x001F0010;
@@ -827,22 +821,22 @@ static int i2o_block_transfer(struct request *req)
 
                *mptr++ = cpu_to_le32(scsi_flags);
 
-               *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
-               *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+               *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+               *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
 
                memcpy(mptr, cmd, 10);
                mptr += 4;
-               *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+               *mptr++ = cpu_to_le32(blk_rq_bytes(req));
        } else
 #endif
        {
                msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
                *mptr++ = cpu_to_le32(ctl_flags);
-               *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+               *mptr++ = cpu_to_le32(blk_rq_bytes(req));
                *mptr++ =
-                   cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+                   cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
                *mptr++ =
-                   cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+                   cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
        }
 
        if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +877,7 @@ static void i2o_block_request_fn(struct request_queue *q)
        struct request *req;
 
        while (!blk_queue_plugged(q)) {
-               req = elv_next_request(q);
+               req = blk_peek_request(q);
                if (!req)
                        break;
 
@@ -896,7 +890,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 
                        if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
                                if (!i2o_block_transfer(req)) {
-                                       blkdev_dequeue_request(req);
+                                       blk_start_request(req);
                                        continue;
                                } else
                                        osm_info("transfer error\n");
@@ -922,8 +916,10 @@ static void i2o_block_request_fn(struct request_queue *q)
                                blk_stop_queue(q);
                                break;
                        }
-               } else
-                       end_request(req, 0);
+               } else {
+                       blk_start_request(req);
+                       __blk_end_request_all(req, -EIO);
+               }
        }
 };
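The i2o conversion above shows the new dequeue discipline: blk_peek_request() returns the head request without removing it from the queue, and blk_start_request() dequeues it only once the driver commits to servicing it. A generic request_fn sketch under that discipline (driver_can_issue() and issue() are hypothetical driver hooks; the queue lock is held on entry, as for any request_fn):

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (!driver_can_issue(q)) {
                        /* leave rq queued; restart when resources free up */
                        blk_stop_queue(q);
                        break;
                }
                blk_start_request(rq);  /* dequeue: rq is now driver-owned */
                if (issue(rq))
                        __blk_end_request_all(rq, -EIO);
        }
}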
 
index b25e9b6516ae6942d6e9a0c34fc3f0dba0f9556a..c5df86546458d1da54f9a0bd778e7957d2357e2e 100644 (file)
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;
 
-               brq.cmd.arg = req->sector;
+               brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-               brq.data.blocks = req->nr_sectors;
+               brq.data.blocks = blk_rq_sectors(req);
 
                /*
                 * The block layer doesn't support all sector count
@@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
-               if (brq.data.blocks != req->nr_sectors) {
+               if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;
 
@@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
-                              (unsigned)req->sector,
-                              (unsigned)req->nr_sectors, status);
+                              (unsigned)blk_rq_pos(req),
+                              (unsigned)blk_rq_sectors(req), status);
                }
 
                if (brq.stop.error) {
index 7a72e75d5c674b94a66ab16073acb89e96a6d42b..49e582356c65ab34bc17b1db1bbae002bc5faaff 100644 (file)
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                if (!blk_queue_plugged(q))
-                       req = elv_next_request(q);
+                       req = blk_fetch_request(q);
                mq->req = req;
                spin_unlock_irq(q->queue_lock);
 
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
 {
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
-       int ret;
 
        if (!mq) {
                printk(KERN_ERR "MMC: killing requests for dead queue\n");
-               while ((req = elv_next_request(q)) != NULL) {
-                       do {
-                               ret = __blk_end_request(req, -EIO,
-                                                       blk_rq_cur_bytes(req));
-                       } while (ret);
-               }
+               while ((req = blk_fetch_request(q)) != NULL)
+                       __blk_end_request_all(req, -EIO);
                return;
        }
 
index a49a9c8f2cb1faffe5c16d782ea793839062dcc4..502622f628bc0dfb8e988a86833bc10dc5710c58 100644 (file)
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
        unsigned long block, nsect;
        char *buf;
 
-       block = req->sector << 9 >> tr->blkshift;
-       nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+       block = blk_rq_pos(req) << 9 >> tr->blkshift;
+       nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
 
        buf = req->buffer;
 
        if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
            req->cmd[0] == REQ_LB_OP_DISCARD)
-               return !tr->discard(dev, block, nsect);
+               return tr->discard(dev, block, nsect);
 
        if (!blk_fs_request(req))
-               return 0;
+               return -EIO;
 
-       if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
-               return 0;
+       if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+           get_capacity(req->rq_disk))
+               return -EIO;
 
        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
-                               return 0;
-               return 1;
+                               return -EIO;
+               return 0;
 
        case WRITE:
                if (!tr->writesect)
-                       return 0;
+                       return -EIO;
 
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
-                               return 0;
-               return 1;
+                               return -EIO;
+               return 0;
 
        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
-               return 0;
+               return -EIO;
        }
 }
 
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
 {
        struct mtd_blktrans_ops *tr = arg;
        struct request_queue *rq = tr->blkcore_priv->rq;
+       struct request *req = NULL;
 
        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC;
 
        spin_lock_irq(rq->queue_lock);
+
        while (!kthread_should_stop()) {
-               struct request *req;
                struct mtd_blktrans_dev *dev;
-               int res = 0;
-
-               req = elv_next_request(rq);
+               int res;
 
-               if (!req) {
+               if (!req && !(req = blk_fetch_request(rq))) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(rq->queue_lock);
                        schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
 
                spin_lock_irq(rq->queue_lock);
 
-               end_request(req, res);
+               if (!__blk_end_request_cur(req, res))
+                       req = NULL;
        }
+
+       if (req)
+               __blk_end_request_all(req, -EIO);
+
        spin_unlock_irq(rq->queue_lock);
 
        return 0;
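The reworked mtd thread depends on the return value of __blk_end_request_cur(): it completes only the current segment of the request and returns true while data is still pending, so the same request is re-entered chunk by chunk until it is finished. The idiom reduced to its core (handle_chunk() is a placeholder; rq->queue_lock is held, as in the thread above):

        struct request *req = NULL;

        while (!kthread_should_stop()) {
                int res;

                if (!req && !(req = blk_fetch_request(q)))
                        break;  /* or sleep until more work arrives */

                res = handle_chunk(req);        /* services the current segment */

                /* returns false once every byte of req is completed */
                if (!__blk_end_request_cur(req, res))
                        req = NULL;
        }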
index d1815272c4351b90bbd972a649c8f11a3bd49710..e64f62d5e0fc503f2fe7533c741778be1a5092d1 100644 (file)
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
        if (dasd_profile_level != DASD_PROFILE_ON)
                return;
 
-       sectors = req->nr_sectors;
+       sectors = blk_rq_sectors(req);
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
@@ -1613,15 +1613,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
        del_timer(&block->timer);
 }
 
-/*
- * posts the buffer_cache about a finalized request
- */
-static inline void dasd_end_request(struct request *req, int error)
-{
-       if (__blk_end_request(req, error, blk_rq_bytes(req)))
-               BUG();
-}
-
 /*
  * Process finished error recovery ccw.
  */
@@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
        if (basedev->state < DASD_STATE_READY)
                return;
        /* Now we try to fetch requests from the request queue */
-       while (!blk_queue_plugged(queue) &&
-              elv_next_request(queue)) {
-
-               req = elv_next_request(queue);
-
+       while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
                if (basedev->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "Rejecting write request %p",
                                      req);
-                       blkdev_dequeue_request(req);
-                       dasd_end_request(req, -EIO);
+                       blk_start_request(req);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "CCW creation failed (rc=%ld) "
                                      "on request %p",
                                      PTR_ERR(cqr), req);
-                       blkdev_dequeue_request(req);
-                       dasd_end_request(req, -EIO);
+                       blk_start_request(req);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
                /*
@@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                 */
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_FILLED;
-               blkdev_dequeue_request(req);
+               blk_start_request(req);
                list_add_tail(&cqr->blocklist, &block->ccw_queue);
                dasd_profile_start(block, cqr, req);
        }
@@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status <= 0)
                error = status ? status : -EIO;
-       dasd_end_request(req, error);
+       __blk_end_request_all(req, error);
 }
 
 /*
@@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
                return;
 
        spin_lock_irq(&block->request_queue_lock);
-       while ((req = elv_next_request(block->request_queue))) {
-               blkdev_dequeue_request(req);
-               dasd_end_request(req, -EIO);
-       }
+       while ((req = blk_fetch_request(block->request_queue)))
+               __blk_end_request_all(req, -EIO);
        spin_unlock_irq(&block->request_queue_lock);
 }
 
index b9a7f77334468ff5afc4e34d59a6baffadb543ea..2efaddfae56088be5d25bf27e4f53991105ec070 100644 (file)
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
                return ERR_PTR(-EINVAL);
        blksize = block->bp_block;
        /* Calculate record id of first and last block. */
-       first_rec = req->sector >> block->s2b_shift;
-       last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+       first_rec = blk_rq_pos(req) >> block->s2b_shift;
+       last_rec =
+               (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        /* Check struct bio and count the number of blocks for the request. */
        count = 0;
        rq_for_each_segment(bv, req, iter) {
index cb52da033f061ebbe853b90afaad65db31b7fed5..a41c94053e64be88f1179153be06156c313d9093 100644 (file)
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
        blksize = block->bp_block;
        blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
        /* Calculate record id of first and last block. */
-       first_rec = first_trk = req->sector >> block->s2b_shift;
+       first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
        first_offs = sector_div(first_trk, blk_per_trk);
        last_rec = last_trk =
-               (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+               (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        last_offs = sector_div(last_trk, blk_per_trk);
        cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
 
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
        private = (struct dasd_eckd_private *) cqr->block->base->private;
        blksize = cqr->block->bp_block;
        blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
-       recid = req->sector >> cqr->block->s2b_shift;
+       recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
        ccw = cqr->cpaddr;
        /* Skip over define extent & locate record. */
        ccw++;
index a3eb6fd146731dad7ef87c9f1dbe28a72eb504da..8912358daa2fd0c3c6561b7469c5d2d23b5e1d5d 100644 (file)
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
                return ERR_PTR(-EINVAL);
        blksize = block->bp_block;
        /* Calculate record id of first and last block. */
-       first_rec = req->sector >> block->s2b_shift;
-       last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+       first_rec = blk_rq_pos(req) >> block->s2b_shift;
+       last_rec =
+               (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        /* Check struct bio and count the number of blocks for the request. */
        count = 0;
        cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
        ccw = cqr->cpaddr;
        /* First ccw is define extent. */
        define_extent(ccw++, cqr->data, rq_data_dir(req),
-                     block->bp_block, req->sector, req->nr_sectors);
+                     block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
        /* Build locate_record + read/write ccws. */
        idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
        LO_data = (struct LO_fba_data *) (idaws + cidaw);
index 5f8e8ef43dd312add6ca69cc9ed851a903e38c3b..2d00a383a475ca50fbc93a6b0fee0cc1df018c59 100644 (file)
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
        /* Setup ccws. */
        request->op = TO_BLOCK;
        start_block = (struct tape_34xx_block_id *) request->cpdata;
-       start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
+       start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
        DBF_EVENT(6, "start_block = %i\n", start_block->block);
 
        ccw = request->cpaddr;
index 823b05bd0dd79d9ab928d214b36cab9ec78626df..c453b2f3e9f4c6c46cf73a9be1397c9f10873c7c 100644 (file)
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
        struct req_iterator iter;
 
        DBF_EVENT(6, "xBREDid:");
-       start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
+       start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
        DBF_EVENT(6, "start_block = %i\n", start_block);
 
        rq_for_each_segment(bv, req, iter)
index f32e89e7c4f2e9f3f1aa54856996c446921e8a9a..1e796767598056616ed6b3265ed27855fd77d40a 100644 (file)
@@ -73,13 +73,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
 /*
  * Post finished request.
  */
-static void
-tapeblock_end_request(struct request *req, int error)
-{
-       if (blk_end_request(req, error, blk_rq_bytes(req)))
-               BUG();
-}
-
 static void
 __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 {
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 
        device = ccw_req->device;
        req = (struct request *) data;
-       tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
+       blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
        if (ccw_req->rc == 0)
                /* Update position. */
                device->blk_data.block_position =
-                       (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+                 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
        else
                /* We lost the position information due to an error. */
                device->blk_data.block_position = -1;
        device->discipline->free_bread(ccw_req);
        if (!list_empty(&device->req_queue) ||
-           elv_next_request(device->blk_data.request_queue))
+           blk_peek_request(device->blk_data.request_queue))
                tapeblock_trigger_requeue(device);
 }
 
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
        ccw_req = device->discipline->bread(device, req);
        if (IS_ERR(ccw_req)) {
                DBF_EVENT(1, "TBLOCK: bread failed\n");
-               tapeblock_end_request(req, -EIO);
+               blk_end_request_all(req, -EIO);
                return PTR_ERR(ccw_req);
        }
        ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
                 * Start/enqueueing failed. No retries in
                 * this case.
                 */
-               tapeblock_end_request(req, -EIO);
+               blk_end_request_all(req, -EIO);
                device->discipline->free_bread(ccw_req);
        }
 
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
        spin_lock_irq(&device->blk_data.request_queue_lock);
        while (
                !blk_queue_plugged(queue) &&
-               elv_next_request(queue)   &&
+               (req = blk_fetch_request(queue)) &&
                nr_queued < TAPEBLOCK_MIN_REQUEUE
        ) {
-               req = elv_next_request(queue);
                if (rq_data_dir(req) == WRITE) {
                        DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
-                       blkdev_dequeue_request(req);
                        spin_unlock_irq(&device->blk_data.request_queue_lock);
-                       tapeblock_end_request(req, -EIO);
+                       blk_end_request_all(req, -EIO);
                        spin_lock_irq(&device->blk_data.request_queue_lock);
                        continue;
                }
-               blkdev_dequeue_request(req);
                nr_queued++;
                spin_unlock_irq(&device->blk_data.request_queue_lock);
                rc = tapeblock_start_request(device, req);
index a85ad05e85482a96d774358123b88e3401a50d0b..6d46516846884515bd56b35a9f0d28b342df1b03 100644 (file)
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
 {
        struct request *req;
 
-       while ((req = elv_next_request(q)) != NULL) {
+       req = blk_fetch_request(q);
+       while (req) {
                struct jsfd_part *jdp = req->rq_disk->private_data;
-               unsigned long offset = req->sector << 9;
-               size_t len = req->current_nr_sectors << 9;
+               unsigned long offset = blk_rq_pos(req) << 9;
+               size_t len = blk_rq_cur_bytes(req);
+               int err = -EIO;
 
-               if ((offset + len) > jdp->dsize) {
-                               end_request(req, 0);
-                       continue;
-               }
+               if ((offset + len) > jdp->dsize)
+                       goto end;
 
                if (rq_data_dir(req) != READ) {
                        printk(KERN_ERR "jsfd: write\n");
-                       end_request(req, 0);
-                       continue;
+                       goto end;
                }
 
                if ((jdp->dbase & 0xff000000) != 0x20000000) {
                        printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
-                       end_request(req, 0);
-                       continue;
+                       goto end;
                }
 
                jsfd_read(req->buffer, jdp->dbase + offset, len);
-
-               end_request(req, 1);
+               err = 0;
+       end:
+               if (!__blk_end_request_cur(req, err))
+                       req = blk_fetch_request(q);
        }
 }
 
index be5099dd94b5d4e655385ad59364dbd585da1910..c7076ce25e2121dcad840347314cea0ac2997890 100644 (file)
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
        if (linked_comm && SCpnt->device->queue_depth > 2
            && TLDEV(SCpnt->device->type)) {
                ha->cp_stat[i] = READY;
-               flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0);
+               flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
                return 0;
        }
 
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
                if (!cpp->din)
                        input_only = 0;
 
-               if (SCpnt->request->sector < minsec)
-                       minsec = SCpnt->request->sector;
-               if (SCpnt->request->sector > maxsec)
-                       maxsec = SCpnt->request->sector;
+               if (blk_rq_pos(SCpnt->request) < minsec)
+                       minsec = blk_rq_pos(SCpnt->request);
+               if (blk_rq_pos(SCpnt->request) > maxsec)
+                       maxsec = blk_rq_pos(SCpnt->request);
 
-               sl[n] = SCpnt->request->sector;
-               ioseek += SCpnt->request->nr_sectors;
+               sl[n] = blk_rq_pos(SCpnt->request);
+               ioseek += blk_rq_sectors(SCpnt->request);
 
                if (!n)
                        continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
                        k = il[n];
                        cpp = &ha->cp[k];
                        SCpnt = cpp->SCpnt;
-                       ll[n] = SCpnt->request->nr_sectors;
+                       ll[n] = blk_rq_sectors(SCpnt->request);
                        pl[n] = SCpnt->serial_number;
 
                        if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
                        cpp = &ha->cp[k];
                        SCpnt = cpp->SCpnt;
                        scmd_printk(KERN_INFO, SCpnt,
-                           "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld"
+                           "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
                             " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
                             (ihdlr ? "ihdlr" : "qcomm"),
                             SCpnt->serial_number, k, flushcount,
-                            n_ready, SCpnt->request->sector,
-                            SCpnt->request->nr_sectors, cursec, YESNO(s),
+                            n_ready, blk_rq_pos(SCpnt->request),
+                            blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
                             YESNO(r), YESNO(rev), YESNO(input_only),
                             YESNO(overlap), cpp->din);
                }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
 
        if (linked_comm && SCpnt->device->queue_depth > 2
            && TLDEV(SCpnt->device->type))
-               flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1);
+               flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
 
        tstatus = status_byte(spp->target_status);
 
index 3da02e4367884b4f8fcc1015811b55b0bf88a968..54fa1e42dc4d2dfba29cdf52afd52d909af67139 100644 (file)
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        /* do we need to support multiple segments? */
        if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
                printk("%s: multiple segments req %u %u, rsp %u %u\n",
-                      __func__, req->bio->bi_vcnt, req->data_len,
-                      rsp->bio->bi_vcnt, rsp->data_len);
+                      __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+                      rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
-       ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
-                              bio_data(rsp->bio), rsp->data_len);
+       ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
+                              bio_data(rsp->bio), blk_rq_bytes(rsp));
        if (ret > 0) {
                /* positive number is the untransferred residual */
-               rsp->data_len = ret;
-               req->data_len = 0;
+               rsp->resid_len = ret;
+               req->resid_len = 0;
                ret = 0;
        } else if (ret == 0) {
-               rsp->data_len = 0;
-               req->data_len = 0;
+               rsp->resid_len = 0;
+               req->resid_len = 0;
        }
 
        return ret;
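Beyond the blk_rq_bytes() conversion, this hunk stops using data_len for two purposes at once: blk_rq_bytes() always reports the total transfer length, and the untransferred remainder now lives in the new resid_len field. A hedged sketch of the completion-side convention (the helper name is hypothetical):

    /* Hypothetical completion helper: record a residual without
     * clobbering the request's transfer length. */
    static void smp_set_residual(struct request *req, struct request *rsp,
                                 unsigned int rsp_bytes_done)
    {
            req->resid_len = 0;     /* request frame fully sent */
            rsp->resid_len = blk_rq_bytes(rsp) - rsp_bytes_done;
    }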
index d110a366c48a13f7e8372ac81c1a15de05c40efa..1bc3b75679947ac1d8bcca33d132835cf16854f8 100644 (file)
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 {
        u8 *req_data = NULL, *resp_data = NULL, *buf;
        struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
-       int error = -EINVAL, resp_data_len = rsp->data_len;
+       int error = -EINVAL;
 
        /* eight is the minimum size for request and response frames */
-       if (req->data_len < 8 || rsp->data_len < 8)
+       if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
                goto out;
 
-       if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
-           bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
+       if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
+           bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
                shost_printk(KERN_ERR, shost,
                        "SMP request/response frame crosses page boundary");
                goto out;
        }
 
-       req_data = kzalloc(req->data_len, GFP_KERNEL);
+       req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
 
        /* make sure frame can always be built ... we copy
         * back only the requested length */
-       resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
+       resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
 
        if (!req_data || !resp_data) {
                error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 
        local_irq_disable();
        buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
-       memcpy(req_data, buf, req->data_len);
+       memcpy(req_data, buf, blk_rq_bytes(req));
        kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
        local_irq_enable();
 
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 
        switch (req_data[1]) {
        case SMP_REPORT_GENERAL:
-               req->data_len -= 8;
-               resp_data_len -= 32;
+               req->resid_len -= 8;
+               rsp->resid_len -= 32;
                resp_data[2] = SMP_RESP_FUNC_ACC;
                resp_data[9] = sas_ha->num_phys;
                break;
 
        case SMP_REPORT_MANUF_INFO:
-               req->data_len -= 8;
-               resp_data_len -= 64;
+               req->resid_len -= 8;
+               rsp->resid_len -= 64;
                resp_data[2] = SMP_RESP_FUNC_ACC;
                memcpy(resp_data + 12, shost->hostt->name,
                       SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
                break;
 
        case SMP_DISCOVER:
-               req->data_len -= 16;
-               if ((int)req->data_len < 0) {
-                       req->data_len = 0;
+               req->resid_len -= 16;
+               if ((int)req->resid_len < 0) {
+                       req->resid_len = 0;
                        error = -EINVAL;
                        goto out;
                }
-               resp_data_len -= 56;
+               rsp->resid_len -= 56;
                sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
                break;
 
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
                break;
 
        case SMP_REPORT_PHY_SATA:
-               req->data_len -= 16;
-               if ((int)req->data_len < 0) {
-                       req->data_len = 0;
+               req->resid_len -= 16;
+               if ((int)req->resid_len < 0) {
+                       req->resid_len = 0;
                        error = -EINVAL;
                        goto out;
                }
-               resp_data_len -= 60;
+               rsp->resid_len -= 60;
                sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
                break;
 
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
                break;
 
        case SMP_PHY_CONTROL:
-               req->data_len -= 44;
-               if ((int)req->data_len < 0) {
-                       req->data_len = 0;
+               req->resid_len -= 44;
+               if ((int)req->resid_len < 0) {
+                       req->resid_len = 0;
                        error = -EINVAL;
                        goto out;
                }
-               resp_data_len -= 8;
+               rsp->resid_len -= 8;
                sas_phy_control(sas_ha, req_data[9], req_data[10],
                                req_data[32] >> 4, req_data[33] >> 4,
                                resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 
        local_irq_disable();
        buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
-       memcpy(buf, resp_data, rsp->data_len);
+       memcpy(buf, resp_data, blk_rq_bytes(rsp));
        flush_kernel_dcache_page(bio_page(rsp->bio));
        kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
        local_irq_enable();
-       rsp->data_len = resp_data_len;
 
  out:
        kfree(req_data);
index 167b66dd34c712a73d103d26f603d87042ae4cd7..8032c5adb6a9b2400c318912a7aa376df512b096 100644 (file)
@@ -1312,10 +1312,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
 
-       printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+       printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
                        "bgstat=0x%x bghm=0x%x\n",
                        cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                       cmd->request->nr_sectors, bgstat, bghm);
+                       blk_rq_sectors(cmd->request), bgstat, bghm);
 
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
@@ -2378,15 +2378,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                if (cmnd->cmnd[0] == READ_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9035 BLKGRD: READ @ sector %llu, "
-                                        "count %lu\n",
-                                        (unsigned long long)scsi_get_lba(cmnd),
-                                       cmnd->request->nr_sectors);
+                                       "count %u\n",
+                                       (unsigned long long)scsi_get_lba(cmnd),
+                                       blk_rq_sectors(cmnd->request));
                else if (cmnd->cmnd[0] == WRITE_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9036 BLKGRD: WRITE @ sector %llu, "
-                                       "count %lu cmd=%p\n",
+                                       "count %u cmd=%p\n",
                                        (unsigned long long)scsi_get_lba(cmnd),
-                                       cmnd->request->nr_sectors,
+                                       blk_rq_sectors(cmnd->request),
                                        cmnd);
 
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2406,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                if (cmnd->cmnd[0] == READ_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9040 dbg: READ @ sector %llu, "
-                                        "count %lu\n",
+                                        "count %u\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors);
+                                        blk_rq_sectors(cmnd->request));
                else if (cmnd->cmnd[0] == WRITE_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9041 dbg: WRITE @ sector %llu, "
-                                        "count %lu cmd=%p\n",
+                                        "count %u cmd=%p\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors, cmnd);
+                                        blk_rq_sectors(cmnd->request), cmnd);
                else
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9042 dbg: parser not implemented\n");
index e03dc0b1e1a0f906d1393fbc82a0157f9e2ee49e..5c65da519e39af511334816c18977912a4920637 100644 (file)
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
                printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
                    "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
-                   req->data_len, rsp->bio->bi_vcnt, rsp->data_len);
+                   blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
                return -EINVAL;
        }
 
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        *((u64 *)&mpi_request->SASAddress) = (rphy) ?
            cpu_to_le64(rphy->identify.sas_address) :
            cpu_to_le64(ioc->sas_hba.sas_address);
-       mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
+       mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        psge = &mpi_request->SGL;
 
        /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
-             req->data_len, PCI_DMA_BIDIRECTIONAL);
+               blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_out) {
                mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
                goto unmap;
        }
 
-       ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4),
+       ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
            dma_addr_out);
 
        /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       dma_addr_in =  pci_map_single(ioc->pdev, bio_data(rsp->bio),
-             rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+       dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+                                    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_in) {
                mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
                goto unmap;
        }
 
-       ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4),
+       ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
            dma_addr_in);
 
        dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
                memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
                req->sense_len = sizeof(*mpi_reply);
-               req->data_len = 0;
-               rsp->data_len -= mpi_reply->ResponseDataLength;
-
+               req->resid_len = 0;
+               rsp->resid_len -= mpi_reply->ResponseDataLength;
        } else {
                dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
                    "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
  unmap:
        if (dma_addr_out)
-               pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len,
+               pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
                    PCI_DMA_BIDIRECTIONAL);
        if (dma_addr_in)
-               pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len,
+               pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
                    PCI_DMA_BIDIRECTIONAL);
 
  out:
index 1ce6b24abab297f70937a43cfd414bbe60349f26..5776b2ab6b12a5fa54fe974b11e31619d8ee18e0 100644 (file)
@@ -889,26 +889,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
 }
 EXPORT_SYMBOL(osd_req_add_set_attr_list);
 
-static int _append_map_kern(struct request *req,
-       void *buff, unsigned len, gfp_t flags)
-{
-       struct bio *bio;
-       int ret;
-
-       bio = bio_map_kern(req->q, buff, len, flags);
-       if (IS_ERR(bio)) {
-               OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
-                       PTR_ERR(bio));
-               return PTR_ERR(bio);
-       }
-       ret = blk_rq_append_bio(req->q, req, bio);
-       if (ret) {
-               OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
-               bio_put(bio);
-       }
-       return ret;
-}
-
 static int _req_append_segment(struct osd_request *or,
        unsigned padding, struct _osd_req_data_segment *seg,
        struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +904,14 @@ static int _req_append_segment(struct osd_request *or,
                else
                        pad_buff = io->pad_buff;
 
-               ret = _append_map_kern(io->req, pad_buff, padding,
+               ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
                                       or->alloc_flags);
                if (ret)
                        return ret;
                io->total_bytes += padding;
        }
 
-       ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
+       ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
                               or->alloc_flags);
        if (ret)
                return ret;
@@ -1293,6 +1273,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
 /*
  * osd_finalize_request and helpers
  */
+static struct request *_make_request(struct request_queue *q, bool has_write,
+                             struct _osd_io_info *oii, gfp_t flags)
+{
+       if (oii->bio)
+               return blk_make_request(q, oii->bio, flags);
+       else {
+               struct request *req;
+
+               req = blk_get_request(q, has_write ? WRITE : READ, flags);
+               if (unlikely(!req))
+                       return ERR_PTR(-ENOMEM);
+
+               return req;
+       }
+}
 
 static int _init_blk_request(struct osd_request *or,
        bool has_in, bool has_out)
@@ -1301,11 +1296,13 @@ static int _init_blk_request(struct osd_request *or,
        struct scsi_device *scsi_device = or->osd_dev->scsi_device;
        struct request_queue *q = scsi_device->request_queue;
        struct request *req;
-       int ret = -ENOMEM;
+       int ret;
 
-       req = blk_get_request(q, has_out, flags);
-       if (!req)
+       req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
                goto out;
+       }
 
        or->request = req;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1318,9 +1315,10 @@ static int _init_blk_request(struct osd_request *or,
                or->out.req = req;
                if (has_in) {
                        /* allocate bidi request */
-                       req = blk_get_request(q, READ, flags);
-                       if (!req) {
+                       req = _make_request(q, false, &or->in, flags);
+                       if (IS_ERR(req)) {
                                OSD_DEBUG("blk_get_request for bidi failed\n");
+                               ret = PTR_ERR(req);
                                goto out;
                        }
                        req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1364,26 +1362,6 @@ int osd_finalize_request(struct osd_request *or,
                return ret;
        }
 
-       if (or->out.bio) {
-               ret = blk_rq_append_bio(or->request->q, or->out.req,
-                                       or->out.bio);
-               if (ret) {
-                       OSD_DEBUG("blk_rq_append_bio out failed\n");
-                       return ret;
-               }
-               OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
-                       _LLU(or->out.total_bytes), or->out.req->data_len);
-       }
-       if (or->in.bio) {
-               ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
-               if (ret) {
-                       OSD_DEBUG("blk_rq_append_bio in failed\n");
-                       return ret;
-               }
-               OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
-                       _LLU(or->in.total_bytes), or->in.req->data_len);
-       }
-
        or->out.pad_buff = sg_out_pad_buffer;
        or->in.pad_buff = sg_in_pad_buffer;
 
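Two generic block-layer services replace osd's private helpers here: blk_make_request() builds a request directly from a prepared bio chain and returns an ERR_PTR() on failure, while blk_rq_map_kern() can now append a kernel buffer to a request that already carries data, making _append_map_kern() redundant. A sketch under those assumptions, with a hypothetical caller:

    /* Hypothetical caller combining the two services used above. */
    static struct request *make_osd_request(struct request_queue *q,
                                            struct bio *bio,
                                            void *pad, unsigned int pad_len)
    {
            struct request *req;
            int ret;

            req = blk_make_request(q, bio, GFP_KERNEL); /* bio chain -> request */
            if (IS_ERR(req))
                    return req;

            ret = blk_rq_map_kern(q, req, pad, pad_len, GFP_KERNEL);
            if (ret) {
                    blk_put_request(req);
                    return ERR_PTR(ret);
            }
            return req;
    }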
index bb218c8b6e98373344d1c188982a189a7aecbbf9..dd3f9d2b99fd05b7834e0abbb7e2cbe23e12d462 100644 (file)
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
-       if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
-               memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+       if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+               memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
 
        if (resid)
-               *resid = req->data_len;
+               *resid = req->resid_len;
        ret = req->errors;
  out:
        blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
-               int leftover = (req->hard_nr_sectors << 9);
-
-               if (blk_pc_request(req))
-                       leftover = req->data_len;
-
                /* kill remainder if no retries */
                if (error && scsi_noretry_cmd(cmd))
-                       blk_end_request(req, error, leftover);
+                       blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
@@ -672,34 +667,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
-/*
- * Bidi commands Must be complete as a whole, both sides at once.
- * If part of the bytes were written and lld returned
- * scsi_in()->resid and/or scsi_out()->resid this information will be left
- * in req->data_len and req->next_rq->data_len. The upper-layer driver can
- * decide what to do with this information.
- */
-static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
-{
-       struct request *req = cmd->request;
-       unsigned int dlen = req->data_len;
-       unsigned int next_dlen = req->next_rq->data_len;
-
-       req->data_len = scsi_out(cmd)->resid;
-       req->next_rq->data_len = scsi_in(cmd)->resid;
-
-       /* The req and req->next_rq have not been completed */
-       BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
-
-       scsi_release_buffers(cmd);
-
-       /*
-        * This will goose the queue request function at the end, so we don't
-        * need to worry about launching another command.
-        */
-       scsi_next_command(cmd);
-}
-
 /*
  * Function:    scsi_io_completion()
  *
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
        int result = cmd->result;
-       int this_count;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        if (!sense_deferred)
                                error = -EIO;
                }
+
+               req->resid_len = scsi_get_resid(cmd);
+
                if (scsi_bidi_cmnd(cmd)) {
-                       /* will also release_buffers */
-                       scsi_end_bidi_request(cmd);
+                       /*
+                        * Bidi commands must be complete as a whole,
+                        * both sides at once.
+                        */
+                       req->next_rq->resid_len = scsi_in(cmd)->resid;
+
+                       blk_end_request_all(req, 0);
+
+                       scsi_release_buffers(cmd);
+                       scsi_next_command(cmd);
                        return;
                }
-               req->data_len = scsi_get_resid(cmd);
        }
 
        BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
-       SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+       SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
-                                     req->nr_sectors, good_bytes));
+                                     blk_rq_sectors(req), good_bytes));
 
        /*
         * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;
-       this_count = blk_rq_bytes(req);
 
        error = -EIO;
 
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                }
-               blk_end_request(req, -EIO, blk_rq_bytes(req));
+               blk_end_request_all(req, -EIO);
                scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
-       if (blk_pc_request(req))
-               sdb->length = req->data_len;
-       else
-               sdb->length = req->nr_sectors << 9;
+       sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
 }
 
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
                if (unlikely(ret))
                        return ret;
        } else {
-               BUG_ON(req->data_len);
-               BUG_ON(req->data);
+               BUG_ON(blk_rq_bytes(req));
 
                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }
 
        cmd->cmd_len = req->cmd_len;
-       if (!req->data_len)
+       if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;
        
-       cmd->transfersize = req->data_len;
+       cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
 }
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
                break;
        case BLKPREP_DEFER:
                /*
-                * If we defer, the elv_next_request() returns NULL, but the
+                * If we defer, blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we plug here if no returning
                 * command will automatically do that.
                 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
 
-       blkdev_dequeue_request(req);
+       blk_start_request(req);
 
        if (unlikely(cmd == NULL)) {
                printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
 
        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
-               while ((req = elv_next_request(q)) != NULL)
+               while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
                 * that the request is fully prepared even if we cannot 
                 * accept it.
                 */
-               req = elv_next_request(q);
+               req = blk_peek_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;
 
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-                       blkdev_dequeue_request(req);
+                       blk_start_request(req);
                sdev->device_busy++;
 
                spin_unlock(q->queue_lock);
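The scsi_lib.c hunks adopt the new fetch model: blk_peek_request() returns the next request without dequeueing it, blk_start_request() dequeues it and arms the timeout, and blk_end_request_all() completes whatever bytes remain, which is what lets the hand-computed "leftover" go away. A minimal sketch of the loop shape, with hypothetical can_queue()/dispatch() hooks standing in for the driver's real checks:

    static void example_request_fn(struct request_queue *q)
    {
            struct request *req;

            while ((req = blk_peek_request(q)) != NULL) {
                    if (!can_queue(q))      /* hypothetical readiness test */
                            break;          /* request stays on the queue */

                    blk_start_request(req); /* dequeue + arm timeout */

                    if (dispatch(req) < 0)  /* hypothetical dispatch */
                            blk_end_request_all(req, -EIO);
            }
    }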
index 48ba413f7f6afb511f9d0c26daa42a5cb34dce77..10303272ba4573082f942a4ec8e9cd7e4d0a9e95 100644 (file)
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
         * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
         * length for us.
         */
-       cmd->sdb.length = rq->data_len;
+       cmd->sdb.length = blk_rq_bytes(rq);
 
        return 0;
 
index 50988cbf7b2df90a1cc23d07e423c920b428e2ba..d606452297cf3fdc82747b279067fb67613f8779 100644 (file)
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
        int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 
        while (!blk_queue_plugged(q)) {
-               req = elv_next_request(q);
+               req = blk_fetch_request(q);
                if (!req)
                        break;
 
-               blkdev_dequeue_request(req);
-
                spin_unlock_irq(q->queue_lock);
 
                handler = to_sas_internal(shost->transportt)->f->smp_handler;
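blk_fetch_request() is simply blk_peek_request() followed by blk_start_request(), which is why the separate blkdev_dequeue_request() call disappears above. The drain-and-fail pattern used for dead queues reduces to a sketch like:

    /* Sketch: complete every queued request on a dead queue with an error. */
    static void drain_dead_queue(struct request_queue *q)
    {
            struct request *req;

            while ((req = blk_fetch_request(q)) != NULL)
                    blk_end_request_all(req, -ENODEV);
    }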
index 84044233b637c71929bf22b665d15976096e5d63..40d2860f235a3d2d85bed42b95a24ddc2a180f68 100644 (file)
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
        struct scsi_device *sdp = q->queuedata;
        struct gendisk *disk = rq->rq_disk;
        struct scsi_disk *sdkp;
-       sector_t block = rq->sector;
+       sector_t block = blk_rq_pos(rq);
        sector_t threshold;
-       unsigned int this_count = rq->nr_sectors;
+       unsigned int this_count = blk_rq_sectors(rq);
        int ret, host_dif;
 
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                                        this_count));
 
        if (!sdp || !scsi_device_online(sdp) ||
-           block + rq->nr_sectors > get_capacity(disk)) {
+           block + blk_rq_sectors(rq) > get_capacity(disk)) {
                SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-                                               "Finishing %ld sectors\n",
-                                               rq->nr_sectors));
+                                               "Finishing %u sectors\n",
+                                               blk_rq_sectors(rq)));
                SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
                                                "Retry with 0x%p\n", SCpnt));
                goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         * for this.
         */
        if (sdp->sector_size == 1024) {
-               if ((block & 1) || (rq->nr_sectors & 1)) {
+               if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                }
        }
        if (sdp->sector_size == 2048) {
-               if ((block & 3) || (rq->nr_sectors & 3)) {
+               if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                }
        }
        if (sdp->sector_size == 4096) {
-               if ((block & 7) || (rq->nr_sectors & 7)) {
+               if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
        }
 
        SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-                                       "%s %d/%ld 512 byte blocks.\n",
+                                       "%s %d/%u 512 byte blocks.\n",
                                        (rq_data_dir(rq) == WRITE) ?
                                        "writing" : "reading", this_count,
-                                       rq->nr_sectors));
+                                       blk_rq_sectors(rq)));
 
        /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
        host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
 
 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 {
-       u64 start_lba = scmd->request->sector;
-       u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+       u64 start_lba = blk_rq_pos(scmd->request);
+       u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
        u64 bad_lba;
        int info_valid;
 
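The three per-sector-size checks in sd_prep_fn() above share one pattern: for a logical sector of (sector_size >> 9) 512-byte units, both the starting position and the length must be multiples of that ratio. A hypothetical generalization (not in the patch) that makes the reasoning explicit:

    /* Hypothetical: true if rq is aligned to a power-of-two sector size. */
    static inline bool rq_aligned(struct request *rq, unsigned int sector_size)
    {
            unsigned int mask = (sector_size >> 9) - 1; /* 1024->1, 2048->3, 4096->7 */

            return !((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask));
    }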
index 184dff492797e960c2a65ae4fce5f81ead4c3654..82f14a9482d0791fdd852a397eb2641e2f09e192 100644 (file)
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
        sector_sz = scmd->device->sector_size;
        sectors = good_bytes / sector_sz;
 
-       phys = scmd->request->sector & 0xffffffff;
+       phys = blk_rq_pos(scmd->request) & 0xffffffff;
        if (sector_sz == 4096)
                phys >>= 3;
 
index e1716f14cd4710cca0363eab985cdf370d9b9879..0fc2c0ae7691b4b6f38672880ccfed3a7fbacea3 100644 (file)
@@ -1260,7 +1260,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
 
        sense = rq->sense;
        result = rq->errors;
-       resid = rq->data_len;
+       resid = rq->resid_len;
 
        SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
                sdp->disk->disk_name, srp->header.pack_id, result));
index 0e1a0f2d2ad55543f39f0f9f56b6bea2645e5990..fddba53c7fe51480399d354089d5ba3d485f28f4 100644 (file)
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
                        if (cd->device->sector_size == 2048)
                                error_sector <<= 2;
                        error_sector &= ~(block_sectors - 1);
-                       good_bytes = (error_sector - SCpnt->request->sector) << 9;
+                       good_bytes = (error_sector -
+                                     blk_rq_pos(SCpnt->request)) << 9;
                        if (good_bytes < 0 || good_bytes >= this_count)
                                good_bytes = 0;
                        /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
                                cd->disk->disk_name, block));
 
        if (!cd->device || !scsi_device_online(cd->device)) {
-               SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
-                                       rq->nr_sectors));
+               SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
+                                          blk_rq_sectors(rq)));
                SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
                goto out;
        }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
        /*
         * request doesn't start on hw block boundary, add scatter pads
         */
-       if (((unsigned int)rq->sector % (s_size >> 9)) ||
+       if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
            (scsi_bufflen(SCpnt) % s_size)) {
                scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
                goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
        this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
 
 
-       SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
+       SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
                                cd->cdi.name,
                                (rq_data_dir(rq) == WRITE) ?
                                        "writing" : "reading",
-                               this_count, rq->nr_sectors));
+                               this_count, blk_rq_sectors(rq)));
 
        SCpnt->cmnd[1] = 0;
-       block = (unsigned int)rq->sector / (s_size >> 9);
+       block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
 
        if (this_count > 0xffff) {
                this_count = 0xffff;
index eb24efea8f1450ad9dabdf7a08aa3ada2b0e675d..8681b708344f0130265eba1e3a34456fd1258abe 100644 (file)
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
        struct scsi_tape *STp = SRpnt->stp;
 
        STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
-       STp->buffer->cmdstat.residual = req->data_len;
+       STp->buffer->cmdstat.residual = req->resid_len;
 
        if (SRpnt->waiting)
                complete(SRpnt->waiting);
index 601e95141cbe13e16570db74c9d826e0e6c864e5..54023d41fd15cdcd69b9391653f21f544927cb01 100644 (file)
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
    if (linked_comm && SCpnt->device->queue_depth > 2
                                      && TLDEV(SCpnt->device->type)) {
       HD(j)->cp_stat[i] = READY;
-      flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE);
+      flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
       return 0;
       }
 
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
 
       if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
 
-      if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector;
-      if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector;
+      if (blk_rq_pos(SCpnt->request) < minsec)
+        minsec = blk_rq_pos(SCpnt->request);
+      if (blk_rq_pos(SCpnt->request) > maxsec)
+        maxsec = blk_rq_pos(SCpnt->request);
 
-      sl[n] = SCpnt->request->sector;
-      ioseek += SCpnt->request->nr_sectors;
+      sl[n] = blk_rq_pos(SCpnt->request);
+      ioseek += blk_rq_sectors(SCpnt->request);
 
       if (!n) continue;
 
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
 
    if (!input_only) for (n = 0; n < n_ready; n++) {
       k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
-      ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number;
+      ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
 
       if (!n) continue;
 
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
    if (link_statistics && (overlap || !(flushcount % link_statistics)))
       for (n = 0; n < n_ready; n++) {
          k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
-         printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+         printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
                 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
                 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
                 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
-                SCpnt->request->sector, SCpnt->request->nr_sectors, cursec,
-                YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+                blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
+               cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
                 YESNO(overlap), cpp->xdir);
          }
 #endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
 
    if (linked_comm && SCpnt->device->queue_depth > 2
                                      && TLDEV(SCpnt->device->type))
-      flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE);
+      flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
 
    tstatus = status_byte(spp->target_status);
 
index 98711647ece49548a94409a99ce4b680ee085ca1..81dc93e72535f299b8df7687a2cfa818fcbe4d00 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1201,7 +1201,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;
 
-               if (read && !err)
+               if (read)
                        memcpy(p, addr, len);
 
                __free_page(bvec->bv_page);
index f45dbc18dd175891950ddb84fffa2bc6ce0df117..a85fe310fc6feb20a86fcc758d7673fce6b82ef4 100644 (file)
@@ -331,6 +331,12 @@ static int blkdev_readpage(struct file * file, struct page * page)
        return block_read_full_page(page, blkdev_get_block);
 }
 
+static int blkdev_readpages(struct file *file, struct address_space *mapping,
+                       struct list_head *pages, unsigned nr_pages)
+{
+       return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
+}
+
 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
@@ -1399,6 +1405,7 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 
 static const struct address_space_operations def_blk_aops = {
        .readpage       = blkdev_readpage,
+       .readpages      = blkdev_readpages,
        .writepage      = blkdev_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = blkdev_write_begin,
index 6a347fbc998a4cc3a23f60d3d2e91854db4206b2..ffd42815fda1e6a528030c13712a5c7455e28f21 100644 (file)
@@ -47,6 +47,8 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
                      struct pipe_inode_info *pipe, size_t count,
                      unsigned int flags)
 {
+       ssize_t (*splice_read)(struct file *, loff_t *,
+                              struct pipe_inode_info *, size_t, unsigned int);
        struct coda_file_info *cfi;
        struct file *host_file;
 
@@ -54,10 +56,11 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
        BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
        host_file = cfi->cfi_container;
 
-       if (!host_file->f_op || !host_file->f_op->splice_read)
-               return -EINVAL;
+       splice_read = host_file->f_op->splice_read;
+       if (!splice_read)
+               splice_read = default_file_splice_read;
 
-       return host_file->f_op->splice_read(host_file, ppos, pipe, count,flags);
+       return splice_read(host_file, ppos, pipe, count, flags);
 }
 
 static ssize_t
index b249ae97fb15bfb24e1835a41d351110c7ae3ef9..06ca92672eb5d6118ee644074019a650a96125f3 100644 (file)
@@ -50,10 +50,10 @@ int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid)
 
        /* FIXME: should be included in osd_sense_info */
        if (in_resid)
-               *in_resid = or->in.req ? or->in.req->data_len : 0;
+               *in_resid = or->in.req ? or->in.req->resid_len : 0;
 
        if (out_resid)
-               *out_resid = or->out.req ? or->out.req->data_len : 0;
+               *out_resid = or->out.req ? or->out.req->resid_len : 0;
 
        return ret;
 }
index 13414ec45b8d5b42012d8c5fbd56d54df5a96749..f7dd21ad85a61937666eddf0ccbff66e11e6c3fd 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -302,6 +302,20 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info,
        return 0;
 }
 
+/**
+ * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
+ * @pipe:      the pipe that the buffer belongs to
+ * @buf:       the buffer to put a reference to
+ *
+ * Description:
+ *     This function releases a reference to @buf.
+ */
+void generic_pipe_buf_release(struct pipe_inode_info *pipe,
+                             struct pipe_buffer *buf)
+{
+       page_cache_release(buf->page);
+}
+
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
        .map = generic_pipe_buf_map,
index 9d1e76bb9ee147b8236630ba953a53873b1847d4..6c8c55dec2bcd6b2759f9698abab5b663258cc4b 100644 (file)
@@ -805,12 +805,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
                goto out;
        if (!(in_file->f_mode & FMODE_READ))
                goto fput_in;
-       retval = -EINVAL;
-       in_inode = in_file->f_path.dentry->d_inode;
-       if (!in_inode)
-               goto fput_in;
-       if (!in_file->f_op || !in_file->f_op->splice_read)
-               goto fput_in;
        retval = -ESPIPE;
        if (!ppos)
                ppos = &in_file->f_pos;
@@ -834,6 +828,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        retval = -EINVAL;
        if (!out_file->f_op || !out_file->f_op->sendpage)
                goto fput_out;
+       in_inode = in_file->f_path.dentry->d_inode;
        out_inode = out_file->f_path.dentry->d_inode;
        retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
        if (retval < 0)
index 666953d59a35c77670514b916aea7515fe612951..73766d24f97b9d41f48ba2cadf36d4cd7ae3d60b 100644 (file)
@@ -507,9 +507,131 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 
        return ret;
 }
-
 EXPORT_SYMBOL(generic_file_splice_read);
 
+static const struct pipe_buf_operations default_pipe_buf_ops = {
+       .can_merge = 0,
+       .map = generic_pipe_buf_map,
+       .unmap = generic_pipe_buf_unmap,
+       .confirm = generic_pipe_buf_confirm,
+       .release = generic_pipe_buf_release,
+       .steal = generic_pipe_buf_steal,
+       .get = generic_pipe_buf_get,
+};
+
+static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+                           unsigned long vlen, loff_t offset)
+{
+       mm_segment_t old_fs;
+       loff_t pos = offset;
+       ssize_t res;
+
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+       res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
+       set_fs(old_fs);
+
+       return res;
+}
+
+static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+                           loff_t pos)
+{
+       mm_segment_t old_fs;
+       ssize_t res;
+
+       old_fs = get_fs();
+       set_fs(get_ds());
+       /* The cast to a user pointer is valid due to the set_fs() */
+       res = vfs_write(file, (const char __user *)buf, count, &pos);
+       set_fs(old_fs);
+
+       return res;
+}
+
+ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+                                struct pipe_inode_info *pipe, size_t len,
+                                unsigned int flags)
+{
+       unsigned int nr_pages;
+       unsigned int nr_freed;
+       size_t offset;
+       struct page *pages[PIPE_BUFFERS];
+       struct partial_page partial[PIPE_BUFFERS];
+       struct iovec vec[PIPE_BUFFERS];
+       pgoff_t index;
+       ssize_t res;
+       size_t this_len;
+       int error;
+       int i;
+       struct splice_pipe_desc spd = {
+               .pages = pages,
+               .partial = partial,
+               .flags = flags,
+               .ops = &default_pipe_buf_ops,
+               .spd_release = spd_release_page,
+       };
+
+       index = *ppos >> PAGE_CACHE_SHIFT;
+       offset = *ppos & ~PAGE_CACHE_MASK;
+       nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+       for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
+               struct page *page;
+
+               page = alloc_page(GFP_USER);
+               error = -ENOMEM;
+               if (!page)
+                       goto err;
+
+               this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+               vec[i].iov_base = (void __user *) page_address(page);
+               vec[i].iov_len = this_len;
+               pages[i] = page;
+               spd.nr_pages++;
+               len -= this_len;
+               offset = 0;
+       }
+
+       res = kernel_readv(in, vec, spd.nr_pages, *ppos);
+       if (res < 0) {
+               error = res;
+               goto err;
+       }
+
+       error = 0;
+       if (!res)
+               goto err;
+
+       nr_freed = 0;
+       for (i = 0; i < spd.nr_pages; i++) {
+               this_len = min_t(size_t, vec[i].iov_len, res);
+               partial[i].offset = 0;
+               partial[i].len = this_len;
+               if (!this_len) {
+                       __free_page(pages[i]);
+                       pages[i] = NULL;
+                       nr_freed++;
+               }
+               res -= this_len;
+       }
+       spd.nr_pages -= nr_freed;
+
+       res = splice_to_pipe(pipe, &spd);
+       if (res > 0)
+               *ppos += res;
+
+       return res;
+
+err:
+       for (i = 0; i < spd.nr_pages; i++)
+               __free_page(pages[i]);
+
+       return error;
+}
+EXPORT_SYMBOL(default_file_splice_read);
+
 /*
  * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
  * using sendpage(). Return the number of bytes sent.
@@ -881,6 +1003,36 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 
 EXPORT_SYMBOL(generic_file_splice_write);
 
+static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+                         struct splice_desc *sd)
+{
+       int ret;
+       void *data;
+
+       ret = buf->ops->confirm(pipe, buf);
+       if (ret)
+               return ret;
+
+       data = buf->ops->map(pipe, buf, 0);
+       ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
+       buf->ops->unmap(pipe, buf, data);
+
+       return ret;
+}
+
+static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
+                                        struct file *out, loff_t *ppos,
+                                        size_t len, unsigned int flags)
+{
+       ssize_t ret;
+
+       ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
+       if (ret > 0)
+               *ppos += ret;
+
+       return ret;
+}
+
 /**
  * generic_splice_sendpage - splice data from a pipe to a socket
  * @pipe:      pipe to splice from
@@ -908,11 +1060,10 @@ EXPORT_SYMBOL(generic_splice_sendpage);
 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
                           loff_t *ppos, size_t len, unsigned int flags)
 {
+       ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+                               loff_t *, size_t, unsigned int);
        int ret;
 
-       if (unlikely(!out->f_op || !out->f_op->splice_write))
-               return -EINVAL;
-
        if (unlikely(!(out->f_mode & FMODE_WRITE)))
                return -EBADF;
 
@@ -923,7 +1074,11 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
        if (unlikely(ret < 0))
                return ret;
 
-       return out->f_op->splice_write(pipe, out, ppos, len, flags);
+       splice_write = out->f_op->splice_write;
+       if (!splice_write)
+               splice_write = default_file_splice_write;
+
+       return splice_write(pipe, out, ppos, len, flags);
 }
 
 /*
@@ -933,11 +1088,10 @@ static long do_splice_to(struct file *in, loff_t *ppos,
                         struct pipe_inode_info *pipe, size_t len,
                         unsigned int flags)
 {
+       ssize_t (*splice_read)(struct file *, loff_t *,
+                              struct pipe_inode_info *, size_t, unsigned int);
        int ret;
 
-       if (unlikely(!in->f_op || !in->f_op->splice_read))
-               return -EINVAL;
-
        if (unlikely(!(in->f_mode & FMODE_READ)))
                return -EBADF;
 
@@ -945,7 +1099,11 @@ static long do_splice_to(struct file *in, loff_t *ppos,
        if (unlikely(ret < 0))
                return ret;
 
-       return in->f_op->splice_read(in, ppos, pipe, len, flags);
+       splice_read = in->f_op->splice_read;
+       if (!splice_read)
+               splice_read = default_file_splice_read;
+
+       return splice_read(in, ppos, pipe, len, flags);
 }
 
 /**
@@ -1112,6 +1270,9 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
        return ret;
 }
 
+static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+                              struct pipe_inode_info *opipe,
+                              size_t len, unsigned int flags);
 /*
  * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
  * location, so checking ->i_pipe is not enough to verify that this is a
@@ -1132,12 +1293,32 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                      struct file *out, loff_t __user *off_out,
                      size_t len, unsigned int flags)
 {
-       struct pipe_inode_info *pipe;
+       struct pipe_inode_info *ipipe;
+       struct pipe_inode_info *opipe;
        loff_t offset, *off;
        long ret;
 
-       pipe = pipe_info(in->f_path.dentry->d_inode);
-       if (pipe) {
+       ipipe = pipe_info(in->f_path.dentry->d_inode);
+       opipe = pipe_info(out->f_path.dentry->d_inode);
+
+       if (ipipe && opipe) {
+               if (off_in || off_out)
+                       return -ESPIPE;
+
+               if (!(in->f_mode & FMODE_READ))
+                       return -EBADF;
+
+               if (!(out->f_mode & FMODE_WRITE))
+                       return -EBADF;
+
+               /* Splicing to self would be fun, but... */
+               if (ipipe == opipe)
+                       return -EINVAL;
+
+               return splice_pipe_to_pipe(ipipe, opipe, len, flags);
+       }
+
+       if (ipipe) {
                if (off_in)
                        return -ESPIPE;
                if (off_out) {
@@ -1149,7 +1330,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                } else
                        off = &out->f_pos;
 
-               ret = do_splice_from(pipe, out, off, len, flags);
+               ret = do_splice_from(ipipe, out, off, len, flags);
 
                if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
                        ret = -EFAULT;
@@ -1157,8 +1338,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                return ret;
        }
 
-       pipe = pipe_info(out->f_path.dentry->d_inode);
-       if (pipe) {
+       if (opipe) {
                if (off_out)
                        return -ESPIPE;
                if (off_in) {
@@ -1170,7 +1350,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                } else
                        off = &in->f_pos;
 
-               ret = do_splice_to(in, off, pipe, len, flags);
+               ret = do_splice_to(in, off, opipe, len, flags);
 
                if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
                        ret = -EFAULT;
@@ -1511,7 +1691,7 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
  * Make sure there's data to read. Wait for input if we can, otherwise
  * return an appropriate error.
  */
-static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
 {
        int ret;
 
@@ -1549,7 +1729,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
  * Make sure there's writeable room. Wait for room if we can, otherwise
  * return an appropriate error.
  */
-static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
 {
        int ret;
 
@@ -1586,6 +1766,124 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
        return ret;
 }
 
+/*
+ * Splice contents of ipipe to opipe.
+ */
+static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
+                              struct pipe_inode_info *opipe,
+                              size_t len, unsigned int flags)
+{
+       struct pipe_buffer *ibuf, *obuf;
+       int ret = 0, nbuf;
+       bool input_wakeup = false;
+
+
+retry:
+       ret = ipipe_prep(ipipe, flags);
+       if (ret)
+               return ret;
+
+       ret = opipe_prep(opipe, flags);
+       if (ret)
+               return ret;
+
+       /*
+        * Potential ABBA deadlock, work around it by ordering lock
+        * grabbing by pipe info address. Otherwise two different processes
+        * could deadlock (one doing tee from A -> B, the other from B -> A).
+        */
+       pipe_double_lock(ipipe, opipe);
+
+       do {
+               if (!opipe->readers) {
+                       send_sig(SIGPIPE, current, 0);
+                       if (!ret)
+                               ret = -EPIPE;
+                       break;
+               }
+
+               if (!ipipe->nrbufs && !ipipe->writers)
+                       break;
+
+               /*
+                * Cannot make any progress, because either the input
+                * pipe is empty or the output pipe is full.
+                */
+               if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) {
+                       /* Already processed some buffers, break */
+                       if (ret)
+                               break;
+
+                       if (flags & SPLICE_F_NONBLOCK) {
+                               ret = -EAGAIN;
+                               break;
+                       }
+
+                       /*
+                        * We raced with another reader/writer and haven't
+                        * managed to process any buffers.  A zero return
+                        * value means EOF, so retry instead.
+                        */
+                       pipe_unlock(ipipe);
+                       pipe_unlock(opipe);
+                       goto retry;
+               }
+
+               ibuf = ipipe->bufs + ipipe->curbuf;
+               nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS;
+               obuf = opipe->bufs + nbuf;
+
+               if (len >= ibuf->len) {
+                       /*
+                        * Simply move the whole buffer from ipipe to opipe
+                        */
+                       *obuf = *ibuf;
+                       ibuf->ops = NULL;
+                       opipe->nrbufs++;
+                       ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS;
+                       ipipe->nrbufs--;
+                       input_wakeup = true;
+               } else {
+                       /*
+                        * Get a reference to this pipe buffer,
+                        * so we can copy the contents over.
+                        */
+                       ibuf->ops->get(ipipe, ibuf);
+                       *obuf = *ibuf;
+
+                       /*
+                        * Don't inherit the gift flag, we need to
+                        * prevent multiple steals of this page.
+                        */
+                       obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+
+                       obuf->len = len;
+                       opipe->nrbufs++;
+                       ibuf->offset += obuf->len;
+                       ibuf->len -= obuf->len;
+               }
+               ret += obuf->len;
+               len -= obuf->len;
+       } while (len);
+
+       pipe_unlock(ipipe);
+       pipe_unlock(opipe);
+
+       /*
+        * If we put data in the output pipe, wake up any potential readers.
+        */
+       if (ret > 0) {
+               smp_mb();
+               if (waitqueue_active(&opipe->wait))
+                       wake_up_interruptible(&opipe->wait);
+               kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
+       }
+       if (input_wakeup)
+               wakeup_pipe_writers(ipipe);
+
+       return ret;
+}
+
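With splice_pipe_to_pipe() in place, splice(2) between two pipe file descriptors no longer fails outright; a minimal userspace sketch exercising the new path (hypothetical test program, not part of this commit):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int a[2], b[2];
        char buf[5] = "";

        if (pipe(a) || pipe(b))
                return 1;
        write(a[1], "data", 4);

        /* pipe -> pipe: buffer references move, page contents are not
         * copied; SPLICE_F_NONBLOCK maps to -EAGAIN instead of sleeping */
        if (splice(a[0], NULL, b[1], NULL, 4096, SPLICE_F_NONBLOCK) != 4)
                return 1;

        read(b[0], buf, 4);
        printf("%s\n", buf);    /* prints "data" */
        return 0;
}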
 /*
  * Link contents of ipipe to opipe.
  */
@@ -1690,9 +1988,9 @@ static long do_tee(struct file *in, struct file *out, size_t len,
                 * Keep going, unless we encounter an error. The ipipe/opipe
                 * ordering doesn't really matter.
                 */
-               ret = link_ipipe_prep(ipipe, flags);
+               ret = ipipe_prep(ipipe, flags);
                if (!ret) {
-                       ret = link_opipe_prep(opipe, flags);
+                       ret = opipe_prep(opipe, flags);
                        if (!ret)
                                ret = link_pipe(ipipe, opipe, len, flags);
                }
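For comparison, the tee(2) path that do_tee() serves duplicates buffer references without consuming the input, so the caller still has to drain it; a hedged sketch modelled on the classic usage (hypothetical program, stdin and stdout must both be pipes):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int null = open("/dev/null", O_WRONLY);
        ssize_t n;

        /* duplicate stdin's buffers to stdout ... */
        while ((n = tee(STDIN_FILENO, STDOUT_FILENO, 65536, 0)) > 0)
                /* ... then consume the originals ourselves */
                if (splice(STDIN_FILENO, NULL, null, NULL, n,
                           SPLICE_F_MOVE) < 0)
                        break;
        close(null);
        return 0;
}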
index 7b214fd672a2d923e56d8263d8c183d7dcf98b0e..d30ec6f30dd7ee21cc7cb78bb06da62f513cd2df 100644 (file)
@@ -218,12 +218,12 @@ struct bio {
 #define bio_sectors(bio)       ((bio)->bi_size >> 9)
 #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
        if (bio->bi_vcnt)
-               return bio_iovec(bio)->bv_len >> 9;
+               return bio_iovec(bio)->bv_len;
        else /* dataless requests such as discard */
-               return bio->bi_size >> 9;
+               return bio->bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
 }
 
 /*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
  * A bio_list anchors a singly-linked list of bios chained through the bi_next
  * member of the bio.  The bio_list also caches the last list member to allow
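The bio_list named here is what drivers/block/loop.c switches to below (lo_bio/lo_biotail collapse into one lo_bio_list); a hedged sketch of the FIFO pattern, with a hypothetical defer/replay pair and no locking, assuming a single consumer:

/* hedged sketch: queue bios for a worker thread, FIFO order */
static struct bio_list pending;         /* bio_list_init(&pending) at setup */

static void mydrv_defer_bio(struct bio *bio)
{
        bio_list_add(&pending, bio);    /* append; the list caches the tail */
}

static void mydrv_replay_bios(void)
{
        struct bio *bio;

        while ((bio = bio_list_pop(&pending)) != NULL) {
                pr_debug("bio: %u bytes in current vec\n",
                         bio_cur_bytes(bio));
                bio_endio(bio, 0);      /* complete with no error */
        }
}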
index b4f71f1a4af72bd7c68c6f6fe707f43e6a50888f..56ce53fce72eeabd7452b696c9fc024f12f4a423 100644 (file)
@@ -166,19 +166,9 @@ struct request {
        enum rq_cmd_type_bits cmd_type;
        unsigned long atomic_flags;
 
-       /* Maintain bio traversal state for part by part I/O submission.
-        * hard_* are block layer internals, no driver should touch them!
-        */
-
-       sector_t sector;                /* next sector to submit */
-       sector_t hard_sector;           /* next sector to complete */
-       unsigned long nr_sectors;       /* no. of sectors left to submit */
-       unsigned long hard_nr_sectors;  /* no. of sectors left to complete */
-       /* no. of sectors left to submit in the current segment */
-       unsigned int current_nr_sectors;
-
-       /* no. of sectors left to complete in the current segment */
-       unsigned int hard_cur_sectors;
+       /* the following two fields are internal, NEVER access directly */
+       sector_t __sector;              /* sector cursor */
+       unsigned int __data_len;        /* total data len */
 
        struct bio *bio;
        struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
 
        unsigned short ioprio;
 
-       void *special;
-       char *buffer;
+       void *special;          /* opaque pointer available for LLD use */
+       char *buffer;           /* kaddr of the current segment if available */
 
        int tag;
        int errors;
@@ -226,10 +216,9 @@ struct request {
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;
 
-       unsigned int data_len;
        unsigned int extra_len; /* length of alignment and padding */
        unsigned int sense_len;
-       void *data;
+       unsigned int resid_len; /* residual count */
        void *sense;
 
        unsigned long deadline;
@@ -415,7 +404,7 @@ struct request_queue
        struct list_head        tag_busy_list;
 
        unsigned int            nr_sorted;
-       unsigned int            in_flight;
+       unsigned int            in_flight[2];
 
        unsigned int            rq_timeout;
        struct timer_list       timeout;
@@ -522,6 +511,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
        __clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+       return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
        WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,6 +746,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+                                       gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
@@ -767,12 +763,6 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
 
-/*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-                            struct bio *bio);
-
 /*
 * A queue has just exited congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
@@ -798,7 +788,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
@@ -831,41 +820,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
                blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos()                : the current sector
+ * blk_rq_bytes()      : bytes left in the entire request
+ * blk_rq_cur_bytes()  : bytes left in the current segment
+ * blk_rq_sectors()    : sectors left in the entire request
+ * blk_rq_cur_sectors()        : sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+       return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+       return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+       return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+       return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+       return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
 
 /*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes the given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends.  __blk_end_request() must be called
+ * with the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error,
-                               unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
-                               unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
-                               unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
-                               unsigned int nr_bytes,
-                               int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+                              unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+                           unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+                             unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
-                              unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 /*
  * Access functions for manipulating queue properties
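Taken together, the accessor and completion changes above define the driver model the conversions in this merge move to: fetch a request, read the cursor through the blk_rq_*() helpers, and complete by bytes instead of adjusting rq->sector/nr_sectors by hand. A hedged sketch of a simple polled request_fn (mydev_xfer is a hypothetical hardware helper):

static void mydev_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* blk_fetch_request() = blk_peek_request() + blk_start_request() */
        while ((rq = blk_fetch_request(q)) != NULL) {
                bool more;

                if (!blk_fs_request(rq)) {
                        __blk_end_request_all(rq, -EIO);
                        continue;
                }

                do {
                        /* transfer the segment at the current cursor */
                        mydev_xfer(blk_rq_pos(rq), rq->buffer,
                                   blk_rq_cur_sectors(rq));
                        /* advance; returns true while segments remain */
                        more = __blk_end_request_cur(rq, 0);
                } while (more);
        }
}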
index c59b769f62b0dcb2582e0a3e9592d19b503d8180..1cb3372e65d89521886a8deb0f08f46c31dfe813 100644 (file)
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
                               struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
 extern void elv_requeue_request(struct request_queue *, struct request *);
 extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
        ELV_MQUEUE_MUST,
 };
 
-#define rq_end_sector(rq)      ((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq)      (blk_rq_pos(rq) + blk_rq_sectors(rq))
 #define rb_entry_rq(node)      rb_entry((node), struct request, rb_node)
 
 /*
index 3b534e527e09922aa0d89b1854ba60a4e5871f5a..83d6b4397245bdbb44f27d9f7cbdfa763ff96767 100644 (file)
@@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov,
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
                struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+               struct pipe_inode_info *, size_t, unsigned int);
 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
                struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
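The new default_file_splice_read() lets a file system without a page-cache based splice path still support splice(2) by routing through its ->read into freshly allocated pipe pages; a hedged sketch of wiring it up (myfs_read is hypothetical):

/* hedged sketch; myfs_read is a hypothetical ->read implementation */
static ssize_t myfs_read(struct file *, char __user *, size_t, loff_t *);

static const struct file_operations myfs_file_ops = {
        .read           = myfs_read,
        /* fall back to readv-into-pipe-pages instead of failing */
        .splice_read    = default_file_splice_read,
};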
index ff65fffb078f6e9475983190da859bfecb942740..34c128f0a33c1882622a132e74490405608580ba 100644 (file)
@@ -26,6 +26,9 @@
 #include <asm/io.h>
 #include <asm/mutex.h>
 
+/* for request_sense */
+#include <linux/cdrom.h>
+
 #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
 # define SUPPORT_VLB_SYNC 0
 #else
@@ -324,7 +327,6 @@ struct ide_cmd {
        unsigned int            cursg_ofs;
 
        struct request          *rq;            /* copy of request */
-       void                    *special;       /* valid_t generally */
 };
 
 /* ATAPI packet command flags */
@@ -360,11 +362,7 @@ struct ide_atapi_pc {
 
        /* data buffer */
        u8 *buf;
-       /* current buffer position */
-       u8 *cur_pos;
        int buf_size;
-       /* missing/available data on the current buffer */
-       int b_count;
 
        /* the corresponding request */
        struct request *rq;
@@ -377,10 +375,6 @@ struct ide_atapi_pc {
         */
        u8 pc_buf[IDE_PC_BUFFER_SIZE];
 
-       /* idetape only */
-       struct idetape_bh *bh;
-       char *b_data;
-
        unsigned long timeout;
 };
 
@@ -593,16 +587,16 @@ struct ide_drive_s {
        /* callback for packet commands */
        int  (*pc_callback)(struct ide_drive_s *, int);
 
-       void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
-       int  (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
-                             unsigned int, int);
-
        ide_startstop_t (*irq_handler)(struct ide_drive_s *);
 
        unsigned long atapi_flags;
 
        struct ide_atapi_pc request_sense_pc;
-       struct request request_sense_rq;
+
+       /* current sense rq and buffer */
+       bool sense_rq_armed;
+       struct request sense_rq;
+       struct request_sense sense_data;
 };
 
 typedef struct ide_drive_s ide_drive_t;
@@ -1174,7 +1168,10 @@ int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
 int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
 int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
 void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *, struct gendisk *);
+void ide_retry_pc(ide_drive_t *drive);
+
+void ide_prep_sense(ide_drive_t *drive, struct request *rq);
+int ide_queue_sense_rq(ide_drive_t *drive, void *special);
 
 int ide_cd_expiry(ide_drive_t *);
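The sense request now lives in ide_drive_t and is armed ahead of time; a hedged sketch of the expected call pattern in an ATAPI do_request path (mydrv_issue_pc is a hypothetical issue helper):

static ide_startstop_t mydrv_do_request(ide_drive_t *drive,
                                        struct request *rq, sector_t block)
{
        /* arm drive->sense_rq before the command has a chance to fail */
        ide_prep_sense(drive, rq);

        if (mydrv_issue_pc(drive, rq)) {
                /* queue the armed sense request to fetch sense data */
                ide_retry_pc(drive);
                return ide_stopped;
        }
        return ide_started;
}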
 
index 40725447f5e0e958ef58097636603f00ca3d04e0..66c194e2d9b9c072fc201b864506ae12c010112d 100644 (file)
@@ -56,8 +56,7 @@ struct loop_device {
        gfp_t           old_gfp_mask;
 
        spinlock_t              lo_lock;
-       struct bio              *lo_bio;
-       struct bio              *lo_biotail;
+       struct bio_list         lo_bio_list;
        int                     lo_state;
        struct mutex            lo_ctl_mutex;
        struct task_struct      *lo_thread;
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644 (file)
index 1f76b1e..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *  include/linux/mg_disk.c
- *
- *  Support for the mGine m[g]flash IO mode.
- *  Based on legacy hd.c
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-#include <linux/blkdev.h>
-#include <linux/ata.h>
-
-/* name for block device */
-#define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-#define MG_DISK_MAJ 0
-#define MG_DISK_MAX_PART 16
-#define MG_SECTOR_SIZE 512
-#define MG_MAX_SECTS 256
-
-/* Register offsets */
-#define MG_BUFF_OFFSET                 0x8000
-#define MG_STORAGE_BUFFER_SIZE         0x200
-#define MG_REG_OFFSET                  0xC000
-#define MG_REG_FEATURE                 (MG_REG_OFFSET + 2)     /* write case */
-#define MG_REG_ERROR                   (MG_REG_OFFSET + 2)     /* read case */
-#define MG_REG_SECT_CNT                        (MG_REG_OFFSET + 4)
-#define MG_REG_SECT_NUM                        (MG_REG_OFFSET + 6)
-#define MG_REG_CYL_LOW                 (MG_REG_OFFSET + 8)
-#define MG_REG_CYL_HIGH                        (MG_REG_OFFSET + 0xA)
-#define MG_REG_DRV_HEAD                        (MG_REG_OFFSET + 0xC)
-#define MG_REG_COMMAND                 (MG_REG_OFFSET + 0xE)   /* write case */
-#define MG_REG_STATUS                  (MG_REG_OFFSET + 0xE)   /* read  case */
-#define MG_REG_DRV_CTRL                        (MG_REG_OFFSET + 0x10)
-#define MG_REG_BURST_CTRL              (MG_REG_OFFSET + 0x12)
-
-/* "Drive Select/Head Register" bit values */
-#define MG_REG_HEAD_MUST_BE_ON         0xA0 /* These 2 bits are always on */
-#define MG_REG_HEAD_DRIVE_MASTER       (0x00 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_DRIVE_SLAVE                (0x10 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_LBA_MODE           (0x40 | MG_REG_HEAD_MUST_BE_ON)
-
-
-/* "Device Control Register" bit values */
-#define MG_REG_CTRL_INTR_ENABLE                        0x0
-#define MG_REG_CTRL_INTR_DISABLE               (0x1<<1)
-#define MG_REG_CTRL_RESET                      (0x1<<2)
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH      0x0
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW       (0x1<<4)
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW                0x0
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH       (0x1<<5)
-#define MG_REG_CTRL_DPD_DISABLE                        0x0
-#define MG_REG_CTRL_DPD_ENABLE                 (0x1<<6)
-
-/* Status register bit */
-/* error bit in status register */
-#define MG_REG_STATUS_BIT_ERROR                        0x01
-/* corrected error in status register */
-#define MG_REG_STATUS_BIT_CORRECTED_ERROR      0x04
-/* data request bit in status register */
-#define MG_REG_STATUS_BIT_DATA_REQ             0x08
-/* DSC - Drive Seek Complete */
-#define MG_REG_STATUS_BIT_SEEK_DONE            0x10
-/* DWF - Drive Write Fault */
-#define MG_REG_STATUS_BIT_WRITE_FAULT          0x20
-#define MG_REG_STATUS_BIT_READY                        0x40
-#define MG_REG_STATUS_BIT_BUSY                 0x80
-
-/* handy status */
-#define MG_STAT_READY  (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
-#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
-                               (MG_REG_STATUS_BIT_BUSY | \
-                                MG_REG_STATUS_BIT_WRITE_FAULT | \
-                                MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
-
-/* Error register */
-#define MG_REG_ERR_AMNF                0x01
-#define MG_REG_ERR_ABRT                0x04
-#define MG_REG_ERR_IDNF                0x10
-#define MG_REG_ERR_UNC         0x40
-#define MG_REG_ERR_BBK         0x80
-
-/* error code for others */
-#define MG_ERR_NONE            0
-#define MG_ERR_TIMEOUT         0x100
-#define MG_ERR_INIT_STAT       0x101
-#define MG_ERR_TRANSLATION     0x102
-#define MG_ERR_CTRL_RST                0x103
-#define MG_ERR_INV_STAT                0x104
-#define MG_ERR_RSTOUT          0x105
-
-#define MG_MAX_ERRORS  6       /* Max read/write errors */
-
-/* command */
-#define MG_CMD_RD 0x20
-#define MG_CMD_WR 0x30
-#define MG_CMD_SLEEP 0x99
-#define MG_CMD_WAKEUP 0xC3
-#define MG_CMD_ID 0xEC
-#define MG_CMD_WR_CONF 0x3C
-#define MG_CMD_RD_CONF 0x40
-
-/* operation mode */
-#define MG_OP_CASCADE (1 << 0)
-#define MG_OP_CASCADE_SYNC_RD (1 << 1)
-#define MG_OP_CASCADE_SYNC_WR (1 << 2)
-#define MG_OP_INTERLEAVE (1 << 3)
-
-/* synchronous */
-#define MG_BURST_LAT_4 (3 << 4)
-#define MG_BURST_LAT_5 (4 << 4)
-#define MG_BURST_LAT_6 (5 << 4)
-#define MG_BURST_LAT_7 (6 << 4)
-#define MG_BURST_LAT_8 (7 << 4)
-#define MG_BURST_LEN_4 (1 << 1)
-#define MG_BURST_LEN_8 (2 << 1)
-#define MG_BURST_LEN_16 (3 << 1)
-#define MG_BURST_LEN_32 (4 << 1)
-#define MG_BURST_LEN_CONT (0 << 1)
-
-/* timeout value (unit: ms) */
-#define MG_TMAX_CONF_TO_CMD    1
-#define MG_TMAX_WAIT_RD_DRQ    10
-#define MG_TMAX_WAIT_WR_DRQ    500
-#define MG_TMAX_RST_TO_BUSY    10
-#define MG_TMAX_HDRST_TO_RDY   500
-#define MG_TMAX_SWRST_TO_RDY   500
-#define MG_TMAX_RSTOUT         3000
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV            (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV         (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST        (1 << 2)
-
-#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
-
-/* names of GPIO resource */
-#define MG_RST_PIN     "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN  "mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
-       /* disk resource */
-       u32 use_polling;
-
-       /* device attribution */
-       u32 dev_attr;
-
-       /* internally used */
-       struct mg_host *host;
-};
-
-/* main structure for mflash driver */
-struct mg_host {
-       struct device *dev;
-
-       struct request_queue *breq;
-       spinlock_t lock;
-       struct gendisk *gd;
-
-       struct timer_list timer;
-       void (*mg_do_intr) (struct mg_host *);
-
-       u16 id[ATA_ID_WORDS];
-
-       u16 cyls;
-       u16 heads;
-       u16 sectors;
-       u32 n_sectors;
-       u32 nres_sectors;
-
-       void __iomem *dev_base;
-       unsigned int irq;
-       unsigned int rst;
-       unsigned int rstout;
-
-       u32 major;
-       u32 error;
-};
-
-/*
- * Debugging macro and defines
- */
-#undef DO_MG_DEBUG
-#ifdef DO_MG_DEBUG
-#  define MG_DBG(fmt, args...) \
-       printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
-#else /* CONFIG_MG_DEBUG */
-#  define MG_DBG(fmt, args...) do { } while (0)
-#endif /* CONFIG_MG_DEBUG */
-
-#endif
index c8f038554e80d1ecd18cd35c5a4305a30b90dd23..b43a9e0390591a9efe7612a8ba1af1dc13806a7b 100644 (file)
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
 #endif
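With generic_pipe_buf_release() exported alongside the helpers above, a buffer type backed by an ordinary page can delegate every operation; a hedged sketch (field set as of this kernel version):

static const struct pipe_buf_operations simple_pipe_buf_ops = {
        .can_merge      = 0,
        .map            = generic_pipe_buf_map,
        .unmap          = generic_pipe_buf_unmap,
        .confirm        = generic_pipe_buf_confirm,
        .release        = generic_pipe_buf_release,
        .steal          = generic_pipe_buf_steal,
        .get            = generic_pipe_buf_get,
};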
index 5f3faa9d15aea2416804080e1e5d17cfc9b5b5ce..18e7c7c0cae6d2d7733518d3f740a1d0011b201c 100644 (file)
@@ -11,8 +11,7 @@
 #include <linux/pipe_fs_i.h>
 
 /*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
  */
 #define SPLICE_F_MOVE  (0x01)  /* move pages instead of copying */
 #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
index 94c56d29869df77a2c72b7438762db7ff72fac9f..4dbcbc1c3481c212ec3a6968d39e9041a5a6d530 100644 (file)
@@ -15,6 +15,7 @@
 #define VIRTIO_BLK_F_GEOMETRY  4       /* Legacy geometry available  */
 #define VIRTIO_BLK_F_RO                5       /* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE  6       /* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI      7       /* Supports scsi command passthru */
 
 struct virtio_blk_config
 {
@@ -55,6 +56,13 @@ struct virtio_blk_outhdr
        __u64 sector;
 };
 
+struct virtio_scsi_inhdr {
+       __u32 errors;
+       __u32 data_len;
+       __u32 sense_len;
+       __u32 residual;
+};
+
 /* And this is the final byte of the write scatter-gather list. */
 #define VIRTIO_BLK_S_OK                0
 #define VIRTIO_BLK_S_IOERR     1
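On the guest side, the new in-header carries SCSI completion details back into the request; a hedged sketch of the completion-side bookkeeping, matching the resid_len/sense_len fields added to struct request above (mydrv_scsi_done is hypothetical):

/* hedged sketch: runs when the device completes a passthru request */
static void mydrv_scsi_done(struct request *rq,
                            struct virtio_scsi_inhdr *in)
{
        rq->errors      = in->errors;
        rq->resid_len   = in->residual;
        rq->sense_len   = in->sense_len;
}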
index 43b50d36925cab501acab17f7ed754867652aee5..3878d1dc7f596f8c7c4fb8d3fb547448a9b2dba1 100644 (file)
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
 
 static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
 {
-       return scmd->request->sector;
+       return blk_rq_pos(scmd->request);
 }
 
 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
index 921ef5d1f0ba95e7497faa55afb293c50ae7ee47..5708a14bee54dd9a88df70b5de76d054030206a9 100644 (file)
@@ -642,12 +642,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
-               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
-                               rq->cmd_len, rq->cmd);
+               __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+                               what, rq->errors, rq->cmd_len, rq->cmd);
        } else  {
                what |= BLK_TC_ACT(BLK_TC_FS);
-               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-                               rw, what, rq->errors, 0, NULL);
+               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
+                               what, rq->errors, 0, NULL);
        }
 }
 
@@ -854,11 +854,11 @@ void blk_add_driver_data(struct request_queue *q,
                return;
 
        if (blk_pc_request(rq))
-               __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
-                               rq->errors, len, data);
+               __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+                               BLK_TA_DRV_DATA, rq->errors, len, data);
        else
-               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-                               0, BLK_TA_DRV_DATA, rq->errors, len, data);
+               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+                               BLK_TA_DRV_DATA, rq->errors, len, data);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
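Both trace hooks now derive position and length through the blk_rq_*() accessors; for reference, a hedged sketch of a driver emitting private data through blk_add_driver_data() (the payload struct is hypothetical):

/* hedged sketch: attach a driver-private blob to a request's trace */
struct mydrv_trace_payload {
        u32 hw_queue;
        u32 hw_tag;
};

static void mydrv_trace_issue(struct request_queue *q, struct request *rq,
                              struct mydrv_trace_payload *p)
{
        blk_add_driver_data(q, rq, p, sizeof(*p));
}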