Merge branch 'master' into for-2.6.31
author Jens Axboe <jens.axboe@oracle.com>
Fri, 22 May 2009 18:25:34 +0000 (20:25 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
Fri, 22 May 2009 18:25:34 +0000 (20:25 +0200)
Conflicts:
drivers/block/hd.c
drivers/block/mg_disk.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
12 files changed:
block/blk-core.c
drivers/ata/libata-scsi.c
drivers/ide/ide-cd.c
drivers/mmc/card/block.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sg.c
fs/bio.c
include/linux/blkdev.h
include/linux/fs.h

diff --combined block/blk-core.c
index 1c748403882915470df51ba0d639f881e8933b44,c89883be87379d9454ab1af7cd68319e92795597..59c4af5231121c4c819a37b96abcc2fadbd7a7c2
@@@ -68,11 -68,11 +68,11 @@@ static void drive_stat_acct(struct requ
        int rw = rq_data_dir(rq);
        int cpu;
  
 -      if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
 +      if (!blk_do_io_stat(rq))
                return;
  
        cpu = part_stat_lock();
 -      part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
 +      part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
  
        if (!new_io)
                part_stat_inc(cpu, part, merges[rw]);
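
The accounting and dump paths in this file now go through the request accessor helpers introduced by this series instead of reading struct request fields directly. A minimal illustrative sketch of the mapping, not part of this commit (my_dump_rq() is a hypothetical helper written against the new accessors in include/linux/blkdev.h):

#include <linux/blkdev.h>

/* Hypothetical helper: print request geometry via the new accessors. */
static void my_dump_rq(struct request *rq)
{
	sector_t pos       = blk_rq_pos(rq);          /* was rq->sector             */
	unsigned int nsect = blk_rq_sectors(rq);      /* was rq->nr_sectors         */
	unsigned int cur   = blk_rq_cur_sectors(rq);  /* was rq->current_nr_sectors */
	unsigned int bytes = blk_rq_bytes(rq);        /* was rq->data_len (PC) or
	                                                 rq->hard_nr_sectors << 9   */

	printk(KERN_DEBUG "rq %p: sector %llu, %u sectors (%u current), %u bytes\n",
	       rq, (unsigned long long)pos, nsect, cur, bytes);
}
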
@@@ -127,14 -127,13 +127,14 @@@ void blk_rq_init(struct request_queue *
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;
        rq->q = q;
 -      rq->sector = rq->hard_sector = (sector_t) -1;
 +      rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->cmd = rq->__cmd;
        rq->cmd_len = BLK_MAX_CDB;
        rq->tag = -1;
        rq->ref_count = 1;
 +      rq->start_time = jiffies;
  }
  EXPORT_SYMBOL(blk_rq_init);
  
@@@ -185,11 -184,14 +185,11 @@@ void blk_dump_rq_flags(struct request *
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);
  
 -      printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
 -                                              (unsigned long long)rq->sector,
 -                                              rq->nr_sectors,
 -                                              rq->current_nr_sectors);
 -      printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
 -                                              rq->bio, rq->biotail,
 -                                              rq->buffer, rq->data,
 -                                              rq->data_len);
 +      printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
 +             (unsigned long long)blk_rq_pos(rq),
 +             blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 +      printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 +             rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
  
        if (blk_pc_request(rq)) {
                printk(KERN_INFO "  cdb: ");
@@@ -331,6 -333,24 +331,6 @@@ void blk_unplug(struct request_queue *q
  }
  EXPORT_SYMBOL(blk_unplug);
  
 -static void blk_invoke_request_fn(struct request_queue *q)
 -{
 -      if (unlikely(blk_queue_stopped(q)))
 -              return;
 -
 -      /*
 -       * one level of recursion is ok and is much faster than kicking
 -       * the unplug handling
 -       */
 -      if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 -              q->request_fn(q);
 -              queue_flag_clear(QUEUE_FLAG_REENTER, q);
 -      } else {
 -              queue_flag_set(QUEUE_FLAG_PLUGGED, q);
 -              kblockd_schedule_work(q, &q->unplug_work);
 -      }
 -}
 -
  /**
   * blk_start_queue - restart a previously stopped queue
   * @q:    The &struct request_queue in question
@@@ -345,7 -365,7 +345,7 @@@ void blk_start_queue(struct request_que
        WARN_ON(!irqs_disabled());
  
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 -      blk_invoke_request_fn(q);
 +      __blk_run_queue(q);
  }
  EXPORT_SYMBOL(blk_start_queue);
  
@@@ -405,23 -425,12 +405,23 @@@ void __blk_run_queue(struct request_que
  {
        blk_remove_plug(q);
  
 +      if (unlikely(blk_queue_stopped(q)))
 +              return;
 +
 +      if (elv_queue_empty(q))
 +              return;
 +
        /*
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
 -      if (!elv_queue_empty(q))
 -              blk_invoke_request_fn(q);
 +      if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 +              q->request_fn(q);
 +              queue_flag_clear(QUEUE_FLAG_REENTER, q);
 +      } else {
 +              queue_flag_set(QUEUE_FLAG_PLUGGED, q);
 +              kblockd_schedule_work(q, &q->unplug_work);
 +      }
  }
  EXPORT_SYMBOL(__blk_run_queue);
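
With blk_invoke_request_fn() folded into __blk_run_queue(), the stopped and empty checks now live inside the helper itself, so a driver that wants to restart dispatch after freeing a resource just calls it under the queue lock. A minimal sketch, not from this commit, assuming a hypothetical struct my_dev that carries the queue:

struct my_dev {
	struct request_queue *queue;
};

/* Hypothetical: a device resource (tag, buffer, ...) was just released. */
static void my_resource_freed(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(dev->queue->queue_lock, flags);
	__blk_run_queue(dev->queue);	/* no-op if the queue is stopped or empty */
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}
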
  
   *
   * Description:
   *    Invoke request handling on this queue, if it has pending work to do.
 - *    May be used to restart queueing when a request has completed. Also
 - *    See @blk_start_queueing.
 - *
 + *    May be used to restart queueing when a request has completed.
   */
  void blk_run_queue(struct request_queue *q)
  {
@@@ -891,58 -902,26 +891,58 @@@ struct request *blk_get_request(struct 
  EXPORT_SYMBOL(blk_get_request);
  
  /**
 - * blk_start_queueing - initiate dispatch of requests to device
 - * @q:                request queue to kick into gear
 + * blk_make_request - given a bio, allocate a corresponding struct request.
 + *
 + * @bio:  The bio describing the memory mappings that will be submitted for IO.
 + *        It may be a chained bio properly constructed by the block/bio layer.
 + *
 + * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 + * type commands, where the struct request needs to be further initialized
 + * by the caller. It is passed a &struct bio, which describes the memory
 + * info of the I/O transfer.
   *
 - * This is basically a helper to remove the need to know whether a queue
 - * is plugged or not if someone just wants to initiate dispatch of requests
 - * for this queue. Should be used to start queueing on a device outside
 - * of ->request_fn() context. Also see @blk_run_queue.
 + * The caller of blk_make_request must make sure that bi_io_vec
 + * are set to describe the memory buffers, that bio_data_dir() will return
 + * the needed direction of the request, and that all bios in the passed
 + * bio-chain are properly set up accordingly.
   *
 - * The queue lock must be held with interrupts disabled.
 + * If called under non-sleepable conditions, the mapped bio buffers must not
 + * need bouncing; use the appropriate masked or flagged allocator, suitable
 + * for the target device, otherwise the call to blk_queue_bounce will BUG.
 + *
 + * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 + * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 + * anything but the first bio in the chain. Otherwise you risk waiting for IO
 + * completion of a bio that hasn't been submitted yet, thus resulting in a
 + * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 + * of bio_alloc(), as that avoids the mempool deadlock.
 + * If possible a big IO should be split into smaller parts when allocation
 + * fails. Partial allocation should not be an error, or you risk a live-lock.
   */
 -void blk_start_queueing(struct request_queue *q)
 +struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 +                               gfp_t gfp_mask)
  {
 -      if (!blk_queue_plugged(q)) {
 -              if (unlikely(blk_queue_stopped(q)))
 -                      return;
 -              q->request_fn(q);
 -      } else
 -              __generic_unplug_device(q);
 +      struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
 +
 +      if (unlikely(!rq))
 +              return ERR_PTR(-ENOMEM);
 +
 +      for_each_bio(bio) {
 +              struct bio *bounce_bio = bio;
 +              int ret;
 +
 +              blk_queue_bounce(q, &bounce_bio);
 +              ret = blk_rq_append_bio(q, rq, bounce_bio);
 +              if (unlikely(ret)) {
 +                      blk_put_request(rq);
 +                      return ERR_PTR(ret);
 +              }
 +      }
 +
 +      return rq;
  }
 -EXPORT_SYMBOL(blk_start_queueing);
 +EXPORT_SYMBOL(blk_make_request);
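
A usage sketch for the new helper, not part of this commit: issuing a BLOCK_PC command from an already-built bio chain. my_send_pc() and the cdb buffer are hypothetical placeholders; the osd initiator touched by this merge is the kind of caller this targets.

static int my_send_pc(struct request_queue *q, struct gendisk *disk,
		      struct bio *bio, const u8 *cdb, unsigned int cdb_len)
{
	struct request *rq;
	int err;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_len = cdb_len;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->timeout = 60 * HZ;

	blk_execute_rq(q, disk, rq, 0);	/* synchronous issue, not at queue head */

	err = rq->errors ? -EIO : 0;
	blk_put_request(rq);
	return err;
}
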
  
  /**
   * blk_requeue_request - put a request back on queue
   */
  void blk_requeue_request(struct request_queue *q, struct request *rq)
  {
 +      BUG_ON(blk_queued_rq(rq));
 +
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
        trace_block_rq_requeue(q, rq);
@@@ -1000,6 -977,7 +1000,6 @@@ void blk_insert_request(struct request_
         * barrier
         */
        rq->cmd_type = REQ_TYPE_SPECIAL;
 -      rq->cmd_flags |= REQ_SOFTBARRIER;
  
        rq->special = data;
  
  
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
 -      blk_start_queueing(q);
 +      __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
  }
  EXPORT_SYMBOL(blk_insert_request);
@@@ -1135,13 -1113,16 +1135,13 @@@ void init_request_from_bio(struct reque
        if (bio_failfast_driver(bio))
                req->cmd_flags |= REQ_FAILFAST_DRIVER;
  
 -      /*
 -       * REQ_BARRIER implies no merging, but lets make it explicit
 -       */
        if (unlikely(bio_discard(bio))) {
                req->cmd_flags |= REQ_DISCARD;
                if (bio_barrier(bio))
                        req->cmd_flags |= REQ_SOFTBARRIER;
                req->q->prepare_discard_fn(req->q, req);
        } else if (unlikely(bio_barrier(bio)))
 -              req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 +              req->cmd_flags |= REQ_HARDBARRIER;
  
        if (bio_sync(bio))
                req->cmd_flags |= REQ_RW_SYNC;
                req->cmd_flags |= REQ_NOIDLE;
  
        req->errors = 0;
 -      req->hard_sector = req->sector = bio->bi_sector;
 +      req->__sector = bio->bi_sector;
        req->ioprio = bio_prio(bio);
 -      req->start_time = jiffies;
        blk_rq_bio_prep(req->q, req, bio);
  }
  
@@@ -1168,13 -1150,14 +1168,13 @@@ static inline bool queue_should_plug(st
  static int __make_request(struct request_queue *q, struct bio *bio)
  {
        struct request *req;
 -      int el_ret, nr_sectors;
 +      int el_ret;
 +      unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        const int unplug = bio_unplug(bio);
        int rw_flags;
  
 -      nr_sectors = bio_sectors(bio);
 -
        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
  
                req->biotail->bi_next = bio;
                req->biotail = bio;
 -              req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 +              req->__data_len += bytes;
                req->ioprio = ioprio_best(req->ioprio, prio);
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                 * not touch req->buffer either...
                 */
                req->buffer = bio_data(bio);
 -              req->current_nr_sectors = bio_cur_sectors(bio);
 -              req->hard_cur_sectors = req->current_nr_sectors;
 -              req->sector = req->hard_sector = bio->bi_sector;
 -              req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 +              req->__sector = bio->bi_sector;
 +              req->__data_len += bytes;
                req->ioprio = ioprio_best(req->ioprio, prio);
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
@@@ -1608,8 -1593,8 +1608,8 @@@ EXPORT_SYMBOL(submit_bio)
   */
  int blk_rq_check_limits(struct request_queue *q, struct request *rq)
  {
 -      if (rq->nr_sectors > q->max_sectors ||
 -          rq->data_len > q->max_hw_sectors << 9) {
 +      if (blk_rq_sectors(rq) > q->max_sectors ||
 +          blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
@@@ -1666,15 -1651,40 +1666,15 @@@ int blk_insert_cloned_request(struct re
  }
  EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  
 -/**
 - * blkdev_dequeue_request - dequeue request and start timeout timer
 - * @req: request to dequeue
 - *
 - * Dequeue @req and start timeout timer on it.  This hands off the
 - * request to the driver.
 - *
 - * Block internal functions which don't want to start timer should
 - * call elv_dequeue_request().
 - */
 -void blkdev_dequeue_request(struct request *req)
 -{
 -      elv_dequeue_request(req->q, req);
 -
 -      /*
 -       * We are now handing the request to the hardware, add the
 -       * timeout handler.
 -       */
 -      blk_add_timer(req);
 -}
 -EXPORT_SYMBOL(blkdev_dequeue_request);
 -
  static void blk_account_io_completion(struct request *req, unsigned int bytes)
  {
 -      if (!blk_do_io_stat(req))
 -              return;
 -
 -      if (blk_fs_request(req)) {
 +      if (blk_do_io_stat(req)) {
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;
  
                cpu = part_stat_lock();
 -              part = disk_map_sector_rcu(req->rq_disk, req->sector);
 +              part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                part_stat_unlock();
        }
  
  static void blk_account_io_done(struct request *req)
  {
 -      if (!blk_do_io_stat(req))
 -              return;
 -
        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing
         * request is enough.
         */
 -      if (blk_fs_request(req) && req != &req->q->bar_rq) {
 +      if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;
  
                cpu = part_stat_lock();
 -              part = disk_map_sector_rcu(req->rq_disk, req->sector);
 +              part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
  
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
  }
  
  /**
 - * __end_that_request_first - end I/O on a request
 - * @req:      the request being processed
 + * blk_peek_request - peek at the top of a request queue
 + * @q: request queue to peek at
 + *
 + * Description:
 + *     Return the request at the top of @q.  The returned request
 + *     should be started using blk_start_request() before LLD starts
 + *     processing it.
 + *
 + * Return:
 + *     Pointer to the request at the top of @q if available.  Null
 + *     otherwise.
 + *
 + * Context:
 + *     queue_lock must be held.
 + */
 +struct request *blk_peek_request(struct request_queue *q)
 +{
 +      struct request *rq;
 +      int ret;
 +
 +      while ((rq = __elv_next_request(q)) != NULL) {
 +              if (!(rq->cmd_flags & REQ_STARTED)) {
 +                      /*
 +                       * This is the first time the device driver
 +                       * sees this request (possibly after
 +                       * requeueing).  Notify IO scheduler.
 +                       */
 +                      if (blk_sorted_rq(rq))
 +                              elv_activate_rq(q, rq);
 +
 +                      /*
 +                       * just mark as started even if we don't start
 +                       * it, a request that has been delayed should
 +                       * not be passed by new incoming requests
 +                       */
 +                      rq->cmd_flags |= REQ_STARTED;
 +                      trace_block_rq_issue(q, rq);
 +              }
 +
 +              if (!q->boundary_rq || q->boundary_rq == rq) {
 +                      q->end_sector = rq_end_sector(rq);
 +                      q->boundary_rq = NULL;
 +              }
 +
 +              if (rq->cmd_flags & REQ_DONTPREP)
 +                      break;
 +
 +              if (q->dma_drain_size && blk_rq_bytes(rq)) {
 +                      /*
 +                       * make sure space for the drain appears.  we
 +                       * know we can do this because max_hw_segments
 +                       * has been adjusted to be one fewer than the
 +                       * device can handle
 +                       */
 +                      rq->nr_phys_segments++;
 +              }
 +
 +              if (!q->prep_rq_fn)
 +                      break;
 +
 +              ret = q->prep_rq_fn(q, rq);
 +              if (ret == BLKPREP_OK) {
 +                      break;
 +              } else if (ret == BLKPREP_DEFER) {
 +                      /*
 +                       * the request may have been (partially) prepped.
 +                       * we need to keep this request in the front to
 +                       * avoid resource deadlock.  REQ_STARTED will
 +                       * prevent other fs requests from passing this one.
 +                       */
 +                      if (q->dma_drain_size && blk_rq_bytes(rq) &&
 +                          !(rq->cmd_flags & REQ_DONTPREP)) {
 +                              /*
 +                               * remove the space for the drain we added
 +                               * so that we don't add it again
 +                               */
 +                              --rq->nr_phys_segments;
 +                      }
 +
 +                      rq = NULL;
 +                      break;
 +              } else if (ret == BLKPREP_KILL) {
 +                      rq->cmd_flags |= REQ_QUIET;
 +                      __blk_end_request_all(rq, -EIO);
 +              } else {
 +                      printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 +                      break;
 +              }
 +      }
 +
 +      return rq;
 +}
 +EXPORT_SYMBOL(blk_peek_request);
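
blk_peek_request() is also where the ->prep_rq_fn return codes get their meaning: BLKPREP_OK hands the request out, BLKPREP_DEFER leaves it at the head for a later retry, and BLKPREP_KILL fails it with -EIO. A sketch of a hypothetical prep function (my_cmd_alloc/my_cmd_setup/my_cmd_free are placeholders, not real APIs):

static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	struct my_cmd *cmd = my_cmd_alloc(GFP_ATOMIC);	/* hypothetical allocator */

	if (!cmd)
		return BLKPREP_DEFER;	/* retried later, rq stays at the head */

	if (my_cmd_setup(cmd, rq) < 0) {	/* hypothetical: rq can't be mapped */
		my_cmd_free(cmd);
		return BLKPREP_KILL;	/* the core fails rq with -EIO */
	}

	rq->special = cmd;
	rq->cmd_flags |= REQ_DONTPREP;	/* skip prep again after a requeue */
	return BLKPREP_OK;
}
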
 +
 +void blk_dequeue_request(struct request *rq)
 +{
 +      struct request_queue *q = rq->q;
 +
 +      BUG_ON(list_empty(&rq->queuelist));
 +      BUG_ON(ELV_ON_HASH(rq));
 +
 +      list_del_init(&rq->queuelist);
 +
 +      /*
 +       * the time frame between a request being removed from the lists
 +       * and when it is freed is accounted as io that is in progress at
 +       * the driver side.
 +       */
 +      if (blk_account_rq(rq))
 +              q->in_flight[rq_is_sync(rq)]++;
 +}
 +
 +/**
 + * blk_start_request - start request processing on the driver
 + * @req: request to dequeue
 + *
 + * Description:
 + *     Dequeue @req and start timeout timer on it.  This hands off the
 + *     request to the driver.
 + *
 + *     Block internal functions which don't want to start timer should
 + *     call blk_dequeue_request().
 + *
 + * Context:
 + *     queue_lock must be held.
 + */
 +void blk_start_request(struct request *req)
 +{
 +      blk_dequeue_request(req);
 +
 +      /*
 +       * We are now handing the request to the hardware, initialize
 +       * resid_len to full count and add the timeout handler.
 +       */
 +      req->resid_len = blk_rq_bytes(req);
 +      blk_add_timer(req);
 +}
 +EXPORT_SYMBOL(blk_start_request);
 +
 +/**
 + * blk_fetch_request - fetch a request from a request queue
 + * @q: request queue to fetch a request from
 + *
 + * Description:
 + *     Return the request at the top of @q.  The request is started on
 + *     return and LLD can start processing it immediately.
 + *
 + * Return:
 + *     Pointer to the request at the top of @q if available.  Null
 + *     otherwise.
 + *
 + * Context:
 + *     queue_lock must be held.
 + */
 +struct request *blk_fetch_request(struct request_queue *q)
 +{
 +      struct request *rq;
 +
 +      rq = blk_peek_request(q);
 +      if (rq)
 +              blk_start_request(rq);
 +      return rq;
 +}
 +EXPORT_SYMBOL(blk_fetch_request);
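
blk_peek_request()/blk_start_request()/blk_fetch_request() together replace the old elv_next_request() plus blkdev_dequeue_request() pattern. A sketch of the intended driver loop, not from this commit (my_issue_to_hw() is hypothetical, error handling reduced to the minimum); ->request_fn runs with the queue lock held, so the __-prefixed completion helper is the right one here:

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/*
		 * rq is now "started": dequeued, timeout armed and
		 * rq->resid_len initialized to blk_rq_bytes(rq).
		 */
		if (my_issue_to_hw(rq) < 0)
			__blk_end_request_all(rq, -EIO);
	}
}
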
 +
 +/**
 + * blk_update_request - Special helper function for request stacking drivers
 + * @rq:             the request being processed
   * @error:    %0 for success, < %0 for error
 - * @nr_bytes: number of bytes to complete
 + * @nr_bytes: number of bytes to complete @rq
   *
   * Description:
 - *     Ends I/O on a number of bytes attached to @req, and sets it up
 - *     for the next range of segments (if any) in the cluster.
 + *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
 + *     the request structure even if @rq doesn't have leftover.
 + *     If @rq has leftover, sets it up for the next range of segments.
 + *
 + *     This special helper function is only for request stacking drivers
 + *     (e.g. request-based dm) so that they can handle partial completion.
 + *     Actual device drivers should use blk_end_request instead.
 + *
 + *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 + *     %false return from this function.
   *
   * Return:
 - *     %0 - we are done with this request, call end_that_request_last()
 - *     %1 - still buffers pending for this request
 + *     %false - this request doesn't have any more data
 + *     %true  - this request has more data
   **/
 -static int __end_that_request_first(struct request *req, int error,
 -                                  int nr_bytes)
 +bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
  {
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;
  
 +      if (!req->bio)
 +              return false;
 +
        trace_block_rq_complete(req->q, req);
  
        /*
 -       * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
 -       * sense key with us all the way through
 +       * For fs requests, rq is just a carrier of independent bios,
 +       * and each partial completion should be handled separately.
 +       * Reset per-request error on each partial completion.
 +       *
 +       * TODO: tj: This is too subtle.  It would be better to let
 +       * low level drivers do what they see fit.
         */
 -      if (!blk_pc_request(req))
 +      if (blk_fs_request(req))
                req->errors = 0;
  
        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
 -                              (unsigned long long)req->sector);
 +                              (unsigned long long)blk_rq_pos(req));
        }
  
        blk_account_io_completion(req, nr_bytes);
                } else {
                        int idx = bio->bi_idx + next_idx;
  
-                       if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+                       if (unlikely(idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-                                      __func__, bio->bi_idx, bio->bi_vcnt);
+                                      __func__, idx, bio->bi_vcnt);
                                break;
                        }
  
        /*
         * completely done
         */
 -      if (!req->bio)
 -              return 0;
 +      if (!req->bio) {
 +              /*
 +               * Reset counters so that the request stacking driver
 +               * can find how many bytes remain in the request
 +               * later.
 +               */
 +              req->__data_len = 0;
 +              return false;
 +      }
  
        /*
         * if the request wasn't completed, update state
                bio_iovec(bio)->bv_len -= nr_bytes;
        }
  
 -      blk_recalc_rq_sectors(req, total_bytes >> 9);
 +      req->__data_len -= total_bytes;
 +      req->buffer = bio_data(req->bio);
 +
 +      /* update sector only for requests with clear definition of sector */
 +      if (blk_fs_request(req) || blk_discard_rq(req))
 +              req->__sector += total_bytes >> 9;
 +
 +      /*
 +       * If the total number of bytes is less than the first segment
 +       * size, something has gone terribly wrong.
 +       */
 +      if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
 +              printk(KERN_ERR "blk: request botched\n");
 +              req->__data_len = blk_rq_cur_bytes(req);
 +      }
 +
 +      /* recalculate the number of segments */
        blk_recalc_rq_segments(req);
 -      return 1;
 +
 +      return true;
 +}
 +EXPORT_SYMBOL_GPL(blk_update_request);
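
A sketch of the stacking-driver use the comment above describes: the completion of a clone is propagated to the original request without finishing it prematurely. The names here are hypothetical; request-based dm is the intended in-tree user.

/* Hypothetical: the clone transferred 'done' bytes with status 'error'. */
static void my_stack_end_io(struct request *orig, int error, unsigned int done)
{
	if (blk_update_request(orig, error, done))
		return;		/* the original still has bytes left */

	/* everything transferred: now really finish the original request
	 * (blk_end_request_all takes the queue lock itself) */
	blk_end_request_all(orig, error);
}
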
 +
 +static bool blk_update_bidi_request(struct request *rq, int error,
 +                                  unsigned int nr_bytes,
 +                                  unsigned int bidi_bytes)
 +{
 +      if (blk_update_request(rq, error, nr_bytes))
 +              return true;
 +
 +      /* Bidi request must be completed as a whole */
 +      if (unlikely(blk_bidi_rq(rq)) &&
 +          blk_update_request(rq->next_rq, error, bidi_bytes))
 +              return true;
 +
 +      add_disk_randomness(rq->rq_disk);
 +
 +      return false;
  }
  
  /*
   * queue lock must be held
   */
 -static void end_that_request_last(struct request *req, int error)
 +static void blk_finish_request(struct request *req, int error)
  {
 +      BUG_ON(blk_queued_rq(req));
 +
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
  
 -      if (blk_queued_rq(req))
 -              elv_dequeue_request(req->q, req);
 -
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
  
  }
  
  /**
 - * blk_rq_bytes - Returns bytes left to complete in the entire request
 - * @rq: the request being processed
 - **/
 -unsigned int blk_rq_bytes(struct request *rq)
 -{
 -      if (blk_fs_request(rq))
 -              return rq->hard_nr_sectors << 9;
 -
 -      return rq->data_len;
 -}
 -EXPORT_SYMBOL_GPL(blk_rq_bytes);
 -
 -/**
 - * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
 - * @rq: the request being processed
 - **/
 -unsigned int blk_rq_cur_bytes(struct request *rq)
 -{
 -      if (blk_fs_request(rq))
 -              return rq->current_nr_sectors << 9;
 -
 -      if (rq->bio)
 -              return rq->bio->bi_size;
 -
 -      return rq->data_len;
 -}
 -EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 -
 -/**
 - * end_request - end I/O on the current segment of the request
 - * @req:      the request being processed
 - * @uptodate: error value or %0/%1 uptodate flag
 + * blk_end_bidi_request - Complete a bidi request
 + * @rq:         the request to complete
 + * @error:      %0 for success, < %0 for error
 + * @nr_bytes:   number of bytes to complete @rq
 + * @bidi_bytes: number of bytes to complete @rq->next_rq
   *
   * Description:
 - *     Ends I/O on the current segment of a request. If that is the only
 - *     remaining segment, the request is also completed and freed.
 - *
 - *     This is a remnant of how older block drivers handled I/O completions.
 - *     Modern drivers typically end I/O on the full request in one go, unless
 - *     they have a residual value to account for. For that case this function
 - *     isn't really useful, unless the residual just happens to be the
 - *     full current segment. In other words, don't use this function in new
 - *     code. Use blk_end_request() or __blk_end_request() to end a request.
 + *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 + *     Drivers that support bidi can safely call this function for any
 + *     type of request, bidi or uni.  In the latter case @bidi_bytes is
 + *     just ignored.
 + *
 + * Return:
 + *     %false - we are done with this request
 + *     %true  - still buffers pending for this request
   **/
 -void end_request(struct request *req, int uptodate)
 -{
 -      int error = 0;
 -
 -      if (uptodate <= 0)
 -              error = uptodate ? uptodate : -EIO;
 -
 -      __blk_end_request(req, error, req->hard_cur_sectors << 9);
 -}
 -EXPORT_SYMBOL(end_request);
 -
 -static int end_that_request_data(struct request *rq, int error,
 +static bool blk_end_bidi_request(struct request *rq, int error,
                                 unsigned int nr_bytes, unsigned int bidi_bytes)
  {
 -      if (rq->bio) {
 -              if (__end_that_request_first(rq, error, nr_bytes))
 -                      return 1;
 +      struct request_queue *q = rq->q;
 +      unsigned long flags;
  
 -              /* Bidi request must be completed as a whole */
 -              if (blk_bidi_rq(rq) &&
 -                  __end_that_request_first(rq->next_rq, error, bidi_bytes))
 -                      return 1;
 -      }
 +      if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
 +              return true;
  
 -      return 0;
 +      spin_lock_irqsave(q->queue_lock, flags);
 +      blk_finish_request(rq, error);
 +      spin_unlock_irqrestore(q->queue_lock, flags);
 +
 +      return false;
  }
  
  /**
 - * blk_end_io - Generic end_io function to complete a request.
 - * @rq:           the request being processed
 - * @error:        %0 for success, < %0 for error
 - * @nr_bytes:     number of bytes to complete @rq
 - * @bidi_bytes:   number of bytes to complete @rq->next_rq
 - * @drv_callback: function called between completion of bios in the request
 - *                and completion of the request.
 - *                If the callback returns non %0, this helper returns without
 - *                completion of the request.
 + * __blk_end_bidi_request - Complete a bidi request with queue lock held
 + * @rq:         the request to complete
 + * @error:      %0 for success, < %0 for error
 + * @nr_bytes:   number of bytes to complete @rq
 + * @bidi_bytes: number of bytes to complete @rq->next_rq
   *
   * Description:
 - *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 - *     If @rq has leftover, sets it up for the next range of segments.
 + *     Identical to blk_end_bidi_request() except that queue lock is
 + *     assumed to be locked on entry and remains so on return.
   *
   * Return:
 - *     %0 - we are done with this request
 - *     %1 - this request is not freed yet, it still has pending buffers.
 + *     %false - we are done with this request
 + *     %true  - still buffers pending for this request
   **/
 -static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
 -                    unsigned int bidi_bytes,
 -                    int (drv_callback)(struct request *))
 +static bool __blk_end_bidi_request(struct request *rq, int error,
 +                                 unsigned int nr_bytes, unsigned int bidi_bytes)
  {
 -      struct request_queue *q = rq->q;
 -      unsigned long flags = 0UL;
 -
 -      if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
 -              return 1;
 -
 -      /* Special feature for tricky drivers */
 -      if (drv_callback && drv_callback(rq))
 -              return 1;
 -
 -      add_disk_randomness(rq->rq_disk);
 +      if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
 +              return true;
  
 -      spin_lock_irqsave(q->queue_lock, flags);
 -      end_that_request_last(rq, error);
 -      spin_unlock_irqrestore(q->queue_lock, flags);
 +      blk_finish_request(rq, error);
  
 -      return 0;
 +      return false;
  }
  
  /**
   *     If @rq has leftover, sets it up for the next range of segments.
   *
   * Return:
 - *     %0 - we are done with this request
 - *     %1 - still buffers pending for this request
 + *     %false - we are done with this request
 + *     %true  - still buffers pending for this request
   **/
 -int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 +bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
  {
 -      return blk_end_io(rq, error, nr_bytes, 0, NULL);
 +      return blk_end_bidi_request(rq, error, nr_bytes, 0);
  }
  EXPORT_SYMBOL_GPL(blk_end_request);
  
  /**
 - * __blk_end_request - Helper function for drivers to complete the request.
 - * @rq:       the request being processed
 - * @error:    %0 for success, < %0 for error
 - * @nr_bytes: number of bytes to complete
 + * blk_end_request_all - Helper function for drivers to finish the request.
 + * @rq: the request to finish
 + * @error: %0 for success, < %0 for error
   *
   * Description:
 - *     Must be called with queue lock held unlike blk_end_request().
 - *
 - * Return:
 - *     %0 - we are done with this request
 - *     %1 - still buffers pending for this request
 - **/
 -int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 + *     Completely finish @rq.
 + */
 +void blk_end_request_all(struct request *rq, int error)
  {
 -      if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
 -              return 1;
 +      bool pending;
 +      unsigned int bidi_bytes = 0;
  
 -      add_disk_randomness(rq->rq_disk);
 +      if (unlikely(blk_bidi_rq(rq)))
 +              bidi_bytes = blk_rq_bytes(rq->next_rq);
  
 -      end_that_request_last(rq, error);
 +      pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 +      BUG_ON(pending);
 +}
 +EXPORT_SYMBOL_GPL(blk_end_request_all);
  
 -      return 0;
 +/**
 + * blk_end_request_cur - Helper function to finish the current request chunk.
 + * @rq: the request to finish the current chunk for
 + * @error: %0 for success, < %0 for error
 + *
 + * Description:
 + *     Complete the current consecutively mapped chunk from @rq.
 + *
 + * Return:
 + *     %false - we are done with this request
 + *     %true  - still buffers pending for this request
 + */
 +bool blk_end_request_cur(struct request *rq, int error)
 +{
 +      return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
  }
 -EXPORT_SYMBOL_GPL(__blk_end_request);
 +EXPORT_SYMBOL_GPL(blk_end_request_cur);
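
blk_end_request_cur() and __blk_end_request_cur() (below) take over from the removed end_request(): the uptodate flag becomes an errno and the "current segment" size is taken from blk_rq_cur_bytes(). A conversion sketch for a hypothetical old-style per-chunk interrupt handler, called with the queue lock held; the old call is shown in the comment:

static void my_chunk_done(struct request *rq, int uptodate)
{
	/* old style:  end_request(rq, uptodate); */
	if (!__blk_end_request_cur(rq, uptodate ? 0 : -EIO))
		return;			/* request fully completed and freed */

	/* buffers still pending: set up the hardware for the next chunk */
}
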
  
  /**
 - * blk_end_bidi_request - Helper function for drivers to complete bidi request.
 - * @rq:         the bidi request being processed
 - * @error:      %0 for success, < %0 for error
 - * @nr_bytes:   number of bytes to complete @rq
 - * @bidi_bytes: number of bytes to complete @rq->next_rq
 + * __blk_end_request - Helper function for drivers to complete the request.
 + * @rq:       the request being processed
 + * @error:    %0 for success, < %0 for error
 + * @nr_bytes: number of bytes to complete
   *
   * Description:
 - *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 + *     Must be called with queue lock held unlike blk_end_request().
   *
   * Return:
 - *     %0 - we are done with this request
 - *     %1 - still buffers pending for this request
 + *     %false - we are done with this request
 + *     %true  - still buffers pending for this request
   **/
 -int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
 -                       unsigned int bidi_bytes)
 +bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
  {
 -      return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
 +      return __blk_end_bidi_request(rq, error, nr_bytes, 0);
  }
 -EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 +EXPORT_SYMBOL_GPL(__blk_end_request);
  
  /**
 - * blk_update_request - Special helper function for request stacking drivers
 - * @rq:           the request being processed
 - * @error:        %0 for success, < %0 for error
 - * @nr_bytes:     number of bytes to complete @rq
 + * __blk_end_request_all - Helper function for drivers to finish the request.
 + * @rq: the request to finish
 + * @error: %0 for success, < %0 for error
   *
   * Description:
 - *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
 - *     the request structure even if @rq doesn't have leftover.
 - *     If @rq has leftover, sets it up for the next range of segments.
 - *
 - *     This special helper function is only for request stacking drivers
 - *     (e.g. request-based dm) so that they can handle partial completion.
 - *     Actual device drivers should use blk_end_request instead.
 + *     Completely finish @rq.  Must be called with queue lock held.
   */
 -void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
 +void __blk_end_request_all(struct request *rq, int error)
  {
 -      if (!end_that_request_data(rq, error, nr_bytes, 0)) {
 -              /*
 -               * These members are not updated in end_that_request_data()
 -               * when all bios are completed.
 -               * Update them so that the request stacking driver can find
 -               * how many bytes remain in the request later.
 -               */
 -              rq->nr_sectors = rq->hard_nr_sectors = 0;
 -              rq->current_nr_sectors = rq->hard_cur_sectors = 0;
 -      }
 +      bool pending;
 +      unsigned int bidi_bytes = 0;
 +
 +      if (unlikely(blk_bidi_rq(rq)))
 +              bidi_bytes = blk_rq_bytes(rq->next_rq);
 +
 +      pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 +      BUG_ON(pending);
  }
 -EXPORT_SYMBOL_GPL(blk_update_request);
 +EXPORT_SYMBOL_GPL(__blk_end_request_all);
  
  /**
 - * blk_end_request_callback - Special helper function for tricky drivers
 - * @rq:           the request being processed
 - * @error:        %0 for success, < %0 for error
 - * @nr_bytes:     number of bytes to complete
 - * @drv_callback: function called between completion of bios in the request
 - *                and completion of the request.
 - *                If the callback returns non %0, this helper returns without
 - *                completion of the request.
 + * __blk_end_request_cur - Helper function to finish the current request chunk.
 + * @rq: the request to finish the current chunk for
 + * @error: %0 for success, < %0 for error
   *
   * Description:
 - *     Ends I/O on a number of bytes attached to @rq.
 - *     If @rq has leftover, sets it up for the next range of segments.
 - *
 - *     This special helper function is used only for existing tricky drivers.
 - *     (e.g. cdrom_newpc_intr() of ide-cd)
 - *     This interface will be removed when such drivers are rewritten.
 - *     Don't use this interface in other places anymore.
 + *     Complete the current consecutively mapped chunk from @rq.  Must
 + *     be called with queue lock held.
   *
   * Return:
 - *     %0 - we are done with this request
 - *     %1 - this request is not freed yet.
 - *          this request still has pending buffers or
 - *          the driver doesn't want to finish this request yet.
 - **/
 -int blk_end_request_callback(struct request *rq, int error,
 -                           unsigned int nr_bytes,
 -                           int (drv_callback)(struct request *))
 + *     %false - we are done with this request
 + *     %true  - still buffers pending for this request
 + */
 +bool __blk_end_request_cur(struct request *rq, int error)
  {
 -      return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
 +      return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
  }
 -EXPORT_SYMBOL_GPL(blk_end_request_callback);
 +EXPORT_SYMBOL_GPL(__blk_end_request_cur);
  
  void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
 -      rq->current_nr_sectors = bio_cur_sectors(bio);
 -      rq->hard_cur_sectors = rq->current_nr_sectors;
 -      rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 -      rq->data_len = bio->bi_size;
 -
 +      rq->__data_len = bio->bi_size;
        rq->bio = rq->biotail = bio;
  
        if (bio->bi_bdev)
@@@ -2295,9 -2158,6 +2295,9 @@@ EXPORT_SYMBOL(kblockd_schedule_work)
  
  int __init blk_dev_init(void)
  {
 +      BUILD_BUG_ON(__REQ_NR_BITS > 8 *
 +                      sizeof(((struct request *)0)->cmd_flags));
 +
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
diff --combined drivers/ata/libata-scsi.c
index 6e4c600f5a1c5cac8dc123da72e1f3e89d212c33,342316064e9ffe88b8962910908c345593c9ef1a..d0dfeef55db58444d5eeb979ab97f1e1628112c8
@@@ -313,7 -313,7 +313,7 @@@ ata_scsi_em_message_show(struct device 
                return ap->ops->em_show(ap, buf);
        return -EINVAL;
  }
- DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO,
+ DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
                ata_scsi_em_message_show, ata_scsi_em_message_store);
  EXPORT_SYMBOL_GPL(dev_attr_em_message);
  
@@@ -366,7 -366,7 +366,7 @@@ ata_scsi_activity_store(struct device *
        }
        return -EINVAL;
  }
- DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
+ DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
                        ata_scsi_activity_store);
  EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
  
@@@ -1084,7 -1084,7 +1084,7 @@@ static int atapi_drain_needed(struct re
        if (likely(!blk_pc_request(rq)))
                return 0;
  
 -      if (!rq->data_len || (rq->cmd_flags & REQ_RW))
 +      if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
                return 0;
  
        return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@@ -2142,13 -2142,14 +2142,14 @@@ static unsigned int ata_scsiop_inq_89(s
  
  static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
  {
+       int form_factor = ata_id_form_factor(args->id);
+       int media_rotation_rate = ata_id_rotation_rate(args->id);
        rbuf[1] = 0xb1;
        rbuf[3] = 0x3c;
-       if (ata_id_major_version(args->id) > 7) {
-               rbuf[4] = args->id[217] >> 8;
-               rbuf[5] = args->id[217];
-               rbuf[7] = args->id[168] & 0xf;
-       }
+       rbuf[4] = media_rotation_rate >> 8;
+       rbuf[5] = media_rotation_rate;
+       rbuf[7] = form_factor;
  
        return 0;
  }
@@@ -2376,7 -2377,23 +2377,23 @@@ saving_not_supp
   */
  static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
  {
-       u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
+       struct ata_device *dev = args->dev;
+       u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
+       u8 log_per_phys = 0;
+       u16 lowest_aligned = 0;
+       u16 word_106 = dev->id[106];
+       u16 word_209 = dev->id[209];
+       if ((word_106 & 0xc000) == 0x4000) {
+               /* Number and offset of logical sectors per physical sector */
+               if (word_106 & (1 << 13))
+                       log_per_phys = word_106 & 0xf;
+               if ((word_209 & 0xc000) == 0x4000) {
+                       u16 first = dev->id[209] & 0x3fff;
+                       if (first > 0)
+                               lowest_aligned = (1 << log_per_phys) - first;
+               }
+       }
  
        VPRINTK("ENTER\n");
  
                /* sector size */
                rbuf[10] = ATA_SECT_SIZE >> 8;
                rbuf[11] = ATA_SECT_SIZE & 0xff;
+               rbuf[12] = 0;
+               rbuf[13] = log_per_phys;
+               rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+               rbuf[15] = lowest_aligned;
        }
  
        return 0;
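
As a worked example of the physical-sector reporting added above (values chosen for illustration, not from this commit): with ATA IDENTIFY word 106 = 0x6003 (bits 15:14 = 01 so the word is valid, bit 13 set so there are multiple logical sectors per physical, low nibble 3 so 2^3 = 8 logical sectors per physical) and word 209 = 0x4001 (valid, first logical sector of the physical sector at offset 1), the code computes log_per_phys = 3 and lowest_aligned = (1 << 3) - 1 = 7, so the READ CAPACITY(16) reply advertises an 8-sector physical block with lowest aligned LBA 7 in bytes 13-15 of the response.
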
diff --combined drivers/ide/ide-cd.c
index 081aed6781cc182e9cda15ea8b90e908bad5d84e,925eb9e245d1e7f0028dbc3cd0e698bce6b98962..1799328decfb1c79ccf86cf1eb136a6194d410d7
@@@ -206,25 -206,54 +206,25 @@@ static void cdrom_analyze_sense_data(id
        ide_cd_log_error(drive->name, failed_command, sense);
  }
  
 -static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
 -                                    struct request *failed_command)
 -{
 -      struct cdrom_info *info         = drive->driver_data;
 -      struct request *rq              = &drive->request_sense_rq;
 -
 -      ide_debug_log(IDE_DBG_SENSE, "enter");
 -
 -      if (sense == NULL)
 -              sense = &info->sense_data;
 -
 -      /* stuff the sense request in front of our current request */
 -      blk_rq_init(NULL, rq);
 -      rq->cmd_type = REQ_TYPE_ATA_PC;
 -      rq->rq_disk = info->disk;
 -
 -      rq->data = sense;
 -      rq->cmd[0] = GPCMD_REQUEST_SENSE;
 -      rq->cmd[4] = 18;
 -      rq->data_len = 18;
 -
 -      rq->cmd_type = REQ_TYPE_SENSE;
 -      rq->cmd_flags |= REQ_PREEMPT;
 -
 -      /* NOTE! Save the failed command in "rq->buffer" */
 -      rq->buffer = (void *) failed_command;
 -
 -      if (failed_command)
 -              ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
 -                                           failed_command->cmd[0]);
 -
 -      drive->hwif->rq = NULL;
 -
 -      elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
 -}
 -
  static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
  {
        /*
 -       * For REQ_TYPE_SENSE, "rq->buffer" points to the original
 -       * failed request
 +       * For REQ_TYPE_SENSE, "rq->special" points to the original
 +       * failed request.  Also, the sense data should be read
 +       * directly from rq which might be different from the original
 +       * sense buffer if it got copied during mapping.
         */
 -      struct request *failed = (struct request *)rq->buffer;
 -      struct cdrom_info *info = drive->driver_data;
 -      void *sense = &info->sense_data;
 +      struct request *failed = (struct request *)rq->special;
 +      void *sense = bio_data(rq->bio);
  
        if (failed) {
                if (failed->sense) {
 +                      /*
 +                       * Sense is always read into drive->sense_data.
 +                       * Copy back if the failed request has its
 +                       * sense pointer set.
 +                       */
 +                      memcpy(failed->sense, sense, 18);
                        sense = failed->sense;
                        failed->sense_len = rq->sense_len;
                }
@@@ -283,7 -312,6 +283,6 @@@ static int cdrom_decode_status(ide_driv
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->rq;
        int err, sense_key, do_end_request = 0;
-       u8 quiet = rq->cmd_flags & REQ_QUIET;
  
        /* get the IDE error register */
        err = ide_read_error(drive);
                } else {
                        cdrom_saw_media_change(drive);
  
-                       if (blk_fs_request(rq) && !quiet)
+                       if (blk_fs_request(rq) && !blk_rq_quiet(rq))
                                printk(KERN_ERR PFX "%s: tray open\n",
                                        drive->name);
                }
                 * No point in retrying after an illegal request or data
                 * protect error.
                 */
-               if (!quiet)
+               if (!blk_rq_quiet(rq))
                        ide_dump_status(drive, "command error", stat);
                do_end_request = 1;
                break;
                 * No point in re-trying a zillion times on a bad sector.
                 * If we got here the error is not correctable.
                 */
-               if (!quiet)
+               if (!blk_rq_quiet(rq))
                        ide_dump_status(drive, "media error "
                                        "(bad sector)", stat);
                do_end_request = 1;
                break;
        case BLANK_CHECK:
                /* disk appears blank? */
-               if (!quiet)
+               if (!blk_rq_quiet(rq))
                        ide_dump_status(drive, "media error (blank)",
                                        stat);
                do_end_request = 1;
  
        /* if we got a CHECK_CONDITION status, queue a request sense command */
        if (stat & ATA_ERR)
 -              cdrom_queue_request_sense(drive, NULL, NULL);
 +              return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
        return 1;
  
  end_request:
        if (stat & ATA_ERR) {
 -              struct request_queue *q = drive->queue;
 -              unsigned long flags;
 -
 -              spin_lock_irqsave(q->queue_lock, flags);
 -              blkdev_dequeue_request(rq);
 -              spin_unlock_irqrestore(q->queue_lock, flags);
 -
                hwif->rq = NULL;
 -
 -              cdrom_queue_request_sense(drive, rq->sense, rq);
 -              return 1;
 +              return ide_queue_sense_rq(drive, rq) ? 2 : 1;
        } else
                return 2;
  }
@@@ -466,8 -503,14 +465,8 @@@ static void ide_cd_request_sense_fixup(
         * and some drives don't send them.  Sigh.
         */
        if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
 -          cmd->nleft > 0 && cmd->nleft <= 5) {
 -              unsigned int ofs = cmd->nbytes - cmd->nleft;
 -
 -              while (cmd->nleft > 0) {
 -                      *((u8 *)rq->data + ofs++) = 0;
 -                      cmd->nleft--;
 -              }
 -      }
 +          cmd->nleft > 0 && cmd->nleft <= 5)
 +              cmd->nleft = 0;
  }
  
  int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
                rq->cmd_flags |= cmd_flags;
                rq->timeout = timeout;
                if (buffer) {
 -                      rq->data = buffer;
 -                      rq->data_len = *bufflen;
 +                      error = blk_rq_map_kern(drive->queue, rq, buffer,
 +                                              *bufflen, GFP_NOIO);
 +                      if (error) {
 +                              blk_put_request(rq);
 +                              return error;
 +                      }
                }
  
                error = blk_execute_rq(drive->queue, info->disk, rq, 0);
  
                if (buffer)
 -                      *bufflen = rq->data_len;
 +                      *bufflen = rq->resid_len;
  
                flags = rq->cmd_flags;
                blk_put_request(rq);
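
The packet-command path above now maps its kernel buffer with blk_rq_map_kern() instead of pointing rq->data at it, and reads the residual count from rq->resid_len after execution. The same pattern in isolation, as a hypothetical sketch (my_exec_pc() is a placeholder; cdb setup omitted):

static int my_exec_pc(struct request_queue *q, struct gendisk *disk,
		      void *buf, unsigned int *bufflen)
{
	struct request *rq;
	int error;

	rq = blk_get_request(q, READ, GFP_NOIO);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	if (buf && *bufflen) {
		error = blk_rq_map_kern(q, rq, buf, *bufflen, GFP_NOIO);
		if (error) {
			blk_put_request(rq);
			return error;
		}
	}

	error = blk_execute_rq(q, disk, rq, 0);
	if (buf)
		*bufflen = rq->resid_len;	/* bytes not transferred */

	blk_put_request(rq);
	return error;
}
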
@@@ -569,7 -608,7 +568,7 @@@ static ide_startstop_t cdrom_newpc_intr
        struct request *rq = hwif->rq;
        ide_expiry_t *expiry = NULL;
        int dma_error = 0, dma, thislen, uptodate = 0;
 -      int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
 +      int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
        int sense = blk_sense_request(rq);
        unsigned int timeout;
        u16 len;
  
  out_end:
        if (blk_pc_request(rq) && rc == 0) {
 -              unsigned int dlen = rq->data_len;
 -
 -              rq->data_len = 0;
 -
 -              if (blk_end_request(rq, 0, dlen))
 -                      BUG();
 -
 +              rq->resid_len = 0;
 +              blk_end_request_all(rq, 0);
                hwif->rq = NULL;
        } else {
                if (sense && uptodate)
                        ide_cd_error_cmd(drive, cmd);
  
                /* make sure it's fully ended */
 -              if (blk_pc_request(rq))
 -                      nsectors = (rq->data_len + 511) >> 9;
 -              else
 -                      nsectors = rq->hard_nr_sectors;
 -
 -              if (nsectors == 0)
 -                      nsectors = 1;
 -
                if (blk_fs_request(rq) == 0) {
 -                      rq->data_len -= (cmd->nbytes - cmd->nleft);
 +                      rq->resid_len -= cmd->nbytes - cmd->nleft;
                        if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
 -                              rq->data_len += cmd->last_xfer_len;
 +                              rq->resid_len += cmd->last_xfer_len;
                }
  
 -              ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
 +              ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
  
                if (sense && rc == 2)
                        ide_error(drive, "request sense failure", stat);
@@@ -757,8 -809,8 +756,8 @@@ static ide_startstop_t cdrom_start_rw(i
        }
  
        /* fs requests *must* be hardware frame aligned */
 -      if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
 -          (rq->sector & (sectors_per_frame - 1)))
 +      if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
 +          (blk_rq_pos(rq) & (sectors_per_frame - 1)))
                return ide_stopped;
  
        /* use DMA, if possible */
@@@ -786,10 -838,15 +785,10 @@@ static void cdrom_do_block_pc(ide_drive
        drive->dma = 0;
  
        /* sg request */
 -      if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) {
 +      if (rq->bio) {
                struct request_queue *q = drive->queue;
 +              char *buf = bio_data(rq->bio);
                unsigned int alignment;
 -              char *buf;
 -
 -              if (rq->bio)
 -                      buf = bio_data(rq->bio);
 -              else
 -                      buf = rq->data;
  
                drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
  
                 */
                alignment = queue_dma_alignment(q) | q->dma_pad_mask;
                if ((unsigned long)buf & alignment
 -                  || rq->data_len & q->dma_pad_mask
 +                  || blk_rq_bytes(rq) & q->dma_pad_mask
                    || object_is_on_stack(buf))
                        drive->dma = 0;
        }
@@@ -839,9 -896,6 +838,9 @@@ static ide_startstop_t ide_cd_do_reques
                goto out_end;
        }
  
 +      /* prepare sense request for this command */
 +      ide_prep_sense(drive, rq);
 +
        memset(&cmd, 0, sizeof(cmd));
  
        if (rq_data_dir(rq))
  
        cmd.rq = rq;
  
 -      if (blk_fs_request(rq) || rq->data_len) {
 -              ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
 -                                                       : rq->data_len);
 +      if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
 +              ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
                ide_map_sg(drive, &cmd);
        }
  
        return ide_issue_pc(drive, &cmd);
  out_end:
 -      nsectors = rq->hard_nr_sectors;
 +      nsectors = blk_rq_sectors(rq);
  
        if (nsectors == 0)
                nsectors = 1;
@@@ -1340,8 -1395,8 +1339,8 @@@ static int ide_cdrom_probe_capabilities
  static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
  {
        int hard_sect = queue_hardsect_size(q);
 -      long block = (long)rq->hard_sector / (hard_sect >> 9);
 -      unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
 +      long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
 +      unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
  
        memset(rq->cmd, 0, BLK_MAX_CDB);
  
diff --combined drivers/mmc/card/block.c
index 949e99770ad6f9423c1b30a82a167fc4dceea029,b25e9b6516ae6942d6e9a0c34fc3f0dba0f9556a..c5df86546458d1da54f9a0bd778e7957d2357e2e
@@@ -243,7 -243,7 +243,7 @@@ static int mmc_blk_issue_rq(struct mmc_
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;
  
 -              brq.cmd.arg = req->sector;
 +              brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 -              brq.data.blocks = req->nr_sectors;
 +              brq.data.blocks = blk_rq_sectors(req);
  
+               /*
+                * The block layer doesn't support all sector count
+                * restrictions, so we need to be prepared for too big
+                * requests.
+                */
+               if (brq.data.blocks > card->host->max_blk_count)
+                       brq.data.blocks = card->host->max_blk_count;
                /*
                 * After a read error, we redo the request one sector at a time
                 * in order to accurately determine which sectors can be read
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
 -              if (brq.data.blocks != req->nr_sectors) {
 +              if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;
  
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
 -                             (unsigned)req->sector,
 -                             (unsigned)req->nr_sectors, status);
 +                             (unsigned)blk_rq_pos(req),
 +                             (unsigned)blk_rq_sectors(req), status);
                }
  
                if (brq.stop.error) {
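The comment added above explains why the driver must clamp the transfer: the block layer cannot express every host-controller limit, so over-large requests have to be handled by the driver. A small sketch of that clamp in isolation (everything except blk_rq_sectors() is hypothetical):

#include <linux/blkdev.h>

/* Cap the number of 512-byte blocks taken from a request to what the
 * controller can move in one transfer; the rest is completed later. */
static unsigned int capped_blocks(struct request *req, unsigned int max_blk_count)
{
	unsigned int blocks = blk_rq_sectors(req);

	if (blocks > max_blk_count)
		blocks = max_blk_count;
	return blocks;
}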
diff --combined drivers/scsi/lpfc/lpfc_scsi.c
index a8fab39771168c123e86c7700ca60522e6692d6b,167b66dd34c712a73d103d26f603d87042ae4cd7..8032c5adb6a9b2400c318912a7aa376df512b096
@@@ -112,7 -112,7 +112,7 @@@ lpfc_debug_save_dif(struct scsi_cmnd *c
  }
  
  /**
-  * lpfc_update_stats: Update statistical data for the command completion.
+  * lpfc_update_stats - Update statistical data for the command completion
   * @phba: Pointer to HBA object.
   * @lpfc_cmd: lpfc scsi command object pointer.
   *
@@@ -165,8 -165,7 +165,7 @@@ lpfc_update_stats(struct lpfc_hba *phba
  }
  
  /**
-  * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
-  *                   event.
+  * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
   * @phba: Pointer to HBA context object.
   * @vport: Pointer to vport object.
   * @ndlp: Pointer to FC node associated with the target.
@@@ -220,7 -219,7 +219,7 @@@ lpfc_send_sdev_queuedepth_change_event(
  }
  
  /**
-  * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
+  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
   * @phba: The Hba for which this call is being executed.
   *
   * This routine is called when there is a resource error in the driver or firmware.
@@@ -261,7 -260,7 +260,7 @@@ lpfc_rampdown_queue_depth(struct lpfc_h
  }
  
  /**
-  * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
+  * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
   * @phba: The Hba for which this call is being executed.
   *
   * This routine posts the WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
   **/
  static inline void
  lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
-                       struct scsi_device *sdev)
+                       uint32_t queue_depth)
  {
        unsigned long flags;
        struct lpfc_hba *phba = vport->phba;
        uint32_t evt_posted;
        atomic_inc(&phba->num_cmd_success);
  
-       if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
+       if (vport->cfg_lun_queue_depth <= queue_depth)
                return;
        spin_lock_irqsave(&phba->hbalock, flags);
        if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
  }
  
  /**
-  * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
+  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
   * @phba: The Hba for which this call is being executed.
   *
   * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
@@@ -361,7 -360,7 +360,7 @@@ lpfc_ramp_down_queue_handler(struct lpf
  }
  
  /**
-  * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
+  * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
   * @phba: The Hba for which this call is being executed.
   *
   * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
@@@ -410,7 -409,7 +409,7 @@@ lpfc_ramp_up_queue_handler(struct lpfc_
  }
  
  /**
-  * lpfc_scsi_dev_block: set all scsi hosts to block state.
+  * lpfc_scsi_dev_block - set all scsi hosts to block state
   * @phba: Pointer to HBA context object.
   *
   * This function walks the vport list and sets each SCSI host to block state
@@@ -439,7 -438,7 +438,7 @@@ lpfc_scsi_dev_block(struct lpfc_hba *ph
  }
  
  /**
-  * lpfc_new_scsi_buf: Scsi buffer allocator.
+  * lpfc_new_scsi_buf - Scsi buffer allocator
   * @vport: The virtual port for which this call being executed.
   *
   * This routine allocates a scsi buffer, which contains all the necessary
@@@ -563,7 -562,7 +562,7 @@@ lpfc_new_scsi_buf(struct lpfc_vport *vp
  }
  
  /**
-  * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
+  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
   * @phba: The Hba for which this call is being executed.
   *
   * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
@@@ -592,7 -591,7 +591,7 @@@ lpfc_get_scsi_buf(struct lpfc_hba * phb
  }
  
  /**
-  * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
+  * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
   * @phba: The Hba for which this call is being executed.
   * @psb: The scsi buffer which is being released.
   *
@@@ -611,7 -610,7 +610,7 @@@ lpfc_release_scsi_buf(struct lpfc_hba *
  }
  
  /**
-  * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
+  * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
   * @phba: The Hba for which this call is being executed.
   * @lpfc_cmd: The scsi buffer which is going to be mapped.
   *
@@@ -738,7 -737,7 +737,7 @@@ lpfc_scsi_prep_dma_buf(struct lpfc_hba 
         * Due to difference in data length between DIF/non-DIF paths,
         * we need to set word 4 of IOCB here
         */
-       iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
+       iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
        return 0;
  }
  
@@@ -823,9 -822,9 +822,9 @@@ lpfc_cmd_blksize(struct scsi_cmnd *sc
  /**
   * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
   * @sc:             in: SCSI command
-  * @apptagmask      out: app tag mask
-  * @apptagval       out: app tag value
-  * @reftag          out: ref tag (reference tag)
+  * @apptagmask:     out: app tag mask
+  * @apptagval:      out: app tag value
+  * @reftag:         out: ref tag (reference tag)
   *
   * Description:
   *   Extract DIF parameters from the command if possible.  Otherwise,
@@@ -1313,10 -1312,10 +1312,10 @@@ lpfc_parse_bg_err(struct lpfc_hba *phba
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
  
 -      printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
 +      printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
                        "bgstat=0x%x bghm=0x%x\n",
                        cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
 -                      cmd->request->nr_sectors, bgstat, bghm);
 +                      blk_rq_sectors(cmd->request), bgstat, bghm);
  
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
@@@ -1413,7 -1412,7 +1412,7 @@@ out
  }
  
  /**
-  * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
+  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
   * @phba: Pointer to hba context object.
   * @vport: Pointer to vport object.
   * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
@@@ -1505,7 -1504,7 +1504,7 @@@ lpfc_send_scsi_error_event(struct lpfc_
  }
  
  /**
-  * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
+  * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
   * @phba: The Hba for which this call is being executed.
   * @psb: The scsi buffer which is going to be un-mapped.
   *
@@@ -1530,7 -1529,7 +1529,7 @@@ lpfc_scsi_unprep_dma_buf(struct lpfc_hb
  }
  
  /**
-  * lpfc_handler_fcp_err: FCP response handler.
+  * lpfc_handler_fcp_err - FCP response handler
   * @vport: The virtual port for which this call is being executed.
   * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
   * @rsp_iocb: The response IOCB which contains FCP error.
@@@ -1674,7 -1673,7 +1673,7 @@@ lpfc_handle_fcp_err(struct lpfc_vport *
  }
  
  /**
-  * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
+  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
   * @phba: The Hba for which this call is being executed.
   * @pIocbIn: The command IOCBQ for the scsi cmnd.
   * @pIocbOut: The response IOCBQ for the scsi cmnd.
@@@ -1694,10 -1693,12 +1693,12 @@@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hb
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        int result;
-       struct scsi_device *sdev, *tmp_sdev;
+       struct scsi_device *tmp_sdev;
        int depth = 0;
        unsigned long flags;
        struct lpfc_fast_path_event *fast_path_evt;
+       struct Scsi_Host *shost = cmd->device->host;
+       uint32_t queue_depth, scsi_id;
  
        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
  
        lpfc_update_stats(phba, lpfc_cmd);
        result = cmd->result;
-       sdev = cmd->device;
        if (vport->cfg_max_scsicmpl_time &&
           time_after(jiffies, lpfc_cmd->start_time +
                msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
-               spin_lock_irqsave(sdev->host->host_lock, flags);
+               spin_lock_irqsave(shost->host_lock, flags);
                if (pnode && NLP_CHK_NODE_ACT(pnode)) {
                        if (pnode->cmd_qdepth >
                                atomic_read(&pnode->cmd_pending) &&
  
                        pnode->last_change_time = jiffies;
                }
-               spin_unlock_irqrestore(sdev->host->host_lock, flags);
+               spin_unlock_irqrestore(shost->host_lock, flags);
        } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
                if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
                   time_after(jiffies, pnode->last_change_time +
                              msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
-                       spin_lock_irqsave(sdev->host->host_lock, flags);
+                       spin_lock_irqsave(shost->host_lock, flags);
                        pnode->cmd_qdepth += pnode->cmd_qdepth *
                                LPFC_TGTQ_RAMPUP_PCENT / 100;
                        if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
                                pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
                        pnode->last_change_time = jiffies;
-                       spin_unlock_irqrestore(sdev->host->host_lock, flags);
+                       spin_unlock_irqrestore(shost->host_lock, flags);
                }
        }
  
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+       /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+       queue_depth = cmd->device->queue_depth;
+       scsi_id = cmd->device->id;
        cmd->scsi_done(cmd);
  
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                 * If there is a thread waiting for command completion
                 * wake up the thread.
                 */
-               spin_lock_irqsave(sdev->host->host_lock, flags);
+               spin_lock_irqsave(shost->host_lock, flags);
                lpfc_cmd->pCmd = NULL;
                if (lpfc_cmd->waitq)
                        wake_up(lpfc_cmd->waitq);
-               spin_unlock_irqrestore(sdev->host->host_lock, flags);
+               spin_unlock_irqrestore(shost->host_lock, flags);
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return;
        }
  
  
        if (!result)
-               lpfc_rampup_queue_depth(vport, sdev);
+               lpfc_rampup_queue_depth(vport, queue_depth);
  
        if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
           ((jiffies - pnode->last_ramp_up_time) >
                LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
           ((jiffies - pnode->last_q_full_time) >
                LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
-          (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
-               shost_for_each_device(tmp_sdev, sdev->host) {
+          (vport->cfg_lun_queue_depth > queue_depth)) {
+               shost_for_each_device(tmp_sdev, shost) {
                        if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
-                               if (tmp_sdev->id != sdev->id)
+                               if (tmp_sdev->id != scsi_id)
                                        continue;
                                if (tmp_sdev->ordered_tags)
                                        scsi_adjust_queue_depth(tmp_sdev,
                }
                lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
                        0xFFFFFFFF,
-                       sdev->queue_depth - 1, sdev->queue_depth);
+                       queue_depth, queue_depth + 1);
        }
  
        /*
            NLP_CHK_NODE_ACT(pnode)) {
                pnode->last_q_full_time = jiffies;
  
-               shost_for_each_device(tmp_sdev, sdev->host) {
-                       if (tmp_sdev->id != sdev->id)
+               shost_for_each_device(tmp_sdev, shost) {
+                       if (tmp_sdev->id != scsi_id)
                                continue;
                        depth = scsi_track_queue_full(tmp_sdev,
                                        tmp_sdev->queue_depth - 1);
                 * scsi_track_queue_full.
                 */
                if (depth == -1)
-                       depth = sdev->host->cmd_per_lun;
+                       depth = shost->cmd_per_lun;
  
                if (depth) {
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
         * If there is a thread waiting for command completion
         * wake up the thread.
         */
-       spin_lock_irqsave(sdev->host->host_lock, flags);
+       spin_lock_irqsave(shost->host_lock, flags);
        lpfc_cmd->pCmd = NULL;
        if (lpfc_cmd->waitq)
                wake_up(lpfc_cmd->waitq);
-       spin_unlock_irqrestore(sdev->host->host_lock, flags);
+       spin_unlock_irqrestore(shost->host_lock, flags);
  
        lpfc_release_scsi_buf(phba, lpfc_cmd);
  }
  
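The completion path above caches queue_depth and the SCSI id from cmd->device before calling ->scsi_done(), because, as the added comment notes, the scsi_device is not guaranteed to remain valid after that upcall. A stripped-down sketch of the same pattern (not lpfc code; the function and the pr_debug text are illustrative only):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void finish_cmd(struct scsi_cmnd *cmd)
{
	/* Capture everything needed from the device before completion. */
	struct Scsi_Host *shost = cmd->device->host;
	unsigned int scsi_id = cmd->device->id;
	int queue_depth = cmd->device->queue_depth;

	cmd->scsi_done(cmd);

	/* From here on, only the cached values are used. */
	pr_debug("host %u id %u completed, queue depth %d\n",
		 shost->host_no, scsi_id, queue_depth);
}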
  /**
-  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
+  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
   * @data: A pointer to the immediate command data portion of the IOCB.
   * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
   *
@@@ -1953,7 -1957,7 +1957,7 @@@ lpfc_fcpcmd_to_iocb(uint8_t *data, stru
  }
  
  /**
-  * lpfc_scsi_prep_cmnd:  Routine to convert scsi cmnd to FCP information unit.
+  * lpfc_scsi_prep_cmnd -  Routine to convert scsi cmnd to FCP information unit
   * @vport: The virtual port for which this call is being executed.
   * @lpfc_cmd: The scsi command which needs to send.
   * @pnode: Pointer to lpfc_nodelist.
@@@ -2047,7 -2051,7 +2051,7 @@@ lpfc_scsi_prep_cmnd(struct lpfc_vport *
  }
  
  /**
-  * lpfc_scsi_prep_task_mgmt_cmnd: Convert scsi TM cmnd to FCP information unit.
+  * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
   * @vport: The virtual port for which this call is being executed.
   * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
   * @lun: Logical unit number.
@@@ -2110,7 -2114,7 +2114,7 @@@ lpfc_scsi_prep_task_mgmt_cmd(struct lpf
  }
  
  /**
-  * lpc_taskmgmt_def_cmpl: IOCB completion routine for task management command.
+  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
   * @phba: The Hba for which this call is being executed.
   * @cmdiocbq: Pointer to lpfc_iocbq data structure.
   * @rspiocbq: Pointer to lpfc_iocbq data structure.
@@@ -2131,7 -2135,7 +2135,7 @@@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *
  }
  
  /**
-  * lpfc_scsi_tgt_reset: Target reset handler.
+  * lpfc_scsi_tgt_reset - Target reset handler
   * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
   * @vport: The virtual port for which this call is being executed.
   * @tgt_id: Target ID.
@@@ -2198,7 -2202,7 +2202,7 @@@ lpfc_scsi_tgt_reset(struct lpfc_scsi_bu
  }
  
  /**
-  * lpfc_info: Info entry point of scsi_host_template data structure.
+  * lpfc_info - Info entry point of scsi_host_template data structure
   * @host: The scsi host for which this call is being executed.
   *
   * This routine provides module information about hba.
@@@ -2236,7 -2240,7 +2240,7 @@@ lpfc_info(struct Scsi_Host *host
  }
  
  /**
-  * lpfc_poll_rearm_time: Routine to modify fcp_poll timer of hba.
+  * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
   * @phba: The Hba for which this call is being executed.
   *
   * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
@@@ -2253,7 -2257,7 +2257,7 @@@ static __inline__ void lpfc_poll_rearm_
  }
  
  /**
-  * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
+  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
   * @phba: The Hba for which this call is being executed.
   *
   * This routine starts the fcp_poll_timer of @phba.
@@@ -2264,7 -2268,7 +2268,7 @@@ void lpfc_poll_start_timer(struct lpfc_
  }
  
  /**
-  * lpfc_poll_timeout: Restart polling timer.
+  * lpfc_poll_timeout - Restart polling timer
   * @ptr: Map to lpfc_hba data structure pointer.
   *
   * This routine restarts the fcp_poll timer when FCP ring polling is enabled
@@@ -2283,8 -2287,7 +2287,7 @@@ void lpfc_poll_timeout(unsigned long pt
  }
  
  /**
-  * lpfc_queuecommand: Queuecommand entry point of Scsi Host Templater data
-  * structure.
+  * lpfc_queuecommand - scsi_host_template queuecommand entry point
   * @cmnd: Pointer to scsi_cmnd data structure.
   * @done: Pointer to done routine.
   *
@@@ -2375,15 -2378,15 +2378,15 @@@ lpfc_queuecommand(struct scsi_cmnd *cmn
                if (cmnd->cmnd[0] == READ_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9035 BLKGRD: READ @ sector %llu, "
 -                                       "count %lu\n",
 -                                       (unsigned long long)scsi_get_lba(cmnd),
 -                                      cmnd->request->nr_sectors);
 +                                      "count %u\n",
 +                                      (unsigned long long)scsi_get_lba(cmnd),
 +                                      blk_rq_sectors(cmnd->request));
                else if (cmnd->cmnd[0] == WRITE_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9036 BLKGRD: WRITE @ sector %llu, "
 -                                      "count %lu cmd=%p\n",
 +                                      "count %u cmd=%p\n",
                                        (unsigned long long)scsi_get_lba(cmnd),
 -                                      cmnd->request->nr_sectors,
 +                                      blk_rq_sectors(cmnd->request),
                                        cmnd);
  
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
                if (cmnd->cmnd[0] == READ_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9040 dbg: READ @ sector %llu, "
 -                                       "count %lu\n",
 +                                       "count %u\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
 -                                       cmnd->request->nr_sectors);
 +                                       blk_rq_sectors(cmnd->request));
                else if (cmnd->cmnd[0] == WRITE_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9041 dbg: WRITE @ sector %llu, "
 -                                       "count %lu cmd=%p\n",
 +                                       "count %u cmd=%p\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
 -                                       cmnd->request->nr_sectors, cmnd);
 +                                       blk_rq_sectors(cmnd->request), cmnd);
                else
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9042 dbg: parser not implemented\n");
  }
  
  /**
-  * lpfc_block_error_handler: Routine to block error  handler.
+  * lpfc_block_error_handler - Routine to block error handler
   * @cmnd: Pointer to scsi_cmnd data structure.
   *
   *  This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
@@@ -2472,8 -2475,7 +2475,7 @@@ lpfc_block_error_handler(struct scsi_cm
  }
  
  /**
-  * lpfc_abort_handler: Eh_abort_handler entry point of Scsi Host Template data
-  *structure.
+  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
   * @cmnd: Pointer to scsi_cmnd data structure.
   *
   * This routine aborts @cmnd pending in base driver.
@@@ -2578,8 -2580,7 +2580,7 @@@ lpfc_abort_handler(struct scsi_cmnd *cm
  }
  
  /**
-  * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
-  *data structure.
+  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
   * @cmnd: Pointer to scsi_cmnd data structure.
   *
   * This routine does a device reset by sending a TARGET_RESET task management
   *
   * Return code :
   *  0x2003 - Error
-  *  0ex2002 - Success
+  *  0x2002 - Success
   **/
  static int
  lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
  }
  
  /**
-  * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
-  * Template data structure.
+  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
   * @cmnd: Pointer to scsi_cmnd data structure.
   *
   * This routine does a target reset to all targets on @cmnd->device->host.
@@@ -2808,8 -2808,7 +2808,7 @@@ lpfc_bus_reset_handler(struct scsi_cmn
  }
  
  /**
-  * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
-  * structure.
+  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
   * @sdev: Pointer to scsi_device.
   *
   * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
@@@ -2883,8 -2882,7 +2882,7 @@@ lpfc_slave_alloc(struct scsi_device *sd
  }
  
  /**
-  * lpfc_slave_configure: slave_configure entry point of Scsi Host Templater data
-  *  structure.
+  * lpfc_slave_configure - scsi_host_template slave_configure entry point
   * @sdev: Pointer to scsi_device.
   *
   * This routine configures the following items
@@@ -2925,7 -2923,7 +2923,7 @@@ lpfc_slave_configure(struct scsi_devic
  }
  
  /**
-  * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
+  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
   * @sdev: Pointer to scsi_device.
   *
   * This routine sets the @sdev hostdata field to null.
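Most of the lpfc changes above only reshape the kernel-doc headers: "name: description." becomes "name - description" with no trailing period, and parameter lines gain their missing colons. A generic example of the resulting shape (the function name, parameters and text are placeholders, not lpfc code):

/**
 * example_handler - one-line summary, dash separator, no trailing period
 * @phba: Pointer to HBA context object.
 * @flags: Caller-supplied flags.
 *
 * Longer description of what the routine does goes here.
 **/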
diff --combined drivers/scsi/osd/osd_initiator.c
index 865ec0f4aa80462417c8a0bc2f297e4cad1a92f5,1ce6b24abab297f70937a43cfd414bbe60349f26..5776b2ab6b12a5fa54fe974b11e31619d8ee18e0
@@@ -205,6 -205,74 +205,74 @@@ static unsigned _osd_req_alist_elem_siz
                osdv2_attr_list_elem_size(len);
  }
  
+ static void _osd_req_alist_elem_encode(struct osd_request *or,
+       void *attr_last, const struct osd_attr *oa)
+ {
+       if (osd_req_is_ver1(or)) {
+               struct osdv1_attributes_list_element *attr = attr_last;
+               attr->attr_page = cpu_to_be32(oa->attr_page);
+               attr->attr_id = cpu_to_be32(oa->attr_id);
+               attr->attr_bytes = cpu_to_be16(oa->len);
+               memcpy(attr->attr_val, oa->val_ptr, oa->len);
+       } else {
+               struct osdv2_attributes_list_element *attr = attr_last;
+               attr->attr_page = cpu_to_be32(oa->attr_page);
+               attr->attr_id = cpu_to_be32(oa->attr_id);
+               attr->attr_bytes = cpu_to_be16(oa->len);
+               memcpy(attr->attr_val, oa->val_ptr, oa->len);
+       }
+ }
+ static int _osd_req_alist_elem_decode(struct osd_request *or,
+       void *cur_p, struct osd_attr *oa, unsigned max_bytes)
+ {
+       unsigned inc;
+       if (osd_req_is_ver1(or)) {
+               struct osdv1_attributes_list_element *attr = cur_p;
+               if (max_bytes < sizeof(*attr))
+                       return -1;
+               oa->len = be16_to_cpu(attr->attr_bytes);
+               inc = _osd_req_alist_elem_size(or, oa->len);
+               if (inc > max_bytes)
+                       return -1;
+               oa->attr_page = be32_to_cpu(attr->attr_page);
+               oa->attr_id = be32_to_cpu(attr->attr_id);
+               /* OSD1: On empty attributes we return a pointer to 2 bytes
+                * of zeros. This keeps similar behaviour with OSD2.
+                * (See below)
+                */
+               oa->val_ptr = likely(oa->len) ? attr->attr_val :
+                                               (u8 *)&attr->attr_bytes;
+       } else {
+               struct osdv2_attributes_list_element *attr = cur_p;
+               if (max_bytes < sizeof(*attr))
+                       return -1;
+               oa->len = be16_to_cpu(attr->attr_bytes);
+               inc = _osd_req_alist_elem_size(or, oa->len);
+               if (inc > max_bytes)
+                       return -1;
+               oa->attr_page = be32_to_cpu(attr->attr_page);
+               oa->attr_id = be32_to_cpu(attr->attr_id);
+               /* OSD2: For convenience, on empty attributes, we return 8 bytes
+                * of zeros here. This keeps the same behaviour with OSD2r04,
+                * and is nice with null terminating ASCII fields.
+                * oa->val_ptr == NULL marks the end-of-list, or error.
+                */
+               oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
+       }
+       return inc;
+ }
  static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
  {
        return osd_req_is_ver1(or) ?
@@@ -282,9 -350,9 +350,9 @@@ _osd_req_sec_params(struct osd_request 
        struct osd_cdb *ocdb = &or->cdb;
  
        if (osd_req_is_ver1(or))
-               return &ocdb->v1.sec_params;
+               return (struct osd_security_parameters *)&ocdb->v1.sec_params;
        else
-               return &ocdb->v2.sec_params;
+               return (struct osd_security_parameters *)&ocdb->v2.sec_params;
  }
  
  void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
@@@ -612,9 -680,9 +680,9 @@@ static int _osd_req_list_objects(struc
  
        WARN_ON(or->in.bio);
        bio = bio_map_kern(q, list, len, or->alloc_flags);
-       if (!bio) {
+       if (IS_ERR(bio)) {
                OSD_ERR("!!! Failed to allocate list_objects BIO\n");
-               return -ENOMEM;
+               return PTR_ERR(bio);
        }
  
        bio->bi_rw &= ~(1 << BIO_RW);
@@@ -798,7 -866,6 +866,6 @@@ int osd_req_add_set_attr_list(struct os
        attr_last = or->set_attr.buff + total_bytes;
  
        for (; nelem; --nelem) {
-               struct osd_attributes_list_element *attr;
                unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
  
                total_bytes += elem_size;
                                or->set_attr.buff + or->set_attr.total_bytes;
                }
  
-               attr = attr_last;
-               attr->attr_page = cpu_to_be32(oa->attr_page);
-               attr->attr_id = cpu_to_be32(oa->attr_id);
-               attr->attr_bytes = cpu_to_be16(oa->len);
-               memcpy(attr->attr_val, oa->val_ptr, oa->len);
+               _osd_req_alist_elem_encode(or, attr_last, oa);
  
                attr_last += elem_size;
                ++oa;
  }
  EXPORT_SYMBOL(osd_req_add_set_attr_list);
  
 -static int _append_map_kern(struct request *req,
 -      void *buff, unsigned len, gfp_t flags)
 -{
 -      struct bio *bio;
 -      int ret;
 -
 -      bio = bio_map_kern(req->q, buff, len, flags);
 -      if (IS_ERR(bio)) {
 -              OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
 -                      PTR_ERR(bio));
 -              return PTR_ERR(bio);
 -      }
 -      ret = blk_rq_append_bio(req->q, req, bio);
 -      if (ret) {
 -              OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
 -              bio_put(bio);
 -      }
 -      return ret;
 -}
 -
  static int _req_append_segment(struct osd_request *or,
        unsigned padding, struct _osd_req_data_segment *seg,
        struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
                else
                        pad_buff = io->pad_buff;
  
 -              ret = _append_map_kern(io->req, pad_buff, padding,
 +              ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
                                       or->alloc_flags);
                if (ret)
                        return ret;
                io->total_bytes += padding;
        }
  
 -      ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
 +      ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
                               or->alloc_flags);
        if (ret)
                return ret;
@@@ -1050,15 -1133,10 +1113,10 @@@ int osd_req_decode_get_attr_list(struc
        }
  
        for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
-               struct osd_attributes_list_element *attr = cur_p;
-               unsigned inc;
+               int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
+                                                returned_bytes - cur_bytes);
  
-               oa->len = be16_to_cpu(attr->attr_bytes);
-               inc = _osd_req_alist_elem_size(or, oa->len);
-               OSD_DEBUG("oa->len=%d inc=%d cur_bytes=%d\n",
-                         oa->len, inc, cur_bytes);
-               cur_bytes += inc;
-               if (cur_bytes > returned_bytes) {
+               if (inc < 0) {
                        OSD_ERR("BAD FOOD from target. list not valid!"
                                "c=%d r=%d n=%d\n",
                                cur_bytes, returned_bytes, n);
                        break;
                }
  
-               oa->attr_page = be32_to_cpu(attr->attr_page);
-               oa->attr_id = be32_to_cpu(attr->attr_id);
-               oa->val_ptr = attr->attr_val;
+               cur_bytes += inc;
                cur_p += inc;
                ++oa;
        }
@@@ -1139,6 -1214,24 +1194,24 @@@ static int _osd_req_finalize_attr_page(
        return ret;
  }
  
+ static inline void osd_sec_parms_set_out_offset(bool is_v1,
+       struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
+ {
+       if (is_v1)
+               sec_parms->v1.data_out_integrity_check_offset = offset;
+       else
+               sec_parms->v2.data_out_integrity_check_offset = offset;
+ }
+ static inline void osd_sec_parms_set_in_offset(bool is_v1,
+       struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
+ {
+       if (is_v1)
+               sec_parms->v1.data_in_integrity_check_offset = offset;
+       else
+               sec_parms->v2.data_in_integrity_check_offset = offset;
+ }
  static int _osd_req_finalize_data_integrity(struct osd_request *or,
        bool has_in, bool has_out, const u8 *cap_key)
  {
                or->out_data_integ.get_attributes_bytes = cpu_to_be64(
                        or->enc_get_attr.total_bytes);
  
-               sec_parms->data_out_integrity_check_offset =
-                       osd_req_encode_offset(or, or->out.total_bytes, &pad);
+               osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
+                       osd_req_encode_offset(or, or->out.total_bytes, &pad));
  
                ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
                                          &or->out);
                };
                unsigned pad;
  
-               sec_parms->data_in_integrity_check_offset =
-                       osd_req_encode_offset(or, or->in.total_bytes, &pad);
+               osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
+                       osd_req_encode_offset(or, or->in.total_bytes, &pad));
  
                ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
                                          &or->in);
  /*
   * osd_finalize_request and helpers
   */
 +static struct request *_make_request(struct request_queue *q, bool has_write,
 +                            struct _osd_io_info *oii, gfp_t flags)
 +{
 +      if (oii->bio)
 +              return blk_make_request(q, oii->bio, flags);
 +      else {
 +              struct request *req;
 +
 +              req = blk_get_request(q, has_write ? WRITE : READ, flags);
 +              if (unlikely(!req))
 +                      return ERR_PTR(-ENOMEM);
 +
 +              return req;
 +      }
 +}
  
  static int _init_blk_request(struct osd_request *or,
        bool has_in, bool has_out)
        struct scsi_device *scsi_device = or->osd_dev->scsi_device;
        struct request_queue *q = scsi_device->request_queue;
        struct request *req;
 -      int ret = -ENOMEM;
 +      int ret;
  
 -      req = blk_get_request(q, has_out, flags);
 -      if (!req)
 +      req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
 +      if (IS_ERR(req)) {
 +              ret = PTR_ERR(req);
                goto out;
 +      }
  
        or->request = req;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
                or->out.req = req;
                if (has_in) {
                        /* allocate bidi request */
 -                      req = blk_get_request(q, READ, flags);
 -                      if (!req) {
 +                      req = _make_request(q, false, &or->in, flags);
 +                      if (IS_ERR(req)) {
                                OSD_DEBUG("blk_get_request for bidi failed\n");
 +                              ret = PTR_ERR(req);
                                goto out;
                        }
                        req->cmd_type = REQ_TYPE_BLOCK_PC;
@@@ -1289,6 -1364,26 +1362,6 @@@ int osd_finalize_request(struct osd_req
                return ret;
        }
  
 -      if (or->out.bio) {
 -              ret = blk_rq_append_bio(or->request->q, or->out.req,
 -                                      or->out.bio);
 -              if (ret) {
 -                      OSD_DEBUG("blk_rq_append_bio out failed\n");
 -                      return ret;
 -              }
 -              OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
 -                      _LLU(or->out.total_bytes), or->out.req->data_len);
 -      }
 -      if (or->in.bio) {
 -              ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
 -              if (ret) {
 -                      OSD_DEBUG("blk_rq_append_bio in failed\n");
 -                      return ret;
 -              }
 -              OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
 -                      _LLU(or->in.total_bytes), or->in.req->data_len);
 -      }
 -
        or->out.pad_buff = sg_out_pad_buffer;
        or->in.pad_buff = sg_in_pad_buffer;
  
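The osd_initiator hunks above drop the private _append_map_kern() helper and the explicit blk_rq_append_bio() calls in favor of blk_make_request() and blk_rq_map_kern(). A hedged sketch of that construction path under the same API (only the two block-layer calls, REQ_TYPE_BLOCK_PC and blk_put_request() come from the kernel; the wrapper name and parameters are made up):

#include <linux/err.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

static struct request *build_pc_req(struct request_queue *q, struct bio *bio,
				    void *extra, unsigned int extra_len, gfp_t gfp)
{
	struct request *req = blk_make_request(q, bio, gfp);	/* bio becomes the payload */
	int err;

	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	if (extra_len) {
		/* Append an extra kernel buffer (e.g. padding) to the request. */
		err = blk_rq_map_kern(q, req, extra, extra_len, gfp);
		if (err) {
			blk_put_request(req);
			return ERR_PTR(err);
		}
	}
	return req;
}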
diff --combined drivers/scsi/scsi_lib.c
index d7c6c752e0a65f81f69c8c4a4ea3477c10378df0,bb218c8b6e98373344d1c188982a189a7aecbbf9..dd3f9d2b99fd05b7834e0abbb7e2cbe23e12d462
@@@ -240,11 -240,11 +240,11 @@@ int scsi_execute(struct scsi_device *sd
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
 -      if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
 -              memset(buffer + (bufflen - req->data_len), 0, req->data_len);
 +      if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
 +              memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
  
        if (resid)
 -              *resid = req->data_len;
 +              *resid = req->resid_len;
        ret = req->errors;
   out:
        blk_put_request(req);
@@@ -546,9 -546,14 +546,9 @@@ static struct scsi_cmnd *scsi_end_reque
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
 -              int leftover = (req->hard_nr_sectors << 9);
 -
 -              if (blk_pc_request(req))
 -                      leftover = req->data_len;
 -
                /* kill remainder if no retrys */
                if (error && scsi_noretry_cmd(cmd))
 -                      blk_end_request(req, error, leftover);
 +                      blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
@@@ -667,6 -672,34 +667,6 @@@ void scsi_release_buffers(struct scsi_c
  }
  EXPORT_SYMBOL(scsi_release_buffers);
  
 -/*
 - * Bidi commands Must be complete as a whole, both sides at once.
 - * If part of the bytes were written and lld returned
 - * scsi_in()->resid and/or scsi_out()->resid this information will be left
 - * in req->data_len and req->next_rq->data_len. The upper-layer driver can
 - * decide what to do with this information.
 - */
 -static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 -{
 -      struct request *req = cmd->request;
 -      unsigned int dlen = req->data_len;
 -      unsigned int next_dlen = req->next_rq->data_len;
 -
 -      req->data_len = scsi_out(cmd)->resid;
 -      req->next_rq->data_len = scsi_in(cmd)->resid;
 -
 -      /* The req and req->next_rq have not been completed */
 -      BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
 -
 -      scsi_release_buffers(cmd);
 -
 -      /*
 -       * This will goose the queue request function at the end, so we don't
 -       * need to worry about launching another command.
 -       */
 -      scsi_next_command(cmd);
 -}
 -
  /*
   * Function:    scsi_io_completion()
   *
  void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  {
        int result = cmd->result;
 -      int this_count;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
                        if (!sense_deferred)
                                error = -EIO;
                }
 +
 +              req->resid_len = scsi_get_resid(cmd);
 +
                if (scsi_bidi_cmnd(cmd)) {
 -                      /* will also release_buffers */
 -                      scsi_end_bidi_request(cmd);
 +                      /*
 +                       * Bidi commands Must be complete as a whole,
 +                       * both sides at once.
 +                       */
 +                      req->next_rq->resid_len = scsi_in(cmd)->resid;
 +
 +                      blk_end_request_all(req, 0);
 +
 +                      scsi_release_buffers(cmd);
 +                      scsi_next_command(cmd);
                        return;
                }
 -              req->data_len = scsi_get_resid(cmd);
        }
  
        BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
 -      SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
 +      SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
 -                                    req->nr_sectors, good_bytes));
 +                                    blk_rq_sectors(req), good_bytes));
  
        /*
         * Recovered errors need reporting, but they're always treated
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;
 -      this_count = blk_rq_bytes(req);
  
        error = -EIO;
  
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                }
 -              blk_end_request(req, -EIO, blk_rq_bytes(req));
 +              blk_end_request_all(req, -EIO);
                scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
@@@ -940,7 -965,10 +940,7 @@@ static int scsi_init_sgtable(struct req
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
 -      if (blk_pc_request(req))
 -              sdb->length = req->data_len;
 -      else
 -              sdb->length = req->nr_sectors << 9;
 +      sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
  }
  
@@@ -1059,21 -1087,22 +1059,21 @@@ int scsi_setup_blk_pc_cmnd(struct scsi_
                if (unlikely(ret))
                        return ret;
        } else {
 -              BUG_ON(req->data_len);
 -              BUG_ON(req->data);
 +              BUG_ON(blk_rq_bytes(req));
  
                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }
  
        cmd->cmd_len = req->cmd_len;
 -      if (!req->data_len)
 +      if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;
        
 -      cmd->transfersize = req->data_len;
 +      cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
  }
@@@ -1183,7 -1212,7 +1183,7 @@@ int scsi_prep_return(struct request_que
                break;
        case BLKPREP_DEFER:
                /*
 -               * If we defer, the elv_next_request() returns NULL, but the
 +               * If we defer, the blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we plug here if no returning
                 * command will automatically do that.
                 */
@@@ -1262,10 -1291,8 +1262,8 @@@ static inline int scsi_target_queue_rea
                if (--starget->target_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                         "unblocking target at zero depth\n"));
-               } else {
-                       blk_plug_device(sdev->request_queue);
+               } else
                        return 0;
-               }
        }
  
        if (scsi_target_is_busy(starget)) {
@@@ -1361,7 -1388,7 +1359,7 @@@ static void scsi_kill_request(struct re
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
  
 -      blkdev_dequeue_request(req);
 +      blk_start_request(req);
  
        if (unlikely(cmd == NULL)) {
                printk(KERN_CRIT "impossible request in %s.\n",
@@@ -1453,7 -1480,7 +1451,7 @@@ static void scsi_request_fn(struct requ
  
        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
 -              while ((req = elv_next_request(q)) != NULL)
 +              while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }
                 * that the request is fully prepared even if we cannot 
                 * accept it.
                 */
 -              req = elv_next_request(q);
 +              req = blk_peek_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;
  
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
 -                      blkdev_dequeue_request(req);
 +                      blk_start_request(req);
                sdev->device_busy++;
  
                spin_unlock(q->queue_lock);
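The scsi_lib hunks above switch the issue path from elv_next_request()/blkdev_dequeue_request() to blk_peek_request()/blk_start_request(), and finish requests with the *_end_request_all() helpers. A minimal sketch of a trivial request function using that API (not scsi_lib itself; the function name is hypothetical, and requests are completed synchronously only to keep the example short). Note that a request_fn runs with the queue lock held, which is why the locked __blk_end_request_all() variant is used:

#include <linux/errno.h>
#include <linux/blkdev.h>

static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		blk_start_request(req);			/* dequeue it */
		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/* ... transfer blk_rq_bytes(req) starting at blk_rq_pos(req) ... */
		__blk_end_request_all(req, 0);
	}
}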
diff --combined drivers/scsi/sd.c
index 70c4dd99bbf0148ee1ece8401a0989143ad63426,84044233b637c71929bf22b665d15976096e5d63..40d2860f235a3d2d85bed42b95a24ddc2a180f68
@@@ -50,6 -50,7 +50,7 @@@
  #include <linux/string_helpers.h>
  #include <linux/async.h>
  #include <asm/uaccess.h>
+ #include <asm/unaligned.h>
  
  #include <scsi/scsi.h>
  #include <scsi/scsi_cmnd.h>
@@@ -383,9 -384,9 +384,9 @@@ static int sd_prep_fn(struct request_qu
        struct scsi_device *sdp = q->queuedata;
        struct gendisk *disk = rq->rq_disk;
        struct scsi_disk *sdkp;
 -      sector_t block = rq->sector;
 +      sector_t block = blk_rq_pos(rq);
        sector_t threshold;
 -      unsigned int this_count = rq->nr_sectors;
 +      unsigned int this_count = blk_rq_sectors(rq);
        int ret, host_dif;
  
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                                        this_count));
  
        if (!sdp || !scsi_device_online(sdp) ||
 -          block + rq->nr_sectors > get_capacity(disk)) {
 +          block + blk_rq_sectors(rq) > get_capacity(disk)) {
                SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
 -                                              "Finishing %ld sectors\n",
 -                                              rq->nr_sectors));
 +                                              "Finishing %u sectors\n",
 +                                              blk_rq_sectors(rq)));
                SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
                                                "Retry with 0x%p\n", SCpnt));
                goto out;
         * for this.
         */
        if (sdp->sector_size == 1024) {
 -              if ((block & 1) || (rq->nr_sectors & 1)) {
 +              if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
                }
        }
        if (sdp->sector_size == 2048) {
 -              if ((block & 3) || (rq->nr_sectors & 3)) {
 +              if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
                }
        }
        if (sdp->sector_size == 4096) {
 -              if ((block & 7) || (rq->nr_sectors & 7)) {
 +              if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
        }
  
        SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
 -                                      "%s %d/%ld 512 byte blocks.\n",
 +                                      "%s %d/%u 512 byte blocks.\n",
                                        (rq_data_dir(rq) == WRITE) ?
                                        "writing" : "reading", this_count,
 -                                      rq->nr_sectors));
 +                                      blk_rq_sectors(rq)));
  
        /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
        host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@@ -970,8 -971,8 +971,8 @@@ static struct block_device_operations s
  
  static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
  {
 -      u64 start_lba = scmd->request->sector;
 -      u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
 +      u64 start_lba = blk_rq_pos(scmd->request);
 +      u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
        u64 bad_lba;
        int info_valid;
  
@@@ -1344,12 -1345,8 +1345,8 @@@ static int read_capacity_16(struct scsi
                return -EINVAL;
        }
  
-       sector_size =   (buffer[8] << 24) | (buffer[9] << 16) |
-                       (buffer[10] << 8) | buffer[11];
-       lba =  (((u64)buffer[0] << 56) | ((u64)buffer[1] << 48) |
-               ((u64)buffer[2] << 40) | ((u64)buffer[3] << 32) |
-               ((u64)buffer[4] << 24) | ((u64)buffer[5] << 16) |
-               ((u64)buffer[6] << 8) | (u64)buffer[7]);
+       sector_size = get_unaligned_be32(&buffer[8]);
+       lba = get_unaligned_be64(&buffer[0]);
  
        sd_read_protection_type(sdkp, buffer);
  
@@@ -1400,10 -1397,8 +1397,8 @@@ static int read_capacity_10(struct scsi
                return -EINVAL;
        }
  
-       sector_size =   (buffer[4] << 24) | (buffer[5] << 16) |
-                       (buffer[6] << 8) | buffer[7];
-       lba =   (buffer[0] << 24) | (buffer[1] << 16) |
-               (buffer[2] << 8) | buffer[3];
+       sector_size = get_unaligned_be32(&buffer[4]);
+       lba = get_unaligned_be32(&buffer[0]);
  
        if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
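The sd.c hunks above replace the open-coded big-endian byte assembly in read_capacity_10/16 with get_unaligned_be32()/get_unaligned_be64(). A tiny illustration of the equivalence (the helper name and buffer layout assumption are for illustration only):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Equivalent of the removed shifts: read a big-endian 32-bit field that
 * may not be naturally aligned, here at offset 4 of a response buffer. */
static u32 be32_field_at_4(const unsigned char *buffer)
{
	/* Same value as (buffer[4] << 24) | (buffer[5] << 16) |
	 *               (buffer[6] << 8)  |  buffer[7]          */
	return get_unaligned_be32(&buffer[4]);
}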
diff --combined drivers/scsi/sg.c
index dec4c70677dee667be8f0481f7455327662b07ab,e1716f14cd4710cca0363eab985cdf370d9b9879..0fc2c0ae7691b4b6f38672880ccfed3a7fbacea3
@@@ -179,7 -179,7 +179,7 @@@ typedef struct sg_device { /* holds th
  /* tasklet or soft irq callback */
  static void sg_rq_end_io(struct request *rq, int uptodate);
  static int sg_start_req(Sg_request *srp, unsigned char *cmd);
- static void sg_finish_rem_req(Sg_request * srp);
+ static int sg_finish_rem_req(Sg_request * srp);
  static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
  static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
                           Sg_request * srp);
@@@ -518,7 -518,7 +518,7 @@@ sg_new_read(Sg_fd * sfp, char __user *b
                goto err_out;
        }
  err_out:
-       sg_finish_rem_req(srp);
+       err = sg_finish_rem_req(srp);
        return (0 == err) ? count : err;
  }
  
@@@ -1260,7 -1260,7 +1260,7 @@@ static void sg_rq_end_io(struct reques
  
        sense = rq->sense;
        result = rq->errors;
 -      resid = rq->data_len;
 +      resid = rq->resid_len;
  
        SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
                sdp->disk->disk_name, srp->header.pack_id, result));
@@@ -1696,9 -1696,10 +1696,10 @@@ static int sg_start_req(Sg_request *srp
        return res;
  }
  
- static void
- sg_finish_rem_req(Sg_request * srp)
+ static int sg_finish_rem_req(Sg_request * srp)
  {
+       int ret = 0;
        Sg_fd *sfp = srp->parentfp;
        Sg_scatter_hold *req_schp = &srp->data;
  
  
        if (srp->rq) {
                if (srp->bio)
-                       blk_rq_unmap_user(srp->bio);
+                       ret = blk_rq_unmap_user(srp->bio);
  
                blk_put_request(srp->rq);
        }
  
        sg_remove_request(sfp, srp);
+       return ret;
  }
  
  static int
diff --combined fs/bio.c
index ee3bc67833d2bd24ff2dff6daad4da3a0e9b2efb,98711647ece49548a94409a99ce4b680ee085ca1..81dc93e72535f299b8df7687a2cfa818fcbe4d00
+++ b/fs/bio.c
@@@ -817,6 -817,9 +817,9 @@@ struct bio *bio_copy_user_iov(struct re
                len += iov[i].iov_len;
        }
  
+       if (offset)
+               nr_pages++;
        bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);
@@@ -1198,7 -1201,7 +1201,7 @@@ static void bio_copy_kern_endio(struct 
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;
  
 -              if (read && !err)
 +              if (read)
                        memcpy(p, addr, len);
  
                __free_page(bvec->bv_page);
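The fs/bio.c change above adds "if (offset) nr_pages++": a buffer that starts at a nonzero offset inside its first page can straddle one more page than its length alone suggests. A hypothetical helper showing the arithmetic (not kernel code, assumes len > 0):

#include <linux/mm.h>

static unsigned int pages_spanned(unsigned long uaddr, unsigned int len)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;

	return last - first + 1;	/* e.g. 4096 bytes at offset 512 span 2 pages */
}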
diff --combined include/linux/blkdev.h
index a967dd775dbd115bcd090854de2190c71f6969f8,b4f71f1a4af72bd7c68c6f6fe707f43e6a50888f..56ce53fce72eeabd7452b696c9fc024f12f4a423
@@@ -166,9 -166,19 +166,9 @@@ struct request 
        enum rq_cmd_type_bits cmd_type;
        unsigned long atomic_flags;
  
 -      /* Maintain bio traversal state for part by part I/O submission.
 -       * hard_* are block layer internals, no driver should touch them!
 -       */
 -
 -      sector_t sector;                /* next sector to submit */
 -      sector_t hard_sector;           /* next sector to complete */
 -      unsigned long nr_sectors;       /* no. of sectors left to submit */
 -      unsigned long hard_nr_sectors;  /* no. of sectors left to complete */
 -      /* no. of sectors left to submit in the current segment */
 -      unsigned int current_nr_sectors;
 -
 -      /* no. of sectors left to complete in the current segment */
 -      unsigned int hard_cur_sectors;
 +      /* the following two fields are internal, NEVER access directly */
 +      sector_t __sector;              /* sector cursor */
 +      unsigned int __data_len;        /* total data len */
  
        struct bio *bio;
        struct bio *biotail;
  
        unsigned short ioprio;
  
 -      void *special;
 -      char *buffer;
 +      void *special;          /* opaque pointer available for LLD use */
 +      char *buffer;           /* kaddr of the current segment if available */
  
        int tag;
        int errors;
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;
  
 -      unsigned int data_len;
        unsigned int extra_len; /* length of alignment and padding */
        unsigned int sense_len;
 -      void *data;
 +      unsigned int resid_len; /* residual count */
        void *sense;
  
        unsigned long deadline;
@@@ -404,7 -415,7 +404,7 @@@ struct request_queu
        struct list_head        tag_busy_list;
  
        unsigned int            nr_sorted;
 -      unsigned int            in_flight;
 +      unsigned int            in_flight[2];
  
        unsigned int            rq_timeout;
        struct timer_list       timeout;
@@@ -511,11 -522,6 +511,11 @@@ static inline void queue_flag_clear_unl
        __clear_bit(flag, &q->queue_flags);
  }
  
 +static inline int queue_in_flight(struct request_queue *q)
 +{
 +      return q->in_flight[0] + q->in_flight[1];
 +}
 +
  static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
  {
        WARN_ON_ONCE(!queue_is_locked(q));
@@@ -595,6 -601,7 +595,7 @@@ enum 
                                 blk_failfast_driver(rq))
  #define blk_rq_started(rq)    ((rq)->cmd_flags & REQ_STARTED)
  #define blk_rq_io_stat(rq)    ((rq)->cmd_flags & REQ_IO_STAT)
+ #define blk_rq_quiet(rq)      ((rq)->cmd_flags & REQ_QUIET)
  
  #define blk_account_rq(rq)    (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 
  
@@@ -745,8 -752,6 +746,8 @@@ extern void blk_rq_init(struct request_
  extern void blk_put_request(struct request *);
  extern void __blk_put_request(struct request_queue *, struct request *);
  extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 +extern struct request *blk_make_request(struct request_queue *, struct bio *,
 +                                      gfp_t);
  extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
  extern void blk_requeue_request(struct request_queue *, struct request *);
  extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
@@@ -762,6 -767,12 +763,6 @@@ extern int scsi_cmd_ioctl(struct reques
  extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
  
 -/*
 - * Temporary export, until SCSI gets fixed up.
 - */
 -extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 -                           struct bio *bio);
 -
  /*
   * A queue has just exitted congestion.  Note this in the global counter of
   * congested queues, and wake up anyone who was waiting for requests to be
@@@ -787,6 -798,7 +788,6 @@@ extern void blk_sync_queue(struct reque
  extern void __blk_stop_queue(struct request_queue *q);
  extern void __blk_run_queue(struct request_queue *);
  extern void blk_run_queue(struct request_queue *);
 -extern void blk_start_queueing(struct request_queue *);
  extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
@@@ -819,73 -831,41 +820,73 @@@ static inline void blk_run_address_spac
                blk_run_backing_dev(mapping->backing_dev_info, NULL);
  }
  
 -extern void blkdev_dequeue_request(struct request *req);
 +/*
 + * blk_rq_pos()               : the current sector
 + * blk_rq_bytes()     : bytes left in the entire request
 + * blk_rq_cur_bytes() : bytes left in the current segment
 + * blk_rq_sectors()   : sectors left in the entire request
 + * blk_rq_cur_sectors()       : sectors left in the current segment
 + */
 +static inline sector_t blk_rq_pos(const struct request *rq)
 +{
 +      return rq->__sector;
 +}
 +
 +static inline unsigned int blk_rq_bytes(const struct request *rq)
 +{
 +      return rq->__data_len;
 +}
 +
 +static inline int blk_rq_cur_bytes(const struct request *rq)
 +{
 +      return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 +}
 +
 +static inline unsigned int blk_rq_sectors(const struct request *rq)
 +{
 +      return blk_rq_bytes(rq) >> 9;
 +}
 +
 +static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 +{
 +      return blk_rq_cur_bytes(rq) >> 9;
 +}
 +
 +/*
 + * Request issue related functions.
 + */
 +extern struct request *blk_peek_request(struct request_queue *q);
 +extern void blk_start_request(struct request *rq);
 +extern struct request *blk_fetch_request(struct request_queue *q);
  
  /*
 - * blk_end_request() and friends.
 - * __blk_end_request() and end_request() must be called with
 - * the request queue spinlock acquired.
 + * Request completion related functions.
 + *
 + * blk_update_request() completes given number of bytes and updates
 + * the request without completing it.
 + *
 + * blk_end_request() and friends.  __blk_end_request() must be called
 + * with the request queue spinlock acquired.
   *
   * Several drivers define their own end_request and call
   * blk_end_request() for parts of the original function.
   * This prevents code duplication in drivers.
   */
 -extern int blk_end_request(struct request *rq, int error,
 -                              unsigned int nr_bytes);
 -extern int __blk_end_request(struct request *rq, int error,
 -                              unsigned int nr_bytes);
 -extern int blk_end_bidi_request(struct request *rq, int error,
 -                              unsigned int nr_bytes, unsigned int bidi_bytes);
 -extern void end_request(struct request *, int);
 -extern int blk_end_request_callback(struct request *rq, int error,
 -                              unsigned int nr_bytes,
 -                              int (drv_callback)(struct request *));
 +extern bool blk_update_request(struct request *rq, int error,
 +                             unsigned int nr_bytes);
 +extern bool blk_end_request(struct request *rq, int error,
 +                          unsigned int nr_bytes);
 +extern void blk_end_request_all(struct request *rq, int error);
 +extern bool blk_end_request_cur(struct request *rq, int error);
 +extern bool __blk_end_request(struct request *rq, int error,
 +                            unsigned int nr_bytes);
 +extern void __blk_end_request_all(struct request *rq, int error);
 +extern bool __blk_end_request_cur(struct request *rq, int error);
 +
  extern void blk_complete_request(struct request *);
  extern void __blk_complete_request(struct request *);
  extern void blk_abort_request(struct request *);
  extern void blk_abort_queue(struct request_queue *);
 -extern void blk_update_request(struct request *rq, int error,
 -                             unsigned int nr_bytes);
 -
 -/*
 - * blk_end_request() takes bytes instead of sectors as a complete size.
 - * blk_rq_bytes() returns bytes left to complete in the entire request.
 - * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 - */
 -extern unsigned int blk_rq_bytes(struct request *rq);
 -extern unsigned int blk_rq_cur_bytes(struct request *rq);
  
  /*
   * Access functions for manipulating queue properties
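The blkdev.h hunk above replaces direct use of the old rq->sector / rq->nr_sectors fields and the end_request()/blkdev_dequeue_request() calls with accessor helpers (blk_rq_pos() and friends) plus the blk_fetch_request() / blk_end_request() family. Below is a minimal sketch of how a simple block driver's request function would look against the new interface; hypo_request_fn and hypo_transfer are hypothetical names used only for illustration and are not part of the kernel API.

#include <linux/blkdev.h>

/*
 * Minimal sketch of a strict-prototype request function built on the
 * interfaces declared in the hunk above.  The request_fn is invoked
 * with the queue lock held, so the locked __blk_end_request_cur()
 * variant is the appropriate completion call here.
 */
static void hypo_request_fn(struct request_queue *q)
{
        /* blk_fetch_request() = blk_peek_request() + blk_start_request() */
        struct request *req = blk_fetch_request(q);

        while (req) {
                int err = 0;

                if (!blk_fs_request(req)) {
                        /* not an ordinary read/write request */
                        err = -EIO;
                } else {
                        /*
                         * The old req->sector / req->current_nr_sectors
                         * fields are gone; use the new accessors instead.
                         * hypo_transfer() stands in for the device I/O.
                         */
                        err = hypo_transfer(req->rq_disk->private_data,
                                            blk_rq_pos(req),
                                            blk_rq_cur_sectors(req),
                                            req->buffer,
                                            rq_data_dir(req));
                }

                /*
                 * Complete the current segment.  __blk_end_request_cur()
                 * returns false once the whole request is finished, at
                 * which point the next request is fetched.
                 */
                if (!__blk_end_request_cur(req, err))
                        req = blk_fetch_request(q);
        }
}

Compared with the pre-2.6.31 pattern, the separate blkdev_dequeue_request() and end_request() steps removed in this hunk are folded into blk_fetch_request() and the __blk_end_request_*() helpers.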
diff --combined include/linux/fs.h
index d926c2bea1662c8867694b6c518cff8a2f5ef57c,3b534e527e09922aa0d89b1854ba60a4e5871f5a..83d6b4397245bdbb44f27d9f7cbdfa763ff96767
@@@ -1775,6 -1775,7 +1775,7 @@@ void kill_block_super(struct super_bloc
  void kill_anon_super(struct super_block *sb);
  void kill_litter_super(struct super_block *sb);
  void deactivate_super(struct super_block *sb);
+ void deactivate_locked_super(struct super_block *sb);
  int set_anon_super(struct super_block *s, void *data);
  struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
@@@ -2117,7 -2118,7 +2118,7 @@@ extern struct file *create_write_pipe(i
  extern void free_write_pipe(struct file *);
  
  extern struct file *do_filp_open(int dfd, const char *pathname,
-               int open_flag, int mode);
+               int open_flag, int mode, int acc_mode);
  extern int may_open(struct path *, int, int);
  
  extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
@@@ -2204,8 -2205,6 +2205,8 @@@ extern int generic_segment_checks(cons
  /* fs/splice.c */
  extern ssize_t generic_file_splice_read(struct file *, loff_t *,
                struct pipe_inode_info *, size_t, unsigned int);
 +extern ssize_t default_file_splice_read(struct file *, loff_t *,
 +              struct pipe_inode_info *, size_t, unsigned int);
  extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
                struct file *, loff_t *, size_t, unsigned int);
  extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
@@@ -2369,6 -2368,7 +2370,7 @@@ extern void file_update_time(struct fil
  
  extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
  extern void save_mount_options(struct super_block *sb, char *options);
+ extern void replace_mount_options(struct super_block *sb, char *options);
  
  static inline ino_t parent_ino(struct dentry *dentry)
  {