[SCSI] scsi_lib: fix recognition of cache type of Initio SBP-2 bridges
index ba93d6e66d481506dc065c5763eb53f6f91fed7e..eab303d148d8b8d9d8252c2fffcd6517ee5a0504 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/hardirq.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_dbg.h>
@@ -285,13 +286,12 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
        int result;
        
        if (sshdr) {
-               sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+               sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
-               memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-                                 sense, timeout, retries, 0);
+                             sense, timeout, retries, 0);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
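
The kzalloc() conversion above is purely a simplification: kzalloc(size, flags) behaves like kmalloc(size, flags) followed by memset(ptr, 0, size), so the sense buffer is still zeroed before the command is issued. A minimal sketch of the equivalence:

	/* Sketch only: the replaced two-step pattern... */
	sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (sense)
		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);

	/* ...collapses into a single zeroing allocation: */
	sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
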
@@ -436,6 +436,7 @@ free_bios:
  * scsi_execute_async - insert request
  * @sdev:      scsi device
  * @cmd:       scsi command
+ * @cmd_len:   length of scsi cdb
  * @data_direction: data direction
  * @buffer:    data buffer (this can be a kernel buffer or scatterlist)
  * @bufflen:   len of buffer
@@ -445,7 +446,7 @@ free_bios:
  * @flags:     or into request flags
  **/
 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
-                      int data_direction, void *buffer, unsigned bufflen,
+                      int cmd_len, int data_direction, void *buffer, unsigned bufflen,
                       int use_sg, int timeout, int retries, void *privdata,
                       void (*done)(void *, char *, int, int), gfp_t gfp)
 {
@@ -472,7 +473,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
        if (err)
                goto free_req;
 
-       req->cmd_len = COMMAND_SIZE(cmd[0]);
+       req->cmd_len = cmd_len;
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sioc->sense;
        req->sense_len = 0;
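
With the new cmd_len argument, callers of scsi_execute_async() choose the CDB length themselves instead of having it derived from the opcode. A hypothetical call site (sdev, cmd, buffer and friends are placeholders): passing COMMAND_SIZE(cmd[0]) reproduces the old behaviour, while callers issuing vendor-specific CDBs can now pass the true length:

	/* Hypothetical caller sketch: argument order follows the new
	 * signature; COMMAND_SIZE(cmd[0]) keeps the old semantics.
	 */
	err = scsi_execute_async(sdev, cmd, COMMAND_SIZE(cmd[0]),
				 DMA_FROM_DEVICE, buffer, bufflen, use_sg,
				 timeout, retries, privdata, done, GFP_KERNEL);
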
@@ -1212,7 +1213,7 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
        return -EOPNOTSUPP;
 }
 
-static void scsi_generic_done(struct scsi_cmnd *cmd)
+static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
        BUG_ON(!blk_pc_request(cmd->request));
        /*
@@ -1224,7 +1225,7 @@ static void scsi_generic_done(struct scsi_cmnd *cmd)
        scsi_io_completion(cmd, cmd->bufflen, 0);
 }
 
-void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 {
        struct request *req = cmd->request;
 
@@ -1241,8 +1242,8 @@ void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
        cmd->transfersize = req->data_len;
        cmd->allowed = req->retries;
        cmd->timeout_per_command = req->timeout;
+       cmd->done = scsi_blk_pc_done;
 }
-EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
 
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
@@ -1339,7 +1340,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
-               struct scsi_driver *drv;
                int ret;
 
                /*
@@ -1371,16 +1371,17 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
                /*
                 * Initialize the actual SCSI command for this request.
                 */
-               if (req->rq_disk) {
+               if (req->flags & REQ_BLOCK_PC) {
+                       scsi_setup_blk_pc_cmnd(cmd);
+               } else if (req->rq_disk) {
+                       struct scsi_driver *drv;
+
                        drv = *(struct scsi_driver **)req->rq_disk->private_data;
                        if (unlikely(!drv->init_command(cmd))) {
                                scsi_release_buffers(cmd);
                                scsi_put_command(cmd);
                                goto kill;
                        }
-               } else {
-                       scsi_setup_blk_pc_cmnd(cmd);
-                       cmd->done = scsi_generic_done;
                }
        }
 
@@ -1493,6 +1494,41 @@ static void scsi_kill_request(struct request *req, request_queue_t *q)
        __scsi_done(cmd);
 }
 
+static void scsi_softirq_done(struct request *rq)
+{
+       struct scsi_cmnd *cmd = rq->completion_data;
+       unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
+       int disposition;
+
+       INIT_LIST_HEAD(&cmd->eh_entry);
+
+       disposition = scsi_decide_disposition(cmd);
+       if (disposition != SUCCESS &&
+           time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+               sdev_printk(KERN_ERR, cmd->device,
+                           "timing out command, waited %lus\n",
+                           wait_for/HZ);
+               disposition = SUCCESS;
+       }
+                       
+       scsi_log_completion(cmd, disposition);
+
+       switch (disposition) {
+               case SUCCESS:
+                       scsi_finish_command(cmd);
+                       break;
+               case NEEDS_RETRY:
+                       scsi_retry_command(cmd);
+                       break;
+               case ADD_TO_MLQUEUE:
+                       scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+                       break;
+               default:
+                       if (!scsi_eh_scmd_add(cmd, 0))
+                               scsi_finish_command(cmd);
+       }
+}
+
 /*
  * Function:    scsi_request_fn()
  *
@@ -1667,6 +1703,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+       blk_queue_softirq_done(q, scsi_softirq_done);
 
        if (!shost->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
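
blk_queue_softirq_done() registers scsi_softirq_done() as this queue's softirq completion handler: the low-level driver's hard-IRQ path merely hands the finished request back to the block layer, and the disposition/retry bookkeeping above then runs in BLOCK_SOFTIRQ context. A simplified sketch of the producing side (example_hardirq_complete() is hypothetical, not part of the patch):

	/* Hypothetical hard-IRQ side: stash the command where
	 * scsi_softirq_done() expects to find it, then let the block
	 * layer raise BLOCK_SOFTIRQ to finish the work later.
	 */
	static void example_hardirq_complete(struct scsi_cmnd *cmd)
	{
		struct request *rq = cmd->request;

		rq->completion_data = cmd;
		blk_complete_request(rq);
	}
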
@@ -1855,8 +1892,16 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
        }
 
        if(scsi_status_is_good(result)) {
-               data->header_length = header_length;
-               if(use_10_for_ms) {
+               if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
+                            (modepage == 6 || modepage == 8))) {
+                       /* Initio breakage? */
+                       header_length = 0;
+                       data->length = 13;
+                       data->medium_type = 0;
+                       data->device_specific = 0;
+                       data->longlba = 0;
+                       data->block_descriptor_length = 0;
+               } else if(use_10_for_ms) {
                        data->length = buffer[0]*256 + buffer[1] + 2;
                        data->medium_type = buffer[2];
                        data->device_specific = buffer[3];
@@ -1869,6 +1914,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
                        data->device_specific = buffer[2];
                        data->block_descriptor_length = buffer[3];
                }
+               data->header_length = header_length;
        }
 
        return result;
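
The magic bytes in the Initio workaround are worth decoding: 0x86 appears to be mode page code 6 (the RBC device-parameters page) with its PS bit (0x80) set, and 0x0b is that page's length (11 bytes), so a response starting 0x86 0x0b looks like a mode page delivered without the expected mode-parameter header in front of it; the code then synthesizes a header rather than misparsing page bytes as one. A hypothetical helper naming the heuristic:

	/* Hypothetical helper: buf[] beginning 0x86 0x0b matches an RBC
	 * device-parameters page (page code 6, PS bit set, length 11)
	 * with no mode parameter header before it.
	 */
	static int initio_headerless_mode_data(const unsigned char *buf,
					       int modepage)
	{
		return buf[0] == 0x86 && buf[1] == 0x0b &&
		       (modepage == 6 || modepage == 8);
	}
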
@@ -2211,3 +2257,61 @@ scsi_target_unblock(struct device *dev)
                device_for_each_child(dev, NULL, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+
+struct work_queue_work {
+       struct work_struct      work;
+       void                    (*fn)(void *);
+       void                    *data;
+};
+
+static void execute_in_process_context_work(void *data)
+{
+       void (*fn)(void *data);
+       struct work_queue_work *wqw = data;
+
+       fn = wqw->fn;
+       data = wqw->data;
+
+       kfree(wqw);
+
+       fn(data);
+}
+
+/**
+ * scsi_execute_in_process_context - reliably execute the routine with user context
+ * @fn:                the function to execute
+ * @data:      data to pass to the function
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:    0 - function was executed
+ *             1 - function was scheduled for execution
+ *             <0 - error
+ */
+int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
+{
+       struct work_queue_work *wqw;
+
+       if (!in_interrupt()) {
+               fn(data);
+               return 0;
+       }
+
+       wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
+
+       if (unlikely(!wqw)) {
+               printk(KERN_ERR "Failed to allocate memory\n");
+               WARN_ON(1);
+               return -ENOMEM;
+       }
+
+       INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
+       wqw->fn = fn;
+       wqw->data = data;
+       schedule_work(&wqw->work);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
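
A usage sketch for the new helper (example_release() and its body are illustrative, not part of the patch): a routine that may sleep but can be reached from interrupt context is funneled through scsi_execute_in_process_context(), which is also why the linux/hardirq.h include for in_interrupt() was added at the top of this diff:

	/* Hypothetical caller: run a possibly-sleeping teardown inline
	 * when in process context, or from a scheduled work item when
	 * called in interrupt context.
	 */
	static void example_release(void *data)
	{
		struct scsi_device *sdev = data;

		/* ...may sleep: release resources tied to sdev... */
	}

	/* At the call site, possibly in IRQ context: */
	scsi_execute_in_process_context(example_release, sdev);
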