X-Git-Url: http://pileus.org/git/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fscsi%2Fscsi_lib.c;h=eab303d148d8b8d9d8252c2fffcd6517ee5a0504;hb=6d73c8514da241c6b1b8d710a6294786604d7142;hp=a7f3f0c84db75537a7cc06d6c7275cef01880874;hpb=d779188d2baf436e67fe8816fca2ef53d246900f;p=~andy%2Flinux

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a7f3f0c84db..eab303d148d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/hardirq.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -285,13 +286,12 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 	int result;
 
 	if (sshdr) {
-		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
 		if (!sense)
 			return DRIVER_ERROR << 24;
-		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
 	}
 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-			      sense, timeout, retries, 0);
+			      sense, timeout, retries, 0);
 	if (sshdr)
 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
@@ -308,7 +308,7 @@ struct scsi_io_context {
 
 static kmem_cache_t *scsi_io_context_cache;
 
-static void scsi_end_async(struct request *req)
+static void scsi_end_async(struct request *req, int uptodate)
 {
 	struct scsi_io_context *sioc = req->end_io_data;
 
@@ -436,6 +436,7 @@ free_bios:
  * scsi_execute_async - insert request
  * @sdev: scsi device
  * @cmd: scsi command
+ * @cmd_len: length of scsi cdb
  * @data_direction: data direction
  * @buffer: data buffer (this can be a kernel buffer or scatterlist)
  * @bufflen: len of buffer
@@ -445,7 +446,7 @@ free_bios:
  * @flags: or into request flags
  **/
 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
-		       int data_direction, void *buffer, unsigned bufflen,
+		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
 		       int use_sg, int timeout, int retries, void *privdata,
 		       void (*done)(void *, char *, int, int), gfp_t gfp)
 {
@@ -472,7 +473,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	if (err)
 		goto free_req;
 
-	req->cmd_len = COMMAND_SIZE(cmd[0]);
+	req->cmd_len = cmd_len;
 	memcpy(req->cmd, cmd, req->cmd_len);
 	req->sense = sioc->sense;
 	req->sense_len = 0;
@@ -791,7 +792,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 		spin_lock_irqsave(q->queue_lock, flags);
 		if (blk_rq_tagged(req))
 			blk_queue_end_tag(q, req);
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		/*
@@ -932,9 +933,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
-	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
-		return;
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the
@@ -1199,38 +1197,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	return BLKPREP_KILL;
 }
 
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_driver *drv;
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
-		if (drv->prepare_flush)
-			return drv->prepare_flush(q, rq);
-	}
-
-	return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct request *flush_rq = rq->end_io_data;
-	struct scsi_driver *drv;
-
-	if (flush_rq->errors) {
-		printk("scsi: barrier error, disabling flush support\n");
-		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-	}
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-		drv->end_flush(q, rq);
-	}
-}
-
 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 			       sector_t *error_sector)
 {
@@ -1247,7 +1213,7 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 	return -EOPNOTSUPP;
 }
 
-static void scsi_generic_done(struct scsi_cmnd *cmd)
+static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
 	/*
@@ -1259,7 +1225,7 @@ static void scsi_generic_done(struct scsi_cmnd *cmd)
 	scsi_io_completion(cmd, cmd->bufflen, 0);
 }
 
-void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *req = cmd->request;
 
@@ -1276,8 +1242,8 @@ void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 	cmd->transfersize = req->data_len;
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
+	cmd->done = scsi_blk_pc_done;
 }
-EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
 
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
@@ -1374,7 +1340,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 	 * happening now.
 	 */
 	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
-		struct scsi_driver *drv;
 		int ret;
 
 		/*
@@ -1406,16 +1371,17 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 		/*
 		 * Initialize the actual SCSI command for this request.
 		 */
-		if (req->rq_disk) {
+		if (req->flags & REQ_BLOCK_PC) {
+			scsi_setup_blk_pc_cmnd(cmd);
+		} else if (req->rq_disk) {
+			struct scsi_driver *drv;
+
 			drv = *(struct scsi_driver **)req->rq_disk->private_data;
 			if (unlikely(!drv->init_command(cmd))) {
 				scsi_release_buffers(cmd);
 				scsi_put_command(cmd);
 				goto kill;
 			}
-		} else {
-			scsi_setup_blk_pc_cmnd(cmd);
-			cmd->done = scsi_generic_done;
 		}
 	}
 
@@ -1528,6 +1494,41 @@ static void scsi_kill_request(struct request *req, request_queue_t *q)
 	__scsi_done(cmd);
 }
 
+static void scsi_softirq_done(struct request *rq)
+{
+	struct scsi_cmnd *cmd = rq->completion_data;
+	unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
+	int disposition;
+
+	INIT_LIST_HEAD(&cmd->eh_entry);
+
+	disposition = scsi_decide_disposition(cmd);
+	if (disposition != SUCCESS &&
+	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+		sdev_printk(KERN_ERR, cmd->device,
+			    "timing out command, waited %lus\n",
+			    wait_for/HZ);
+		disposition = SUCCESS;
+	}
+
+	scsi_log_completion(cmd, disposition);
+
+	switch (disposition) {
+		case SUCCESS:
+			scsi_finish_command(cmd);
+			break;
+		case NEEDS_RETRY:
+			scsi_retry_command(cmd);
+			break;
+		case ADD_TO_MLQUEUE:
+			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+			break;
+		default:
+			if (!scsi_eh_scmd_add(cmd, 0))
+				scsi_finish_command(cmd);
+	}
+}
+
 /*
  * Function: scsi_request_fn()
  *
@@ -1702,17 +1703,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-
-	/*
-	 * ordered tags are superior to flush ordering
-	 */
-	if (shost->ordered_tag)
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
-	else if (shost->ordered_flush) {
-		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
-		q->prepare_flush_fn = scsi_prepare_flush_fn;
-		q->end_flush_fn = scsi_end_flush_fn;
-	}
+	blk_queue_softirq_done(q, scsi_softirq_done);
 
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
@@ -1901,8 +1892,16 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 	}
 
 	if(scsi_status_is_good(result)) {
-		data->header_length = header_length;
-		if(use_10_for_ms) {
+		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
+			     (modepage == 6 || modepage == 8))) {
+			/* Initio breakage? */
+			header_length = 0;
+			data->length = 13;
+			data->medium_type = 0;
+			data->device_specific = 0;
+			data->longlba = 0;
+			data->block_descriptor_length = 0;
+		} else if(use_10_for_ms) {
 			data->length = buffer[0]*256 + buffer[1] + 2;
 			data->medium_type = buffer[2];
 			data->device_specific = buffer[3];
@@ -1915,6 +1914,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 			data->device_specific = buffer[2];
 			data->block_descriptor_length = buffer[3];
 		}
+		data->header_length = header_length;
 	}
 
 	return result;
@@ -2257,3 +2257,61 @@ scsi_target_unblock(struct device *dev)
 		device_for_each_child(dev, NULL, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+
+struct work_queue_work {
+	struct work_struct	work;
+	void			(*fn)(void *);
+	void			*data;
+};
+
+static void execute_in_process_context_work(void *data)
+{
+	void (*fn)(void *data);
+	struct work_queue_work *wqw = data;
+
+	fn = wqw->fn;
+	data = wqw->data;
+
+	kfree(wqw);
+
+	fn(data);
+}
+
+/**
+ * scsi_execute_in_process_context - reliably execute the routine with user context
+ * @fn:		the function to execute
+ * @data:	data to pass to the function
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:	0 - function was executed
+ *		1 - function was scheduled for execution
+ *		<0 - error
+ */
+int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
+{
+	struct work_queue_work *wqw;
+
+	if (!in_interrupt()) {
+		fn(data);
+		return 0;
+	}
+
+	wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
+
+	if (unlikely(!wqw)) {
+		printk(KERN_ERR "Failed to allocate memory\n");
+		WARN_ON(1);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
+	wqw->fn = fn;
+	wqw->data = data;
+	schedule_work(&wqw->work);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
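
Two call-site notes on the API changes above, as illustrative sketches rather than code taken from the patch.

scsi_execute_async() no longer derives the CDB length from the opcode via COMMAND_SIZE(cmd[0]); callers now pass it explicitly, which also permits vendor-specific CDBs whose length the opcode table cannot describe. A minimal updated call site might look like the following (sdev, buffer, bufflen, priv and my_done are hypothetical names, not from this patch):

	unsigned char cdb[MAX_COMMAND_SIZE] = { READ_10, };

	/* Preserve the old behaviour by passing COMMAND_SIZE(cdb[0]) as cmd_len. */
	err = scsi_execute_async(sdev, cdb, COMMAND_SIZE(cdb[0]), DMA_FROM_DEVICE,
				 buffer, bufflen, 0 /* use_sg */, 30 * HZ, 3,
				 priv, my_done, GFP_KERNEL);

scsi_execute_in_process_context() runs its callback immediately when the caller already has process context and defers to a workqueue only from interrupt context. A hypothetical user (my_device_release and mydev are illustrative):

	static void my_device_release(void *data)
	{
		struct my_device *mydev = data;

		kfree(mydev);	/* always reached in process context */
	}

	/* Safe even from a final put() running in interrupt context: */
	scsi_execute_in_process_context(my_device_release, mydev);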