/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE         32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Zero
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}
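
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * caller that has prepared a struct scsi_request would inject it at the
 * head of the device queue so it runs ahead of any queued I/O, exactly
 * as scsi_do_req() below does.
 */
#if 0
static void example_inject(struct scsi_request *sreq)
{
        /* at_head = 1: bypass everything already sitting in the queue */
        scsi_insert_special_req(sreq, 1);
}
#endif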

static void scsi_run_queue(struct request_queue *q);
static void scsi_release_buffers(struct scsi_cmnd *cmd);

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req     - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        req->flags &= ~REQ_DONTPREP;
        req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Zero.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}
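
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * scsi_dispatch_cmd() in scsi.c uses scsi_queue_insert() roughly like
 * this when the low-level driver's queuecommand() refuses a command.
 */
#if 0
        rtn = host->hostt->queuecommand(cmd, scsi_done);
        if (rtn) {
                /* host or device can take no more commands right now;
                 * park the command back on the block queue and let the
                 * blocked counters throttle resubmission */
                scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
                                  rtn : SCSI_MLQUEUE_HOST_BUSY);
        }
#endif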

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq      - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by
         * the scsi_hosts[host].queuecommand() function; scsi_done in turn
         * calls the completion function supplied by the high level driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
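
/*
 * Illustrative sketch (not part of the original file, not compiled): an
 * upper-level driver issuing an asynchronous TEST UNIT READY through
 * scsi_do_req(); the function and "done" callback names are made up.
 */
#if 0
static void example_do_req(struct scsi_device *sdev,
                           void (*done)(struct scsi_cmnd *))
{
        unsigned char cmnd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
        struct scsi_request *sreq;

        sreq = scsi_allocate_request(sdev, GFP_KERNEL);
        if (!sreq)
                return;
        sreq->sr_data_direction = DMA_NONE;
        scsi_do_req(sreq, cmnd, NULL, 0, done, 10 * HZ, 3);
}
#endif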

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);
        int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
        struct request *req;

        req = blk_get_request(sreq->sr_device->request_queue, write,
                              __GFP_WAIT);
        if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT)) {
                sreq->sr_result = DRIVER_ERROR << 24;
                blk_put_request(req);
                return;
        }

        req->flags |= REQ_NOMERGE;
        req->waiting = &wait;
        req->end_io = scsi_wait_req_end_io;
        req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
        req->sense = sreq->sr_sense_buffer;
        req->sense_len = 0;
        memcpy(req->cmd, cmnd, req->cmd_len);
        req->timeout = timeout;
        req->flags |= REQ_BLOCK_PC;
        req->rq_disk = NULL;
        blk_insert_request(sreq->sr_device->request_queue, req,
                           sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
        wait_for_completion(&wait);
        sreq->sr_request->waiting = NULL;
        sreq->sr_result = req->errors;
        if (req->errors)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        blk_put_request(req);
}

EXPORT_SYMBOL(scsi_wait_req);
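
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * the synchronous counterpart of the example above; scsi_wait_req()
 * sleeps until the command finishes and the outcome lands in
 * sreq->sr_result.
 */
#if 0
static int example_wait_req(struct scsi_request *sreq)
{
        unsigned char cmnd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };

        sreq->sr_data_direction = DMA_NONE;
        scsi_wait_req(sreq, cmnd, NULL, 0, 10 * HZ, 3);
        return sreq->sr_result;
}
#endif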

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in seconds
 * @retries:    number of times to retry request
 * @flags:      flags to OR into the request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->timeout = timeout;
        req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);
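
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * cache flush issued through scsi_execute(); a zero return means the
 * command completed without error.
 */
#if 0
static int example_sync_cache(struct scsi_device *sdev)
{
        unsigned char cmd[10] = { SYNCHRONIZE_CACHE, };

        return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
                            10 * HZ, 3, 0);
}
#endif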


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
                memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, 0);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req);
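
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * scsi_execute_req() with sense decoding; on failure the normalized
 * sense data in sshdr tells the caller why the device complained.
 */
#if 0
static int example_tur(struct scsi_device *sdev)
{
        unsigned char cmd[6] = { TEST_UNIT_READY, };
        struct scsi_sense_hdr sshdr;
        int result;

        result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
                                  10 * HZ, 3);
        if (result && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == UNIT_ATTENTION)
                return -EAGAIN; /* e.g. media change; caller may retry */
        return result;
}
#endif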

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     1
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;

        return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd      - command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can happen for a number of reasons - the main one
 *              is I/O errors in the middle of the request, in which
 *              case we need to request the blocks that come after the
 *              bad sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long flags;

        scsi_unprep_request(req);
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 * Notes:       If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue) {
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);
                                cmd = NULL;
                        }
                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}
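
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * allocation and release pair up through cmd->sglist_len, which records
 * which mempool the table came from.
 */
#if 0
static void example_sgtable(struct scsi_cmnd *cmd)
{
        struct scatterlist *sgl;

        cmd->use_sg = 12;       /* falls in the 9..16 bucket */
        sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);  /* sets sglist_len = 1 */
        if (sgl)
                scsi_free_sgtable(sgl, cmd->sglist_len); /* back to sgpool-16 */
}
#endif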

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */

                /*
                 * If the command completed without error, then either
                 * finish off the rest of the command, or start a new one.
                 */
                if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
                        return;
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        if (!(req->flags & REQ_QUIET))
                                dev_printk(KERN_INFO,
                                           &cmd->device->sdev_gendev,
                                           "Device not ready.\n");
                        scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        if (!(req->flags & REQ_QUIET)) {
                                dev_printk(KERN_INFO,
                                           &cmd->device->sdev_gendev,
                                           "Volume overflow, CDB: ");
                                __scsi_print_command(cmd->data_cmnd);
                                scsi_print_sense("", cmd);
                        }
                        scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                if (!(req->flags & REQ_QUIET)) {
                        dev_printk(KERN_INFO, &cmd->device->sdev_gendev,
                                   "SCSI error: return code = 0x%x\n", result);

                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                }
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt))
                return BLKPREP_DEFER;

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
        BUG_ON(!blk_pc_request(cmd->request));
        scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                goto kill;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state, so don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        goto kill;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones) */
1160                 specials_only = sdev->sdev_state;
1161         }
1162
1163         /*
1164          * Find the actual device driver associated with this command.
1165          * The SPECIAL requests are things like character device or
1166          * ioctls, which did not originate from ll_rw_blk.  Note that
1167          * the special field is also used to indicate the cmd for
1168          * the remainder of a partially fulfilled request that can 
1169          * come up when there is a medium error.  We have to treat
1170          * these two cases differently.  We differentiate by looking
1171          * at request->cmd, as this tells us the real story.
1172          */
1173         if (req->flags & REQ_SPECIAL && req->special) {
1174                 struct scsi_request *sreq = req->special;
1175
1176                 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
1177                         cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1178                         if (unlikely(!cmd))
1179                                 goto defer;
1180                         scsi_init_cmd_from_req(cmd, sreq);
1181                 } else
1182                         cmd = req->special;
1183         } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1184
1185                 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1186                         if(specials_only == SDEV_QUIESCE ||
1187                                         specials_only == SDEV_BLOCK)
1188                                 goto defer;
1189                         
1190                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
1191                                sdev->host->host_no, sdev->id, sdev->lun);
1192                         goto kill;
1193                 }
1194                         
1195                         
1196                 /*
1197                  * Now try and find a command block that we can use.
1198                  */
1199                 if (!req->special) {
1200                         cmd = scsi_get_command(sdev, GFP_ATOMIC);
1201                         if (unlikely(!cmd))
1202                                 goto defer;
1203                 } else
1204                         cmd = req->special;
1205                 
1206                 /* pull a tag out of the request if we have one */
1207                 cmd->tag = req->tag;
1208         } else {
1209                 blk_dump_rq_flags(req, "SCSI bad req");
1210                 goto kill;
1211         }
1212         
1213         /* note the overloading of req->special.  When the tag
1214          * is active it always means cmd.  If the tag goes
1215          * back for re-queueing, it may be reset */
1216         req->special = cmd;
1217         cmd->request = req;
1218         
1219         /*
1220          * FIXME: drop the lock here because the functions below
1221          * expect to be called without the queue lock held.  Also,
1222          * previously, we dequeued the request before dropping the
1223          * lock.  We hope REQ_STARTED prevents anything untoward from
1224          * happening now.
1225          */
1226         if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1227                 struct scsi_driver *drv;
1228                 int ret;
1229
1230                 /*
1231                  * This will do a couple of things:
1232                  *  1) Fill in the actual SCSI command.
1233                  *  2) Fill in any other upper-level specific fields
1234                  * (timeout).
1235                  *
1236                  * If this returns 0, it means that the request failed
1237                  * (reading past end of disk, reading offline device,
1238                  * etc).   This won't actually talk to the device, but
1239                  * some kinds of consistency checking may cause the     
1240                  * request to be rejected immediately.
1241                  */
1242
1243                 /* 
1244                  * This sets up the scatter-gather table (allocating if
1245                  * required).
1246                  */
1247                 ret = scsi_init_io(cmd);
1248                 switch(ret) {
1249                 case BLKPREP_KILL:
1250                         /* BLKPREP_KILL return also releases the command */
1251                         goto kill;
1252                 case BLKPREP_DEFER:
1253                         goto defer;
1254                 }
1255                 
1256                 /*
1257                  * Initialize the actual SCSI command for this request.
1258                  */
1259                 if (req->rq_disk) {
1260                         drv = *(struct scsi_driver **)req->rq_disk->private_data;
1261                         if (unlikely(!drv->init_command(cmd))) {
1262                                 scsi_release_buffers(cmd);
1263                                 scsi_put_command(cmd);
1264                                 goto kill;
1265                         }
1266                 } else {
1267                         memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1268                         cmd->cmd_len = req->cmd_len;
1269                         if (rq_data_dir(req) == WRITE)
1270                                 cmd->sc_data_direction = DMA_TO_DEVICE;
1271                         else if (req->data_len)
1272                                 cmd->sc_data_direction = DMA_FROM_DEVICE;
1273                         else
1274                                 cmd->sc_data_direction = DMA_NONE;
1275                         
1276                         cmd->transfersize = req->data_len;
1277                         cmd->allowed = 3;
1278                         cmd->timeout_per_command = req->timeout;
1279                         cmd->done = scsi_generic_done;
1280                 }
1281         }
1282
1283         /*
1284          * The request is now prepped, no need to come back here
1285          */
1286         req->flags |= REQ_DONTPREP;
1287         return BLKPREP_OK;
1288
1289  defer:
1290         /* If we defer, the elv_next_request() returns NULL, but the
1291          * queue must be restarted, so we plug here if no returning
1292          * command will automatically do that. */
1293         if (sdev->device_busy == 0)
1294                 blk_plug_device(q);
1295         return BLKPREP_DEFER;
1296  kill:
1297         req->errors = DID_NO_CONNECT << 16;
1298         return BLKPREP_KILL;
1299 }
1300
1301 /*
1302  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1303  * return 0.
1304  *
1305  * Called with the queue_lock held.
1306  */
1307 static inline int scsi_dev_queue_ready(struct request_queue *q,
1308                                   struct scsi_device *sdev)
1309 {
1310         if (sdev->device_busy >= sdev->queue_depth)
1311                 return 0;
1312         if (sdev->device_busy == 0 && sdev->device_blocked) {
1313                 /*
1314                  * unblock after device_blocked iterates to zero
1315                  */
1316                 if (--sdev->device_blocked == 0) {
1317                         SCSI_LOG_MLQUEUE(3,
1318                                 printk("scsi%d (%d:%d) unblocking device at"
1319                                        " zero depth\n", sdev->host->host_no,
1320                                        sdev->id, sdev->lun));
1321                 } else {
1322                         blk_plug_device(q);
1323                         return 0;
1324                 }
1325         }
1326         if (sdev->device_blocked)
1327                 return 0;
1328
1329         return 1;
1330 }
1331
1332 /*
1333  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1334  * return 0. We must end up running the queue again whenever 0 is
1335  * returned, else IO can hang.
1336  *
1337  * Called with host_lock held.
1338  */
1339 static inline int scsi_host_queue_ready(struct request_queue *q,
1340                                    struct Scsi_Host *shost,
1341                                    struct scsi_device *sdev)
1342 {
1343         if (shost->shost_state == SHOST_RECOVERY)
1344                 return 0;
1345         if (shost->host_busy == 0 && shost->host_blocked) {
1346                 /*
1347                  * unblock after host_blocked iterates to zero
1348                  */
1349                 if (--shost->host_blocked == 0) {
1350                         SCSI_LOG_MLQUEUE(3,
1351                                 printk("scsi%d unblocking host at zero depth\n",
1352                                         shost->host_no));
1353                 } else {
1354                         blk_plug_device(q);
1355                         return 0;
1356                 }
1357         }
1358         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1359             shost->host_blocked || shost->host_self_blocked) {
1360                 if (list_empty(&sdev->starved_entry))
1361                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1362                 return 0;
1363         }
1364
1365         /* We're OK to process the command, so we can't be starved */
1366         if (!list_empty(&sdev->starved_entry))
1367                 list_del_init(&sdev->starved_entry);
1368
1369         return 1;
1370 }
1371
1372 /*
1373  * Kill a request for a dead device
1374  */
1375 static void scsi_kill_request(struct request *req, request_queue_t *q)
1376 {
1377         struct scsi_cmnd *cmd = req->special;
1378
1379         blkdev_dequeue_request(req);
1380
1381         if (unlikely(cmd == NULL)) {
1382                 printk(KERN_CRIT "impossible request in %s.\n",
1383                                  __FUNCTION__);
1384                 BUG();
1385         }
1386
1387         scsi_init_cmd_errh(cmd);
1388         cmd->result = DID_NO_CONNECT << 16;
1389         atomic_inc(&cmd->device->iorequest_cnt);
1390         __scsi_done(cmd);
1391 }
1392
1393 /*
1394  * Function:    scsi_request_fn()
1395  *
1396  * Purpose:     Main strategy routine for SCSI.
1397  *
1398  * Arguments:   q       - Pointer to actual queue.
1399  *
1400  * Returns:     Nothing
1401  *
1402  * Lock status: IO request lock assumed to be held when called.
1403  */
1404 static void scsi_request_fn(struct request_queue *q)
1405 {
1406         struct scsi_device *sdev = q->queuedata;
1407         struct Scsi_Host *shost;
1408         struct scsi_cmnd *cmd;
1409         struct request *req;
1410
1411         if (!sdev) {
1412                 printk("scsi: killing requests for dead queue\n");
1413                 while ((req = elv_next_request(q)) != NULL)
1414                         scsi_kill_request(req, q);
1415                 return;
1416         }
1417
1418         if(!get_device(&sdev->sdev_gendev))
1419                 /* We must be tearing the block queue down already */
1420                 return;
1421
1422         /*
1423          * To start with, we keep looping until the queue is empty, or until
1424          * the host is no longer able to accept any more requests.
1425          */
1426         shost = sdev->host;
1427         while (!blk_queue_plugged(q)) {
1428                 int rtn;
1429                 /*
1430                  * get next queueable request.  We do this early to make sure
1431                  * that the request is fully prepared even if we cannot 
1432                  * accept it.
1433                  */
1434                 req = elv_next_request(q);
1435                 if (!req || !scsi_dev_queue_ready(q, sdev))
1436                         break;
1437
1438                 if (unlikely(!scsi_device_online(sdev))) {
1439                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1440                                sdev->host->host_no, sdev->id, sdev->lun);
1441                         scsi_kill_request(req, q);
1442                         continue;
1443                 }
1444
1446                 /*
1447                  * Remove the request from the request list.
1448                  */
1449                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1450                         blkdev_dequeue_request(req);
1451                 sdev->device_busy++;
1452
1453                 spin_unlock(q->queue_lock);
1454                 cmd = req->special;
1455                 if (unlikely(cmd == NULL)) {
1456                         printk(KERN_CRIT "impossible request in %s.\n"
1457                                          "please mail a stack trace to "
1458                                          "linux-scsi@vger.kernel.org\n",
1459                                          __FUNCTION__);
1460                         BUG();
1461                 }
1462                 spin_lock(shost->host_lock);
1463
1464                 if (!scsi_host_queue_ready(q, shost, sdev))
1465                         goto not_ready;
1466                 if (sdev->single_lun) {
1467                         if (scsi_target(sdev)->starget_sdev_user &&
1468                             scsi_target(sdev)->starget_sdev_user != sdev)
1469                                 goto not_ready;
1470                         scsi_target(sdev)->starget_sdev_user = sdev;
1471                 }
1472                 shost->host_busy++;
1473
1474                 /*
1475                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1476                  *              take the lock again.
1477                  */
1478                 spin_unlock_irq(shost->host_lock);
1479
1480                 /*
1481                  * Finally, initialize any error handling parameters, and set up
1482                  * the timers for timeouts.
1483                  */
1484                 scsi_init_cmd_errh(cmd);
1485
1486                 /*
1487                  * Dispatch the command to the low-level driver.
1488                  */
1489                 rtn = scsi_dispatch_cmd(cmd);
1490                 spin_lock_irq(q->queue_lock);
1491                 if (rtn) {
1492                         /* we're refusing the command; because of
1493                          * the way locks get dropped, we need to
1494                          * check here if plugging is required */
1495                         if (sdev->device_busy == 0)
1496                                 blk_plug_device(q);
1497
1498                         break;
1499                 }
1500         }
1501
1502         goto out;
1503
1504  not_ready:
1505         spin_unlock_irq(shost->host_lock);
1506
1507         /*
1508          * lock q, handle tag, requeue req, and decrement device_busy. We
1509          * must return with queue_lock held.
1510          *
1511          * Decrementing device_busy without checking it is OK, as all such
1512          * cases (host limits or settings) should run the queue at some
1513          * later time.
1514          */
1515         scsi_unprep_request(req);
1516         spin_lock_irq(q->queue_lock);
1517         blk_requeue_request(q, req);
1518         sdev->device_busy--;
1519         if (sdev->device_busy == 0)
1520                 blk_plug_device(q);
1521  out:
1522         /* must be careful here...if we trigger the ->remove() function
1523          * we cannot be holding the q lock */
1524         spin_unlock_irq(q->queue_lock);
1525         put_device(&sdev->sdev_gendev);
1526         spin_lock_irq(q->queue_lock);
1527 }
1528
1529 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1530 {
1531         struct device *host_dev;
1532         u64 bounce_limit = 0xffffffff;
1533
1534         if (shost->unchecked_isa_dma)
1535                 return BLK_BOUNCE_ISA;
1536         /*
1537          * Platforms with virtual-DMA translation
1538          * hardware have no practical limit.
1539          */
1540         if (!PCI_DMA_BUS_IS_PHYS)
1541                 return BLK_BOUNCE_ANY;
1542
1543         host_dev = scsi_get_device(shost);
1544         if (host_dev && host_dev->dma_mask)
1545                 bounce_limit = *host_dev->dma_mask;
1546
1547         return bounce_limit;
1548 }
1549 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
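
/*
 * Illustrative sketch, not part of the original file: any request queue
 * that feeds commands to a host should honour the limit computed above,
 * exactly as scsi_alloc_queue() below does for the per-device queues.
 */
static void example_apply_bounce_limit(struct Scsi_Host *shost,
                                       struct request_queue *q)
{
        /* pages above this address get bounced into reachable memory */
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
}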
1550
1551 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1552 {
1553         struct Scsi_Host *shost = sdev->host;
1554         struct request_queue *q;
1555
1556         q = blk_init_queue(scsi_request_fn, NULL);
1557         if (!q)
1558                 return NULL;
1559
1560         blk_queue_prep_rq(q, scsi_prep_fn);
1561
1562         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1563         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1564         blk_queue_max_sectors(q, shost->max_sectors);
1565         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1566         blk_queue_segment_boundary(q, shost->dma_boundary);
1567         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1568
1569         /*
1570          * ordered tags are superior to flush ordering
1571          */
1572         if (shost->ordered_tag)
1573                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1574         else if (shost->ordered_flush) {
1575                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1576                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1577                 q->end_flush_fn = scsi_end_flush_fn;
1578         }
1579
1580         if (!shost->use_clustering)
1581                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1582         return q;
1583 }
1584
1585 void scsi_free_queue(struct request_queue *q)
1586 {
1587         blk_cleanup_queue(q);
1588 }
1589
1590 /*
1591  * Function:    scsi_block_requests()
1592  *
1593  * Purpose:     Utility function used by low-level drivers to prevent further
1594  *              commands from being queued to the device.
1595  *
1596  * Arguments:   shost       - Host in question
1597  *
1598  * Returns:     Nothing
1599  *
1600  * Lock status: No locks are assumed held.
1601  *
1602  * Notes:       There is no timer nor any other means by which the requests
1603  *              get unblocked other than the low-level driver calling
1604  *              scsi_unblock_requests().
1605  */
1606 void scsi_block_requests(struct Scsi_Host *shost)
1607 {
1608         shost->host_self_blocked = 1;
1609 }
1610 EXPORT_SYMBOL(scsi_block_requests);
1611
1612 /*
1613  * Function:    scsi_unblock_requests()
1614  *
1615  * Purpose:     Utility function used by low-level drivers to allow further
1616  *              commands to be queued to the device.
1617  *
1618  * Arguments:   shost       - Host in question
1619  *
1620  * Returns:     Nothing
1621  *
1622  * Lock status: No locks are assumed held.
1623  *
1624  * Notes:       There is no timer nor any other means by which the requests
1625  *              get unblocked other than the low-level driver calling
1626  *              scsi_unblock_requests().
1627  *
1628  *              This is done as an API function so that changes to the
1629  *              internals of the scsi mid-layer won't require wholesale
1630  *              changes to drivers that use this feature.
1631  */
1632 void scsi_unblock_requests(struct Scsi_Host *shost)
1633 {
1634         shost->host_self_blocked = 0;
1635         scsi_run_host_queues(shost);
1636 }
1637 EXPORT_SYMBOL(scsi_unblock_requests);
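
/*
 * Illustrative sketch, not part of the original file: a low-level driver
 * would typically bracket a disruptive internal operation (the firmware
 * reload here is a hypothetical stand-in) with the pair above, so the
 * midlayer stops feeding it commands in the meantime.
 */
static void example_reload_firmware(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);     /* no new commands from now on */

        /* ... drain outstanding commands, reload firmware ... */

        scsi_unblock_requests(shost);   /* restarts the device queues */
}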
1638
1639 int __init scsi_init_queue(void)
1640 {
1641         int i;
1642
1643         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1644                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1645                 int size = sgp->size * sizeof(struct scatterlist);
1646
1647                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1648                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1649                 if (!sgp->slab) {
1650                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1651                                         sgp->name);
                             /* a NULL slab here would oops on first use */
                             return -ENOMEM;
1652                 }
1653
1654                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1655                                 mempool_alloc_slab, mempool_free_slab,
1656                                 sgp->slab);
1657                 if (!sgp->pool) {
1658                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1659                                         sgp->name);
                             return -ENOMEM;
1660                 }
1661         }
1662
1663         return 0;
1664 }
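
/*
 * Illustrative sketch, not part of the original file: the pools set up
 * above are sized in powers of two, so a scatterlist allocation for a
 * given segment count is served by the first pool big enough to hold it.
 * This helper is hypothetical; the real selection lives in
 * scsi_alloc_sgtable().
 */
static struct scsi_host_sg_pool *example_sg_pool_for(int nents)
{
        int i;

        for (i = 0; i < SG_MEMPOOL_NR; i++)
                if (nents <= scsi_sg_pools[i].size)
                        return &scsi_sg_pools[i];
        return NULL;    /* more than SCSI_MAX_PHYS_SEGMENTS segments */
}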
1665
1666 void scsi_exit_queue(void)
1667 {
1668         int i;
1669
1670         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1671                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1672                 mempool_destroy(sgp->pool);
1673                 kmem_cache_destroy(sgp->slab);
1674         }
1675 }

1676 /**
1677  *      scsi_mode_sense - issue a mode sense, falling back from ten to
1678  *              six bytes if necessary.
1679  *      @sdev:  SCSI device to be queried
1680  *      @dbd:   set if mode sense will allow block descriptors to be returned
1681  *      @modepage: mode page being requested
1682  *      @buffer: request buffer (may not be smaller than eight bytes)
1683  *      @len:   length of request buffer.
1684  *      @timeout: command timeout
1685  *      @retries: number of retries before failing
1686  *      @data: returns a structure abstracting the mode header data
1687  *      @sshdr: place to put parsed sense data (or NULL if no sense is to
1688  *              be collected).
1689  *
1690  *      Returns the command result.  On success, @data is filled in and
1691  *      @data->header_length holds the header offset (either 4 or 8,
1692  *      depending on whether a six or ten byte command was issued).
1693  **/
1694 int
1695 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1696                   unsigned char *buffer, int len, int timeout, int retries,
1697                   struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
1698         unsigned char cmd[12];
1699         int use_10_for_ms;
1700         int header_length;
1701         int result;
1702         struct scsi_sense_hdr my_sshdr;
1703
1704         memset(data, 0, sizeof(*data));
1705         memset(&cmd[0], 0, 12);
1706         cmd[1] = dbd & 0x18;    /* allows DBD and LLBAA bits */
1707         cmd[2] = modepage;
1708
1709         /* caller might not be interested in sense, but we need it */
1710         if (!sshdr)
1711                 sshdr = &my_sshdr;
1712
1713  retry:
1714         use_10_for_ms = sdev->use_10_for_ms;
1715
1716         if (use_10_for_ms) {
1717                 if (len < 8)
1718                         len = 8;
1719
1720                 cmd[0] = MODE_SENSE_10;
1721                 cmd[8] = len;
1722                 header_length = 8;
1723         } else {
1724                 if (len < 4)
1725                         len = 4;
1726
1727                 cmd[0] = MODE_SENSE;
1728                 cmd[4] = len;
1729                 header_length = 4;
1730         }
1731
1732         memset(buffer, 0, len);
1733
1734         result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1735                                   sshdr, timeout, retries);
1736
1737         /* This code looks awful: what it's doing is making sure an
1738          * ILLEGAL REQUEST sense return identifies the actual command
1739          * byte as the problem.  MODE_SENSE commands can return
1740          * ILLEGAL REQUEST if the code page isn't supported */
1741
1742         if (use_10_for_ms && !scsi_status_is_good(result) &&
1743             (driver_byte(result) & DRIVER_SENSE)) {
1744                 if (scsi_sense_valid(sshdr)) {
1745                         if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1746                             (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1747                                 /* 
1748                                  * Invalid command operation code
1749                                  */
1750                                 sdev->use_10_for_ms = 0;
1751                                 goto retry;
1752                         }
1753                 }
1754         }
1755
1756         if (scsi_status_is_good(result)) {
1757                 data->header_length = header_length;
1758                 if (use_10_for_ms) {
1759                         data->length = buffer[0]*256 + buffer[1] + 2;
1760                         data->medium_type = buffer[2];
1761                         data->device_specific = buffer[3];
1762                         data->longlba = buffer[4] & 0x01;
1763                         data->block_descriptor_length = buffer[6]*256
1764                                 + buffer[7];
1765                 } else {
1766                         data->length = buffer[0] + 1;
1767                         data->medium_type = buffer[1];
1768                         data->device_specific = buffer[2];
1769                         data->block_descriptor_length = buffer[3];
1770                 }
1771         }
1772
1773         return result;
1774 }
1775 EXPORT_SYMBOL(scsi_mode_sense);
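
/*
 * Illustrative sketch, not part of the original file: reading the WCE bit
 * from the caching mode page (0x08) with the helper above.  The buffer
 * size, timeout and retry count are arbitrary example values.
 */
static int example_wce_enabled(struct scsi_device *sdev)
{
        unsigned char buffer[64];
        struct scsi_mode_data data;
        int res;

        res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* caching page */,
                              buffer, sizeof(buffer), 30 * HZ, 3,
                              &data, NULL);
        if (!scsi_status_is_good(res))
                return -EIO;

        /* the page follows the mode header and any block descriptors */
        return (buffer[data.header_length +
                       data.block_descriptor_length + 2] & 0x04) ? 1 : 0;
}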
1776
1777 int
1778 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1779 {
1780         char cmd[] = {
1781                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1782         };
1783         struct scsi_sense_hdr sshdr;
1784         int result;
1785         
1786         result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1787                                   timeout, retries);
1788
1789         if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1790
1791                 if ((scsi_sense_valid(&sshdr)) &&
1792                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1793                      (sshdr.sense_key == NOT_READY))) {
1794                         sdev->changed = 1;
1795                         result = 0;
1796                 }
1797         }
1798         return result;
1799 }
1800 EXPORT_SYMBOL(scsi_test_unit_ready);
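
/*
 * Illustrative sketch, not part of the original file: polling a device
 * for readiness.  For removable media the helper above also folds a
 * UNIT ATTENTION/NOT READY sense into sdev->changed and reports success,
 * so callers learn about media changes as a side effect.
 */
static int example_media_ready(struct scsi_device *sdev)
{
        /* arbitrary example values: 30 second timeout, 3 retries */
        return scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0;
}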
1801
1802 /**
1803  *      scsi_device_set_state - Take the given device through the device
1804  *              state model.
1805  *      @sdev:  scsi device to change the state of.
1806  *      @state: state to change to.
1807  *
1808  *      Returns zero if successful, or an error code if the requested
1809  *      transition is illegal.
1810  **/
1811 int
1812 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1813 {
1814         enum scsi_device_state oldstate = sdev->sdev_state;
1815
1816         if (state == oldstate)
1817                 return 0;
1818
1819         switch (state) {
1820         case SDEV_CREATED:
1821                 /* There are no legal states that come back to
1822                  * created.  This is the manually initialised start
1823                  * state */
1824                 goto illegal;
1825                         
1826         case SDEV_RUNNING:
1827                 switch (oldstate) {
1828                 case SDEV_CREATED:
1829                 case SDEV_OFFLINE:
1830                 case SDEV_QUIESCE:
1831                 case SDEV_BLOCK:
1832                         break;
1833                 default:
1834                         goto illegal;
1835                 }
1836                 break;
1837
1838         case SDEV_QUIESCE:
1839                 switch (oldstate) {
1840                 case SDEV_RUNNING:
1841                 case SDEV_OFFLINE:
1842                         break;
1843                 default:
1844                         goto illegal;
1845                 }
1846                 break;
1847
1848         case SDEV_OFFLINE:
1849                 switch (oldstate) {
1850                 case SDEV_CREATED:
1851                 case SDEV_RUNNING:
1852                 case SDEV_QUIESCE:
1853                 case SDEV_BLOCK:
1854                         break;
1855                 default:
1856                         goto illegal;
1857                 }
1858                 break;
1859
1860         case SDEV_BLOCK:
1861                 switch (oldstate) {
1862                 case SDEV_CREATED:
1863                 case SDEV_RUNNING:
1864                         break;
1865                 default:
1866                         goto illegal;
1867                 }
1868                 break;
1869
1870         case SDEV_CANCEL:
1871                 switch (oldstate) {
1872                 case SDEV_CREATED:
1873                 case SDEV_RUNNING:
1874                 case SDEV_OFFLINE:
1875                 case SDEV_BLOCK:
1876                         break;
1877                 default:
1878                         goto illegal;
1879                 }
1880                 break;
1881
1882         case SDEV_DEL:
1883                 switch (oldstate) {
1884                 case SDEV_CANCEL:
1885                         break;
1886                 default:
1887                         goto illegal;
1888                 }
1889                 break;
1890
1891         }
1892         sdev->sdev_state = state;
1893         return 0;
1894
1895  illegal:
1896         SCSI_LOG_ERROR_RECOVERY(1, 
1897                                 dev_printk(KERN_ERR, &sdev->sdev_gendev,
1898                                            "Illegal state transition %s->%s\n",
1899                                            scsi_device_state_name(oldstate),
1900                                            scsi_device_state_name(state))
1901                                 );
1902         return -EINVAL;
1903 }
1904 EXPORT_SYMBOL(scsi_device_set_state);
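
/*
 * Illustrative sketch, not part of the original file: device teardown has
 * to pass through SDEV_CANCEL on its way to SDEV_DEL; asking for a direct
 * SDEV_RUNNING -> SDEV_DEL transition would be rejected with -EINVAL by
 * the checks above.
 */
static int example_start_removal(struct scsi_device *sdev)
{
        int err = scsi_device_set_state(sdev, SDEV_CANCEL);

        if (err)
                return err;     /* transition was illegal */

        /* ... abort or complete outstanding commands ... */

        return scsi_device_set_state(sdev, SDEV_DEL);
}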
1905
1906 /**
1907  *      scsi_device_quiesce - Block user issued commands.
1908  *      @sdev:  scsi device to quiesce.
1909  *
1910  *      This works by trying to transition to the SDEV_QUIESCE state
1911  *      (which must be a legal transition).  When the device is in this
1912  *      state, only special requests will be accepted, all others will
1913  *      be deferred.  Since special requests may also be requeued requests,
1914  *      a successful return doesn't guarantee the device will be 
1915  *      totally quiescent.
1916  *
1917  *      Must be called with user context, may sleep.
1918  *
1919  *      Returns zero if successful, or an error if not.
1920  **/
1921 int
1922 scsi_device_quiesce(struct scsi_device *sdev)
1923 {
1924         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1925         if (err)
1926                 return err;
1927
1928         scsi_run_queue(sdev->request_queue);
1929         while (sdev->device_busy) {
1930                 msleep_interruptible(200);
1931                 scsi_run_queue(sdev->request_queue);
1932         }
1933         return 0;
1934 }
1935 EXPORT_SYMBOL(scsi_device_quiesce);
1936
1937 /**
1938  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1939  *      @sdev:  scsi device to resume.
1940  *
1941  *      Moves the device from quiesced back to running and restarts the
1942  *      queues.
1943  *
1944  *      Must be called with user context, may sleep.
1945  **/
1946 void
1947 scsi_device_resume(struct scsi_device *sdev)
1948 {
1949         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1950                 return;
1951         scsi_run_queue(sdev->request_queue);
1952 }
1953 EXPORT_SYMBOL(scsi_device_resume);
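
/*
 * Illustrative sketch, not part of the original file: quiesce/resume
 * bracket work that must not compete with user I/O.  While quiesced,
 * only special requests (such as those built by scsi_execute_req) are
 * processed; everything else stays on the queue until resume.
 */
static int example_exclusive_maintenance(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);

        if (err)
                return err;

        /* ... issue special requests; user I/O remains deferred ... */

        scsi_device_resume(sdev);
        return 0;
}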
1954
1955 static void
1956 device_quiesce_fn(struct scsi_device *sdev, void *data)
1957 {
1958         scsi_device_quiesce(sdev);
1959 }
1960
1961 void
1962 scsi_target_quiesce(struct scsi_target *starget)
1963 {
1964         starget_for_each_device(starget, NULL, device_quiesce_fn);
1965 }
1966 EXPORT_SYMBOL(scsi_target_quiesce);
1967
1968 static void
1969 device_resume_fn(struct scsi_device *sdev, void *data)
1970 {
1971         scsi_device_resume(sdev);
1972 }
1973
1974 void
1975 scsi_target_resume(struct scsi_target *starget)
1976 {
1977         starget_for_each_device(starget, NULL, device_resume_fn);
1978 }
1979 EXPORT_SYMBOL(scsi_target_resume);
1980
1981 /**
1982  * scsi_internal_device_block - internal function to put a device
1983  *                              temporarily into the SDEV_BLOCK state
1984  * @sdev:       device to block
1985  *
1986  * Block request made by scsi lld's to temporarily stop all
1987  * scsi commands on the specified device.  Called from interrupt
1988  * or normal process context.
1989  *
1990  * Returns zero if successful or error if not
1991  *
1992  * Notes:       
1993  *      This routine transitions the device to the SDEV_BLOCK state
1994  *      (which must be a legal transition).  When the device is in this
1995  *      state, all commands are deferred until the scsi lld reenables
1996  *      the device with scsi_internal_device_unblock() or device_block_tmo fires.
1997  *      This routine assumes the host_lock is held on entry.
1998  **/
1999 int
2000 scsi_internal_device_block(struct scsi_device *sdev)
2001 {
2002         request_queue_t *q = sdev->request_queue;
2003         unsigned long flags;
2004         int err = 0;
2005
2006         err = scsi_device_set_state(sdev, SDEV_BLOCK);
2007         if (err)
2008                 return err;
2009
2010         /* 
2011          * The device has transitioned to SDEV_BLOCK.  Stop the
2012          * block layer from calling the midlayer with this device's
2013          * request queue. 
2014          */
2015         spin_lock_irqsave(q->queue_lock, flags);
2016         blk_stop_queue(q);
2017         spin_unlock_irqrestore(q->queue_lock, flags);
2018
2019         return 0;
2020 }
2021 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2022  
2023 /**
2024  * scsi_internal_device_unblock - resume a device after a block request
2025  * @sdev:       device to resume
2026  *
2027  * Called by scsi lld's or the midlayer to restart the device queue
2028  * for the previously suspended scsi device.  Called from interrupt or
2029  * normal process context.
2030  *
2031  * Returns zero if successful or error if not.
2032  *
2033  * Notes:       
2034  *      This routine transitions the device to the SDEV_RUNNING state
2035  *      (which must be a legal transition) allowing the midlayer to
2036  *      goose the queue for this device.  This routine assumes the 
2037  *      host_lock is held upon entry.
2038  **/
2039 int
2040 scsi_internal_device_unblock(struct scsi_device *sdev)
2041 {
2042         request_queue_t *q = sdev->request_queue; 
2043         int err;
2044         unsigned long flags;
2045         
2046         /* 
2047          * Try to transition the scsi device to SDEV_RUNNING
2048          * and goose the device queue if successful.  
2049          */
2050         err = scsi_device_set_state(sdev, SDEV_RUNNING);
2051         if (err)
2052                 return err;
2053
2054         spin_lock_irqsave(q->queue_lock, flags);
2055         blk_start_queue(q);
2056         spin_unlock_irqrestore(q->queue_lock, flags);
2057
2058         return 0;
2059 }
2060 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
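
/*
 * Illustrative sketch, not part of the original file: an LLD reacting to
 * a transient path event.  Both helpers above are documented as assuming
 * the host_lock is held on entry, so a real caller must take it first.
 */
static void example_path_event(struct scsi_device *sdev, int path_up)
{
        if (path_up)
                scsi_internal_device_unblock(sdev);     /* restart the queue */
        else
                scsi_internal_device_block(sdev);       /* defer all commands */
}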
2061
2062 static void
2063 device_block(struct scsi_device *sdev, void *data)
2064 {
2065         scsi_internal_device_block(sdev);
2066 }
2067
2068 static int
2069 target_block(struct device *dev, void *data)
2070 {
2071         if (scsi_is_target_device(dev))
2072                 starget_for_each_device(to_scsi_target(dev), NULL,
2073                                         device_block);
2074         return 0;
2075 }
2076
2077 void
2078 scsi_target_block(struct device *dev)
2079 {
2080         if (scsi_is_target_device(dev))
2081                 starget_for_each_device(to_scsi_target(dev), NULL,
2082                                         device_block);
2083         else
2084                 device_for_each_child(dev, NULL, target_block);
2085 }
2086 EXPORT_SYMBOL_GPL(scsi_target_block);
2087
2088 static void
2089 device_unblock(struct scsi_device *sdev, void *data)
2090 {
2091         scsi_internal_device_unblock(sdev);
2092 }
2093
2094 static int
2095 target_unblock(struct device *dev, void *data)
2096 {
2097         if (scsi_is_target_device(dev))
2098                 starget_for_each_device(to_scsi_target(dev), NULL,
2099                                         device_unblock);
2100         return 0;
2101 }
2102
2103 void
2104 scsi_target_unblock(struct device *dev)
2105 {
2106         if (scsi_is_target_device(dev))
2107                 starget_for_each_device(to_scsi_target(dev), NULL,
2108                                         device_unblock);
2109         else
2110                 device_for_each_child(dev, NULL, target_unblock);
2111 }
2112 EXPORT_SYMBOL_GPL(scsi_target_unblock);
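
/*
 * Illustrative sketch, not part of the original file: a transport class
 * noticing a remote port drop off the fabric could block the whole
 * target, then unblock it if the port returns before a recovery timer
 * (hypothetical here) expires.
 */
static void example_port_event(struct device *target_dev, int port_gone)
{
        if (port_gone)
                scsi_target_block(target_dev);
        else
                scsi_target_unblock(target_dev);
}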