/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

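/*
 * Names for the T10 DIF protection operations; the entries follow the
 * order of the SCSI_PROT_* values reported by scsi_get_prot_op().
 */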
static char *dif_op_str[] = {
        "PROT_NORMAL",
        "PROT_READ_INSERT",
        "PROT_WRITE_STRIP",
        "PROT_READ_STRIP",
        "PROT_WRITE_INSERT",
        "PROT_READ_PASS",
        "PROT_WRITE_PASS",
};

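/*
 * One 8-byte T10 DIF tuple as it appears in the protection data stream:
 * a CRC guard over the data block, an application tag, and a reference
 * tag that normally carries the target LBA.
 */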
struct scsi_dif_tuple {
        __be16 guard_tag;       /* Checksum */
        __be16 app_tag;         /* Opaque storage */
        __be32 ref_tag;         /* Target LBA or indirect LBA */
};

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

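/**
 * lpfc_debug_save_data - Copy a command's data scatterlist to the debug buffer
 * @phba: Pointer to HBA object.
 * @cmnd: Pointer to the scsi command of interest.
 *
 * Linearizes the data scatter-gather list of @cmnd into the global
 * _dump_buf_data capture buffer, used when debugging BlockGuard errors.
 **/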
static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
        void *src, *dst;
        struct scatterlist *sgde = scsi_sglist(cmnd);

        if (!_dump_buf_data) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                        "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
                                __func__);
                return;
        }

        if (!sgde) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                        "9051 BLKGRD: ERROR: data scatterlist is null\n");
                return;
        }

        dst = (void *) _dump_buf_data;
        while (sgde) {
                src = sg_virt(sgde);
                memcpy(dst, src, sgde->length);
                dst += sgde->length;
                sgde = sg_next(sgde);
        }
}

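/**
 * lpfc_debug_save_dif - Copy a command's protection scatterlist to the debug buffer
 * @phba: Pointer to HBA object.
 * @cmnd: Pointer to the scsi command of interest.
 *
 * Linearizes the DIF protection scatter-gather list of @cmnd into the
 * global _dump_buf_dif capture buffer, used when debugging BlockGuard
 * errors.
 **/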
static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
        void *src, *dst;
        struct scatterlist *sgde = scsi_prot_sglist(cmnd);

        if (!_dump_buf_dif) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                        "9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
                                __func__);
                return;
        }

        if (!sgde) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                        "9053 BLKGRD: ERROR: prot scatterlist is null\n");
                return;
        }

        dst = _dump_buf_dif;
        while (sgde) {
                src = sg_virt(sgde);
                memcpy(dst, src, sgde->length);
                dst += sgde->length;
                sgde = sg_next(sgde);
        }
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
                                struct lpfc_scsi_buf *lpfc_cmd)
{
        struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
        if (sgl) {
                sgl += 1;
                sgl->word2 = le32_to_cpu(sgl->word2);
                bf_set(lpfc_sli4_sge_last, sgl, 1);
                sgl->word2 = cpu_to_le32(sgl->word2);
        }
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called on command completion to update the latency
 * bucket statistics for the completed command.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long flags;
        struct Scsi_Host  *shost = cmd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        unsigned long latency;
        int i;

        if (cmd->result)
                return;

        latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

        spin_lock_irqsave(shost->host_lock, flags);
        if (!vport->stat_data_enabled ||
                vport->stat_data_blocked ||
                !pnode ||
                !pnode->lat_data ||
                (phba->bucket_type == LPFC_NO_BUCKET)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return;
        }

        if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
                i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
                        phba->bucket_step;
                /* check array subscript bounds */
                if (i < 0)
                        i = 0;
                else if (i >= LPFC_MAX_BUCKET_COUNT)
                        i = LPFC_MAX_BUCKET_COUNT - 1;
        } else {
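                /*
                 * Exponential buckets: find the first bucket whose upper
                 * bound, bucket_base + (2^i * bucket_step), covers the
                 * measured latency.
                 */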
                for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
                        if (latency <= (phba->bucket_base +
                                ((1<<i)*phba->bucket_step)))
                                break;
        }

        pnode->lat_data[i].cmd_count++;
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
                struct lpfc_vport  *vport,
                struct lpfc_nodelist *ndlp,
                uint32_t lun,
                uint32_t old_val,
                uint32_t new_val)
{
        struct lpfc_fast_path_event *fast_path_evt;
        unsigned long flags;

        fast_path_evt = lpfc_alloc_fast_evt(phba);
        if (!fast_path_evt)
                return;

        fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
                FC_REG_SCSI_EVENT;
        fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
                LPFC_EVENT_VARQUEDEPTH;

        /* Report all luns with change in queue depth */
        fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
        if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
                memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
                        &ndlp->nlp_portname, sizeof(struct lpfc_name));
                memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
                        &ndlp->nlp_nodename, sizeof(struct lpfc_name));
        }

        fast_path_evt->un.queue_depth_evt.oldval = old_val;
        fast_path_evt->un.queue_depth_evt.newval = new_val;
        fast_path_evt->vport = vport;

        fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        lpfc_worker_wake_up(phba);

        return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_rport_data *rdata;
        unsigned long new_queue_depth, old_queue_depth;

        old_queue_depth = sdev->queue_depth;
        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
        new_queue_depth = sdev->queue_depth;
        rdata = sdev->hostdata;
        if (rdata)
                lpfc_send_sdev_queuedepth_change_event(phba, vport,
                                                       rdata->pnode, sdev->lun,
                                                       old_queue_depth,
                                                       new_queue_depth);
        return sdev->queue_depth;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process
 * the event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
        unsigned long flags;
        uint32_t evt_posted;

        spin_lock_irqsave(&phba->hbalock, flags);
        atomic_inc(&phba->num_rsrc_err);
        phba->last_rsrc_error_time = jiffies;

        if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        phba->last_ramp_down_time = jiffies;

        spin_unlock_irqrestore(&phba->hbalock, flags);

        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
        if (!evt_posted)
                phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

        if (!evt_posted)
                lpfc_worker_wake_up(phba);
        return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the hba of @vport, at
 * most one event per QUEUE_RAMP_UP_INTERVAL (5 minutes) after
 * last_ramp_up_time or last_rsrc_error_time, and wakes up the worker thread
 * to process the event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
                        uint32_t queue_depth)
{
        unsigned long flags;
        struct lpfc_hba *phba = vport->phba;
        uint32_t evt_posted;
        atomic_inc(&phba->num_cmd_success);

        if (vport->cfg_lun_queue_depth <= queue_depth)
                return;
        spin_lock_irqsave(&phba->hbalock, flags);
        if (time_before(jiffies,
                        phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
            time_before(jiffies,
                        phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }
        phba->last_ramp_up_time = jiffies;
        spin_unlock_irqrestore(&phba->hbalock, flags);

        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
        if (!evt_posted)
                phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

        if (!evt_posted)
                lpfc_worker_wake_up(phba);
        return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        unsigned long new_queue_depth;
        unsigned long num_rsrc_err, num_cmd_success;
        int i;

        num_rsrc_err = atomic_read(&phba->num_rsrc_err);
        num_cmd_success = atomic_read(&phba->num_cmd_success);

        /*
         * The error and success command counters are global per
         * driver instance.  If another handler has already
         * operated on this error event, just exit.
         */
        if (num_rsrc_err == 0)
                return;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
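                                /*
                                 * Scale the queue depth down in proportion
                                 * to the share of commands that hit a
                                 * resource error; if that share rounds to
                                 * zero, drop the depth by one instead.
                                 */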
                                new_queue_depth =
                                        sdev->queue_depth * num_rsrc_err /
                                        (num_rsrc_err + num_cmd_success);
                                if (!new_queue_depth)
                                        new_queue_depth = sdev->queue_depth - 1;
                                else
                                        new_queue_depth = sdev->queue_depth -
                                                                new_queue_depth;
                                lpfc_change_queue_depth(sdev, new_queue_depth,
                                                        SCSI_QDEPTH_DEFAULT);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
        atomic_set(&phba->num_rsrc_err, 0);
        atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. It increases the queue depth of every scsi device on each
 * vport associated with @phba by 1, and resets @phba's num_rsrc_err and
 * num_cmd_success counters to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                if (vports[i]->cfg_lun_queue_depth <=
                                    sdev->queue_depth)
                                        continue;
                                lpfc_change_queue_depth(sdev,
                                                        sdev->queue_depth+1,
                                                        SCSI_QDEPTH_RAMP_UP);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
        atomic_set(&phba->num_rsrc_err, 0);
        atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to the blocked
 * state by invoking fc_remote_port_delete() on every remote port. It is
 * invoked by EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        struct fc_rport *rport;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                rport = starget_to_rport(scsi_target(sdev));
                                fc_remote_port_delete(rport);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with the SLI-3 interface
 * spec. Each scsi buffer contains all the necessary information needed to
 * initiate a SCSI I/O. The non-DMAable buffer region contains information to
 * build the IOCB. The DMAable region contains memory for the FCP CMND, FCP
 * RSP, and the initial BPL. In addition to allocating memory, the FCP CMND
 * and FCP RSP BDEs are set up in the BPL and the BPL BDE is set up in the
 * IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
        int bcnt;

        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
                if (!psb)
                        break;

                /*
                 * Get memory from the pci pool to map the virt space to pci
                 * bus space for an I/O.  The DMA buffer includes space for the
                 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
                 * necessary to support the sg_tablesize.
                 */
                psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
                                        GFP_KERNEL, &psb->dma_handle);
                if (!psb->data) {
                        kfree(psb);
                        break;
                }

                /* Initialize virtual ptrs to dma_buf region. */
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
                        pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                                        psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                }
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

                psb->fcp_cmnd = psb->data;
                psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
                psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

                /* Initialize local short-hand pointers. */
                bpl = psb->fcp_bpl;
                pdma_phys_fcp_cmd = psb->dma_handle;
                pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
                pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

                /*
                 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
                 * are sg list bdes.  Initialize the first two and leave the
                 * rest for queuecommand.
                 */
                bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
                bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
                bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
                bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

                /* Setup the physical region for the FCP RSP */
                bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
                bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
                bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
                bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

                /*
                 * Since the IOCB for the FCP I/O is built into this
                 * lpfc_scsi_buf, initialize it with all known data now.
                 */
                iocb = &psb->cur_iocbq.iocb;
                iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
                if ((phba->sli_rev == 3) &&
                                !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
                        /* fill in immediate fcp command BDE */
                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
                        iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
                        iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
                                        unsli3.fcp_ext.icd);
                        iocb->un.fcpi64.bdl.addrHigh = 0;
                        iocb->ulpBdeCount = 0;
                        iocb->ulpLe = 0;
                        /* fill in response BDE */
                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
                                                        BUFF_TYPE_BDE_64;
                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
                                sizeof(struct fcp_rsp);
                        iocb->unsli3.fcp_ext.rbde.addrLow =
                                putPaddrLow(pdma_phys_fcp_rsp);
                        iocb->unsli3.fcp_ext.rbde.addrHigh =
                                putPaddrHigh(pdma_phys_fcp_rsp);
                } else {
                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
                        iocb->un.fcpi64.bdl.bdeSize =
                                        (2 * sizeof(struct ulp_bde64));
                        iocb->un.fcpi64.bdl.addrLow =
                                        putPaddrLow(pdma_phys_bpl);
                        iocb->un.fcpi64.bdl.addrHigh =
                                        putPaddrHigh(pdma_phys_bpl);
                        iocb->ulpBdeCount = 1;
                        iocb->ulpLe = 1;
                }
                iocb->ulpClass = CLASS3;
                psb->status = IOSTAT_SUCCESS;
                /* Put it back into the SCSI buffer list */
                psb->cur_iocbq.context1 = psb;
                lpfc_release_scsi_buf_s3(phba, psb);
        }

        return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb, *next_psb;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        list_for_each_entry_safe(psb, next_psb,
                                &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
                if (psb->rdata && psb->rdata->pnode
                        && psb->rdata->pnode->vport == vport)
                        psb->rdata = NULL;
        }
        spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
                          struct sli4_wcqe_xri_aborted *axri)
{
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        struct lpfc_scsi_buf *psb, *next_psb;
        unsigned long iflag = 0;
        struct lpfc_iocbq *iocbq;
        int i;
        struct lpfc_nodelist *ndlp;
        int rrq_empty = 0;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        list_for_each_entry_safe(psb, next_psb,
                &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
                if (psb->cur_iocbq.sli4_xritag == xri) {
                        list_del(&psb->list);
                        psb->exch_busy = 0;
                        psb->status = IOSTAT_SUCCESS;
                        spin_unlock(
                                &phba->sli4_hba.abts_scsi_buf_list_lock);
                        if (psb->rdata && psb->rdata->pnode)
                                ndlp = psb->rdata->pnode;
                        else
                                ndlp = NULL;

                        rrq_empty = list_empty(&phba->active_rrq_list);
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        if (ndlp) {
                                lpfc_set_rrq_active(phba, ndlp,
                                        psb->cur_iocbq.sli4_lxritag, rxid, 1);
                                lpfc_sli4_abts_err_handler(phba, ndlp, axri);
                        }
                        lpfc_release_scsi_buf_s4(phba, psb);
                        if (rrq_empty)
                                lpfc_worker_wake_up(phba);
                        return;
                }
        }
        spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
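        /*
         * The xri was not on the aborted-buffer list; search the active
         * iotag table for the FCP command that still owns this exchange.
         */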
        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
                        (iocbq->iocb_flag & LPFC_IO_LIBDFC))
                        continue;
                if (iocbq->sli4_xritag != xri)
                        continue;
                psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
                psb->exch_busy = 0;
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                if (pring->txq_cnt)
                        lpfc_worker_wake_up(phba);
                return;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 * @sb_count: number of scsi buffers on the list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post them to the
 * port. For a single SCSI buffer sgl with a non-contiguous xri, if any, it
 * shall use the embedded SGL post mailbox command for posting. The
 * @post_sblist passed in must be a local list, thus no lock is needed when
 * manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                             struct list_head *post_sblist, int sb_count)
{
        struct lpfc_scsi_buf *psb, *psb_next;
        int status;
        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
        dma_addr_t pdma_phys_bpl1;
        int last_xritag = NO_XRI;
        LIST_HEAD(prep_sblist);
        LIST_HEAD(blck_sblist);
        LIST_HEAD(scsi_sblist);

        /* sanity check */
        if (sb_count <= 0)
                return -EINVAL;

        list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
                list_del_init(&psb->list);
                block_cnt++;
                if ((last_xritag != NO_XRI) &&
                    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
                        /* a hole in xri block, form a sgl posting block */
                        list_splice_init(&prep_sblist, &blck_sblist);
                        post_cnt = block_cnt - 1;
                        /* prepare list for next posting block */
                        list_add_tail(&psb->list, &prep_sblist);
                        block_cnt = 1;
                } else {
                        /* prepare list for next posting block */
                        list_add_tail(&psb->list, &prep_sblist);
                        /* enough sgls for non-embed sgl mbox command */
                        if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
                                list_splice_init(&prep_sblist, &blck_sblist);
                                post_cnt = block_cnt;
                                block_cnt = 0;
                        }
                }
                num_posting++;
                last_xritag = psb->cur_iocbq.sli4_xritag;

                /* end of repost sgl list condition for SCSI buffers */
                if (num_posting == sb_count) {
                        if (post_cnt == 0) {
                                /* last sgl posting block */
                                list_splice_init(&prep_sblist, &blck_sblist);
                                post_cnt = block_cnt;
                        } else if (block_cnt == 1) {
                                /* last single sgl with non-contiguous xri */
                                if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
                                        pdma_phys_bpl1 = psb->dma_phys_bpl +
                                                                SGL_PAGE_SIZE;
                                else
                                        pdma_phys_bpl1 = 0;
                                status = lpfc_sli4_post_sgl(phba,
                                                psb->dma_phys_bpl,
                                                pdma_phys_bpl1,
                                                psb->cur_iocbq.sli4_xritag);
                                if (status) {
                                        /* failure, put on abort scsi list */
                                        psb->exch_busy = 1;
                                } else {
                                        /* success, put on SCSI buffer list */
                                        psb->exch_busy = 0;
                                        psb->status = IOSTAT_SUCCESS;
                                        num_posted++;
                                }
                                /* success, put on SCSI buffer sgl list */
                                list_add_tail(&psb->list, &scsi_sblist);
                        }
                }

                /* continue until a nembed page worth of sgls */
                if (post_cnt == 0)
                        continue;

                /* post block of SCSI buffer list sgls */
                status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
                                                       post_cnt);

                /* don't reset xritag due to hole in xri block */
                if (block_cnt == 0)
                        last_xritag = NO_XRI;

                /* reset SCSI buffer post count for next round of posting */
                post_cnt = 0;

                /* put SCSI buffers with posted sgls on the SCSI buffer sgl list */
                while (!list_empty(&blck_sblist)) {
                        list_remove_head(&blck_sblist, psb,
                                         struct lpfc_scsi_buf, list);
                        if (status) {
                                /* failure, put on abort scsi list */
                                psb->exch_busy = 1;
                        } else {
                                /* success, put on SCSI buffer list */
                                psb->exch_busy = 0;
                                psb->status = IOSTAT_SUCCESS;
                                num_posted++;
                        }
                        list_add_tail(&psb->list, &scsi_sblist);
                }
        }
        /* Push SCSI buffers with sgl posted to the available list */
        while (!list_empty(&scsi_sblist)) {
                list_remove_head(&scsi_sblist, psb,
                                 struct lpfc_scsi_buf, list);
                lpfc_release_scsi_buf_s4(phba, psb);
        }
        return num_posted;
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
        LIST_HEAD(post_sblist);
        int num_posted, rc = 0;

        /* get all SCSI buffers that need to be reposted to a local list */
        spin_lock(&phba->scsi_buf_list_lock);
        list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
        spin_unlock(&phba->scsi_buf_list_lock);

        /* post the list of scsi buffer sgls to port if available */
        if (!list_empty(&post_sblist)) {
                num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
                                                phba->sli4_hba.scsi_xri_cnt);
                /* failed to post any scsi buffer, return error */
                if (num_posted == 0)
                        rc = -EIO;
        }
        return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with the SLI-4 interface
 * spec. Each scsi buffer contains all the necessary information needed to
 * initiate a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and
 * putting them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
        struct sli4_sge *sgl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
        uint16_t iotag, lxri = 0;
        int bcnt, num_posted;
        LIST_HEAD(prep_sblist);
        LIST_HEAD(post_sblist);
        LIST_HEAD(scsi_sblist);

        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
                if (!psb)
                        break;
                /*
                 * Get memory from the pci pool to map the virt space to
                 * pci bus space for an I/O. The DMA buffer includes space
                 * for the struct fcp_cmnd, struct fcp_rsp and the number
                 * of bde's necessary to support the sg_tablesize.
                 */
                psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
                                                GFP_KERNEL, &psb->dma_handle);
                if (!psb->data) {
                        kfree(psb);
                        break;
                }
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
                        pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                                psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                }

                lxri = lpfc_sli4_next_xritag(phba);
                if (lxri == NO_XRI) {
                        pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                              psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                }
                psb->cur_iocbq.sli4_lxritag = lxri;
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
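                /*
                 * Lay out the DMA buffer: the SGL starts at the front and
                 * the FCP command and response are carved from the tail.
                 */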
                psb->fcp_bpl = psb->data;
                psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
                        - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
                psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
                                        sizeof(struct fcp_cmnd));

                /* Initialize local short-hand pointers. */
                sgl = (struct sli4_sge *)psb->fcp_bpl;
                pdma_phys_bpl = psb->dma_handle;
                pdma_phys_fcp_cmd =
                        (psb->dma_handle + phba->cfg_sg_dma_buf_size)
                         - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
                pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

                /*
                 * The first two bdes are the FCP_CMD and FCP_RSP.
                 * The balance are sg list bdes. Initialize the
                 * first two and leave the rest for queuecommand.
                 */
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
                sgl->word2 = le32_to_cpu(sgl->word2);
                bf_set(lpfc_sli4_sge_last, sgl, 0);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
                sgl++;

                /* Setup the physical region for the FCP RSP */
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
                sgl->word2 = le32_to_cpu(sgl->word2);
                bf_set(lpfc_sli4_sge_last, sgl, 1);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

                /*
                 * Since the IOCB for the FCP I/O is built into this
                 * lpfc_scsi_buf, initialize it with all known data now.
                 */
                iocb = &psb->cur_iocbq.iocb;
                iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
                iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
                /* setting the BLP size to 2 * sizeof BDE may not be correct.
                 * We are setting the bpl to point to our sgl. An sgl's
                 * entries are 16 bytes, a bpl's entries are 12 bytes.
                 */
                iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
                iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
                iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
                iocb->ulpBdeCount = 1;
                iocb->ulpLe = 1;
                iocb->ulpClass = CLASS3;
                psb->cur_iocbq.context1 = psb;
                if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
                        pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
                else
                        pdma_phys_bpl1 = 0;
                psb->dma_phys_bpl = pdma_phys_bpl;

                /* add the scsi buffer to a post list */
                list_add_tail(&psb->list, &post_sblist);
                spin_lock_irq(&phba->scsi_buf_list_lock);
                phba->sli4_hba.scsi_xri_cnt++;
                spin_unlock_irq(&phba->scsi_buf_list_lock);
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_BG,
                        "3021 Allocate %d out of %d requested new SCSI "
                        "buffers\n", bcnt, num_to_alloc);

        /* post the list of scsi buffer sgls to port if available */
        if (!list_empty(&post_sblist))
                num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
                                                          &post_sblist, bcnt);
        else
                num_posted = 0;

        return num_posted;
}

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
        return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd) {
                lpfc_cmd->seg_cnt = 0;
                lpfc_cmd->nonsg_phys = 0;
                lpfc_cmd->prot_seg_cnt = 0;
        }
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        struct lpfc_scsi_buf *lpfc_cmd;
        unsigned long iflag = 0;
        int found = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
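        /*
         * Skip any buffer whose XRI is still within its RRQ window for
         * this node; such an XRI cannot be reused until the RRQ expires.
         */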
        list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_cmd->list);
                found = 1;
                lpfc_cmd->seg_cnt = 0;
                lpfc_cmd->nonsg_phys = 0;
                lpfc_cmd->prot_seg_cnt = 0;
                break;
        }
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        if (!found)
                return NULL;
        else
                return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        psb->pCmd = NULL;
        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if the exchange
 * was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
        unsigned long iflag = 0;

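        /*
         * An exchange that is still busy on the wire stays on the aborted
         * list until the port reports the XRI free again.
         */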
        if (psb->exch_busy) {
                spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
                psb->pCmd = NULL;
                list_add_tail(&psb->list,
                        &phba->sli4_hba.lpfc_abts_scsi_buf_list);
                spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
        } else {
                spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
                psb->pCmd = NULL;
                list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        }
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
        phba->lpfc_release_scsi_buf(phba, psb);
}

1196 /**
1197  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
1198  * @phba: The Hba for which this call is being executed.
1199  * @lpfc_cmd: The scsi buffer which is going to be mapped.
1200  *
1201  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1202  * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
1203  * through sg elements and format the bdea. This routine also initializes all
1204  * IOCB fields which are dependent on scsi command request buffer.
1205  *
1206  * Return codes:
1207  *   1 - Error
1208  *   0 - Success
1209  **/
1210 static int
1211 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1212 {
1213         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1214         struct scatterlist *sgel = NULL;
1215         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1216         struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1217         struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1218         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1219         struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1220         dma_addr_t physaddr;
1221         uint32_t num_bde = 0;
1222         int nseg, datadir = scsi_cmnd->sc_data_direction;
1223
1224         /*
1225          * There are three possibilities here - use scatter-gather segment, use
1226          * the single mapping, or neither.  Start the lpfc command prep by
1227          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1228          * data bde entry.
1229          */
1230         bpl += 2;
1231         if (scsi_sg_count(scsi_cmnd)) {
1232                 /*
1233                  * The driver stores the segment count returned from pci_map_sg
1234                  * because this a count of dma-mappings used to map the use_sg
1235                  * pages.  They are not guaranteed to be the same for those
1236                  * architectures that implement an IOMMU.
1237                  */
1238
1239                 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1240                                   scsi_sg_count(scsi_cmnd), datadir);
1241                 if (unlikely(!nseg))
1242                         return 1;
1243
1244                 lpfc_cmd->seg_cnt = nseg;
1245                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1246                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1247                                 "9064 BLKGRD: %s: Too many sg segments from "
1248                                "dma_map_sg.  Config %d, seg_cnt %d\n",
1249                                __func__, phba->cfg_sg_seg_cnt,
1250                                lpfc_cmd->seg_cnt);
1251                         scsi_dma_unmap(scsi_cmnd);
1252                         return 1;
1253                 }
1254
1255                 /*
1256                  * The driver established a maximum scatter-gather segment count
1257                  * during probe that limits the number of sg elements in any
1258                  * single scsi command.  Just run through the seg_cnt and format
1259                  * the bde's.
1260                  * When using SLI-3 the driver will try to fit all the BDEs into
1261                  * the IOCB. If it can't then the BDEs get added to a BPL as it
1262                  * does for SLI-2 mode.
1263                  */
1264                 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1265                         physaddr = sg_dma_address(sgel);
1266                         if (phba->sli_rev == 3 &&
1267                             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1268                             !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1269                             nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1270                                 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1271                                 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1272                                 data_bde->addrLow = putPaddrLow(physaddr);
1273                                 data_bde->addrHigh = putPaddrHigh(physaddr);
1274                                 data_bde++;
1275                         } else {
1276                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1277                                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1278                                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1279                                 bpl->addrLow =
1280                                         le32_to_cpu(putPaddrLow(physaddr));
1281                                 bpl->addrHigh =
1282                                         le32_to_cpu(putPaddrHigh(physaddr));
1283                                 bpl++;
1284                         }
1285                 }
1286         }
1287
1288         /*
1289          * Finish initializing those IOCB fields that are dependent on the
1290          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
1291          * explicitly reinitialized and for SLI-3 the extended bde count is
1292          * explicitly reinitialized since all iocb memory resources are reused.
1293          */
1294         if (phba->sli_rev == 3 &&
1295             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1296             !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1297                 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1298                         /*
1299                          * The extended IOCB format can only fit 3 BDEs or a BPL.
1300                          * This I/O has more than 3 BDEs, so the 1st data bde
1301                          * will be a BPL that is filled in here.
1302                          */
1303                         physaddr = lpfc_cmd->dma_handle;
1304                         data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1305                         data_bde->tus.f.bdeSize = (num_bde *
1306                                                    sizeof(struct ulp_bde64));
1307                         physaddr += (sizeof(struct fcp_cmnd) +
1308                                      sizeof(struct fcp_rsp) +
1309                                      (2 * sizeof(struct ulp_bde64)));
1310                         data_bde->addrHigh = putPaddrHigh(physaddr);
1311                         data_bde->addrLow = putPaddrLow(physaddr);
1312                         /* ebde count includes the response bde and data bpl */
1313                         iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1314                 } else {
1315                         /* ebde count includes the response bde and data bdes */
1316                         iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1317                 }
1318         } else {
1319                 iocb_cmd->un.fcpi64.bdl.bdeSize =
1320                         ((num_bde + 2) * sizeof(struct ulp_bde64));
1321                 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1322         }
1323         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1324
1325         /*
1326          * Due to the difference in data length between the DIF/non-DIF
1327          * paths, we need to set word 4 of the IOCB here.
1328          */
1329         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1330         return 0;
1331 }
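
/*
 * Worked example (illustrative sketch, not part of the driver): the SLI-2
 * BDL sizing above counts two implicit BDEs (fcp_cmnd and fcp_rsp) ahead
 * of the data BDEs, hence the (num_bde + 2) factor. The hypothetical
 * helper below just restates that arithmetic; e.g. a 4-segment I/O with a
 * 12-byte struct ulp_bde64 would yield a 72-byte BDL size.
 */
static inline uint32_t
lpfc_example_fcpi64_bdl_size(uint32_t num_bde)
{
	/* fcp_cmnd BDE + fcp_rsp BDE + num_bde data BDEs */
	return (num_bde + 2) * sizeof(struct ulp_bde64);
}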
1332
1333 static inline unsigned
1334 lpfc_cmd_blksize(struct scsi_cmnd *sc)
1335 {
1336         return sc->device->sector_size;
1337 }
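
/*
 * Illustrative sketch: callers below derive the number of logical blocks
 * spanned by a command by rounding the transfer length up to the block
 * size, i.e. (scsi_bufflen(sc) + blksize - 1) / blksize. The hypothetical
 * helper below is the equivalent using the kernel's DIV_ROUND_UP().
 */
static inline unsigned
lpfc_example_cmd_numblks(struct scsi_cmnd *sc)
{
	/* round up: a partial trailing block still occupies a full LBA */
	return DIV_ROUND_UP(scsi_bufflen(sc), lpfc_cmd_blksize(sc));
}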
1338
1339 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1340
1341 /* Returned if the error injection is to be detected by the Initiator */
1342 #define BG_ERR_INIT     0x1
1343 /* Returned if the error injection is to be detected by the Target */
1344 #define BG_ERR_TGT      0x2
1345 /* Returned if swapping CSUM<-->CRC is required for error injection */
1346 #define BG_ERR_SWAP     0x10
1347 /* Returned if disabling Guard/Ref/App checking is required for error injection */
1348 #define BG_ERR_CHECK    0x20
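
/*
 * Example (taken from the cases below): a WRITE_PASS reftag injection
 * returns (BG_ERR_TGT | BG_ERR_CHECK), i.e. the corrupted tag should be
 * detected by the Target, and local Guard/Ref/App checking must be
 * disabled so the bad tag actually makes it onto the wire.
 */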
1349
1350 /**
1351  * lpfc_bg_err_inject - Determine if we should inject an error
1352  * @phba: The Hba for which this call is being executed.
1353  * @sc: The SCSI command to examine
1354  * @reftag: (out) BlockGuard reference tag for transmitted data
1355  * @apptag: (out) BlockGuard application tag for transmitted data
1356  * @new_guard: (in) Value to replace the CRC with if needed
1357  *
1358  * Returns a BG_ERR_* bit mask, or 0 if the request is ignored
1359  **/
1360 static int
1361 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1362                 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1363 {
1364         struct scatterlist *sgpe; /* s/g prot entry */
1365         struct scatterlist *sgde; /* s/g data entry */
1366         struct lpfc_scsi_buf *lpfc_cmd = NULL;
1367         struct scsi_dif_tuple *src = NULL;
1368         struct lpfc_nodelist *ndlp;
1369         struct lpfc_rport_data *rdata;
1370         uint32_t op = scsi_get_prot_op(sc);
1371         uint32_t blksize;
1372         uint32_t numblks;
1373         sector_t lba;
1374         int rc = 0;
1375         int blockoff = 0;
1376
1377         if (op == SCSI_PROT_NORMAL)
1378                 return 0;
1379
1380         sgpe = scsi_prot_sglist(sc);
1381         sgde = scsi_sglist(sc);
1382         lba = scsi_get_lba(sc);
1383
1384         /* First check if we need to match the LBA */
1385         if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1386                 blksize = lpfc_cmd_blksize(sc);
1387                 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1388
1389                 /* Make sure we have the right LBA if one is specified */
1390                 if ((phba->lpfc_injerr_lba < lba) ||
1391                         (phba->lpfc_injerr_lba >= (lba + numblks)))
1392                         return 0;
1393                 if (sgpe) {
1394                         blockoff = phba->lpfc_injerr_lba - lba;
1395                         numblks = sg_dma_len(sgpe) /
1396                                 sizeof(struct scsi_dif_tuple);
1397                         if (numblks < blockoff)
1398                                 blockoff = numblks;
1399                 }
1400         }
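
	/*
	 * Example (illustrative): with lpfc_injerr_lba == 100, a write of
	 * 8 blocks starting at lba == 96 spans LBAs 96-103, so the match
	 * above succeeds and blockoff == 100 - 96 == 4; the injected tag
	 * is then placed four tuples into the protection buffer instead
	 * of at the start of the IO.
	 */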
1401
1402         /* Next check if we need to match the remote NPortID or WWPN */
1403         rdata = sc->device->hostdata;
1404         if (rdata && rdata->pnode) {
1405                 ndlp = rdata->pnode;
1406
1407                 /* Make sure we have the right NPortID if one is specified */
1408                 if (phba->lpfc_injerr_nportid  &&
1409                         (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1410                         return 0;
1411
1412                 /*
1413                  * Make sure we have the right WWPN if one is specified.
1414                  * wwn[0] should be a non-zero NAA in a good WWPN.
1415                  */
1416                 if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1417                         (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1418                                 sizeof(struct lpfc_name)) != 0))
1419                         return 0;
1420         }
1421
1422         /* Set up a ptr to the protection data if the SCSI host provides it */
1423         if (sgpe) {
1424                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1425                 src += blockoff;
1426                 lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1427         }
1428
1429         /* Should we change the Reference Tag */
1430         if (reftag) {
1431                 if (phba->lpfc_injerr_wref_cnt) {
1432                         switch (op) {
1433                         case SCSI_PROT_WRITE_PASS:
1434                                 if (src) {
1435                                         /*
1436                                          * For WRITE_PASS, force the error
1437                                          * to be sent on the wire. It should
1438                                          * be detected by the Target.
1439                                          * If blockoff != 0, the error will
1440                                          * be inserted in the middle of the IO.
1441                                          */
1442
1443                                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1444                                         "9076 BLKGRD: Injecting reftag error: "
1445                                         "write lba x%lx + x%x oldrefTag x%x\n",
1446                                         (unsigned long)lba, blockoff,
1447                                         be32_to_cpu(src->ref_tag));
1448
1449                                         /*
1450                                          * Save the old ref_tag so we can
1451                                          * restore it on completion.
1452                                          */
1453                                         if (lpfc_cmd) {
1454                                                 lpfc_cmd->prot_data_type =
1455                                                         LPFC_INJERR_REFTAG;
1456                                                 lpfc_cmd->prot_data_segment =
1457                                                         src;
1458                                                 lpfc_cmd->prot_data =
1459                                                         src->ref_tag;
1460                                         }
1461                                         src->ref_tag = cpu_to_be32(0xDEADBEEF);
1462                                         phba->lpfc_injerr_wref_cnt--;
1463                                         if (phba->lpfc_injerr_wref_cnt == 0) {
1464                                                 phba->lpfc_injerr_nportid = 0;
1465                                                 phba->lpfc_injerr_lba =
1466                                                         LPFC_INJERR_LBA_OFF;
1467                                                 memset(&phba->lpfc_injerr_wwpn,
1468                                                   0, sizeof(struct lpfc_name));
1469                                         }
1470                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1471
1472                                         break;
1473                                 }
1474                                 /* Drop thru */
1475                         case SCSI_PROT_WRITE_INSERT:
1476                                 /*
1477                                  * For WRITE_INSERT, force the error
1478                                  * to be sent on the wire. It should be
1479                                  * detected by the Target.
1480                                  */
1481                                 /* DEADBEEF will be the reftag on the wire */
1482                                 *reftag = 0xDEADBEEF;
1483                                 phba->lpfc_injerr_wref_cnt--;
1484                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1485                                         phba->lpfc_injerr_nportid = 0;
1486                                         phba->lpfc_injerr_lba =
1487                                         LPFC_INJERR_LBA_OFF;
1488                                         memset(&phba->lpfc_injerr_wwpn,
1489                                                 0, sizeof(struct lpfc_name));
1490                                 }
1491                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1492
1493                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1494                                         "9078 BLKGRD: Injecting reftag error: "
1495                                         "write lba x%lx\n", (unsigned long)lba);
1496                                 break;
1497                         case SCSI_PROT_WRITE_STRIP:
1498                                 /*
1499                                  * For WRITE_STRIP and WRITE_PASS,
1500                                  * force the error on data
1501                                  * being copied from SLI-Host to SLI-Port.
1502                                  */
1503                                 *reftag = 0xDEADBEEF;
1504                                 phba->lpfc_injerr_wref_cnt--;
1505                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1506                                         phba->lpfc_injerr_nportid = 0;
1507                                         phba->lpfc_injerr_lba =
1508                                                 LPFC_INJERR_LBA_OFF;
1509                                         memset(&phba->lpfc_injerr_wwpn,
1510                                                 0, sizeof(struct lpfc_name));
1511                                 }
1512                                 rc = BG_ERR_INIT;
1513
1514                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1515                                         "9077 BLKGRD: Injecting reftag error: "
1516                                         "write lba x%lx\n", (unsigned long)lba);
1517                                 break;
1518                         }
1519                 }
1520                 if (phba->lpfc_injerr_rref_cnt) {
1521                         switch (op) {
1522                         case SCSI_PROT_READ_INSERT:
1523                         case SCSI_PROT_READ_STRIP:
1524                         case SCSI_PROT_READ_PASS:
1525                                 /*
1526                                  * For READ_STRIP and READ_PASS, force the
1527                                  * error on data being read off the wire. It
1528                                  * should force an IO error to the driver.
1529                                  */
1530                                 *reftag = 0xDEADBEEF;
1531                                 phba->lpfc_injerr_rref_cnt--;
1532                                 if (phba->lpfc_injerr_rref_cnt == 0) {
1533                                         phba->lpfc_injerr_nportid = 0;
1534                                         phba->lpfc_injerr_lba =
1535                                                 LPFC_INJERR_LBA_OFF;
1536                                         memset(&phba->lpfc_injerr_wwpn,
1537                                                 0, sizeof(struct lpfc_name));
1538                                 }
1539                                 rc = BG_ERR_INIT;
1540
1541                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1542                                         "9079 BLKGRD: Injecting reftag error: "
1543                                         "read lba x%lx\n", (unsigned long)lba);
1544                                 break;
1545                         }
1546                 }
1547         }
1548
1549         /* Should we change the Application Tag */
1550         if (apptag) {
1551                 if (phba->lpfc_injerr_wapp_cnt) {
1552                         switch (op) {
1553                         case SCSI_PROT_WRITE_PASS:
1554                                 if (src) {
1555                                         /*
1556                                          * For WRITE_PASS, force the error
1557                                          * to be sent on the wire. It should
1558                                          * be detected by the Target.
1559                                          * If blockoff != 0, the error will
1560                                          * be inserted in the middle of the IO.
1561                                          */
1562
1563                                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1564                                         "9080 BLKGRD: Injecting apptag error: "
1565                                         "write lba x%lx + x%x oldappTag x%x\n",
1566                                         (unsigned long)lba, blockoff,
1567                                         be16_to_cpu(src->app_tag));
1568
1569                                         /*
1570                                          * Save the old app_tag so we can
1571                                          * restore it on completion.
1572                                          */
1573                                         if (lpfc_cmd) {
1574                                                 lpfc_cmd->prot_data_type =
1575                                                         LPFC_INJERR_APPTAG;
1576                                                 lpfc_cmd->prot_data_segment =
1577                                                         src;
1578                                                 lpfc_cmd->prot_data =
1579                                                         src->app_tag;
1580                                         }
1581                                         src->app_tag = cpu_to_be16(0xDEAD);
1582                                         phba->lpfc_injerr_wapp_cnt--;
1583                                         if (phba->lpfc_injerr_wapp_cnt == 0) {
1584                                                 phba->lpfc_injerr_nportid = 0;
1585                                                 phba->lpfc_injerr_lba =
1586                                                         LPFC_INJERR_LBA_OFF;
1587                                                 memset(&phba->lpfc_injerr_wwpn,
1588                                                   0, sizeof(struct lpfc_name));
1589                                         }
1590                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1591                                         break;
1592                                 }
1593                                 /* Drop thru */
1594                         case SCSI_PROT_WRITE_INSERT:
1595                                 /*
1596                                  * For WRITE_INSERT, force the
1597                                  * error to be sent on the wire. It should be
1598                                  * detected by the Target.
1599                                  */
1600                                 /* DEAD will be the apptag on the wire */
1601                                 *apptag = 0xDEAD;
1602                                 phba->lpfc_injerr_wapp_cnt--;
1603                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1604                                         phba->lpfc_injerr_nportid = 0;
1605                                         phba->lpfc_injerr_lba =
1606                                                 LPFC_INJERR_LBA_OFF;
1607                                         memset(&phba->lpfc_injerr_wwpn,
1608                                                 0, sizeof(struct lpfc_name));
1609                                 }
1610                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1611
1612                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1613                                         "0813 BLKGRD: Injecting apptag error: "
1614                                         "write lba x%lx\n", (unsigned long)lba);
1615                                 break;
1616                         case SCSI_PROT_WRITE_STRIP:
1617                                 /*
1618                                  * For WRITE_STRIP and WRITE_PASS,
1619                                  * force the error on data
1620                                  * being copied from SLI-Host to SLI-Port.
1621                                  */
1622                                 *apptag = 0xDEAD;
1623                                 phba->lpfc_injerr_wapp_cnt--;
1624                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1625                                         phba->lpfc_injerr_nportid = 0;
1626                                         phba->lpfc_injerr_lba =
1627                                                 LPFC_INJERR_LBA_OFF;
1628                                         memset(&phba->lpfc_injerr_wwpn,
1629                                                 0, sizeof(struct lpfc_name));
1630                                 }
1631                                 rc = BG_ERR_INIT;
1632
1633                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1634                                         "0812 BLKGRD: Injecting apptag error: "
1635                                         "write lba x%lx\n", (unsigned long)lba);
1636                                 break;
1637                         }
1638                 }
1639                 if (phba->lpfc_injerr_rapp_cnt) {
1640                         switch (op) {
1641                         case SCSI_PROT_READ_INSERT:
1642                         case SCSI_PROT_READ_STRIP:
1643                         case SCSI_PROT_READ_PASS:
1644                                 /*
1645                                  * For READ_STRIP and READ_PASS, force the
1646                                  * error on data being read off the wire. It
1647                                  * should force an IO error to the driver.
1648                                  */
1649                                 *apptag = 0xDEAD;
1650                                 phba->lpfc_injerr_rapp_cnt--;
1651                                 if (phba->lpfc_injerr_rapp_cnt == 0) {
1652                                         phba->lpfc_injerr_nportid = 0;
1653                                         phba->lpfc_injerr_lba =
1654                                                 LPFC_INJERR_LBA_OFF;
1655                                         memset(&phba->lpfc_injerr_wwpn,
1656                                                 0, sizeof(struct lpfc_name));
1657                                 }
1658                                 rc = BG_ERR_INIT;
1659
1660                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1661                                         "0814 BLKGRD: Injecting apptag error: "
1662                                         "read lba x%lx\n", (unsigned long)lba);
1663                                 break;
1664                         }
1665                 }
1666         }
1667
1668
1669         /* Should we change the Guard Tag */
1670         if (new_guard) {
1671                 if (phba->lpfc_injerr_wgrd_cnt) {
1672                         switch (op) {
1673                         case SCSI_PROT_WRITE_PASS:
1674                                 rc = BG_ERR_CHECK;
1675                                 /* Drop thru */
1676
1677                         case SCSI_PROT_WRITE_INSERT:
1678                                 /*
1679                                  * For WRITE_INSERT, force the
1680                                  * error to be sent on the wire. It should be
1681                                  * detected by the Target.
1682                                  */
1683                                 phba->lpfc_injerr_wgrd_cnt--;
1684                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1685                                         phba->lpfc_injerr_nportid = 0;
1686                                         phba->lpfc_injerr_lba =
1687                                                 LPFC_INJERR_LBA_OFF;
1688                                         memset(&phba->lpfc_injerr_wwpn,
1689                                                 0, sizeof(struct lpfc_name));
1690                                 }
1691
1692                                 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1693                                 /* Signals the caller to swap CRC->CSUM */
1694
1695                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1696                                         "0817 BLKGRD: Injecting guard error: "
1697                                         "write lba x%lx\n", (unsigned long)lba);
1698                                 break;
1699                         case SCSI_PROT_WRITE_STRIP:
1700                                 /*
1701                                  * For WRITE_STRIP and WRITE_PASS,
1702                                  * force the error on data
1703                                  * being copied from SLI-Host to SLI-Port.
1704                                  */
1705                                 phba->lpfc_injerr_wgrd_cnt--;
1706                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1707                                         phba->lpfc_injerr_nportid = 0;
1708                                         phba->lpfc_injerr_lba =
1709                                                 LPFC_INJERR_LBA_OFF;
1710                                         memset(&phba->lpfc_injerr_wwpn,
1711                                                 0, sizeof(struct lpfc_name));
1712                                 }
1713
1714                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1715                                 /* Signals the caller to swap CRC->CSUM */
1716
1717                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1718                                         "0816 BLKGRD: Injecting guard error: "
1719                                         "write lba x%lx\n", (unsigned long)lba);
1720                                 break;
1721                         }
1722                 }
1723                 if (phba->lpfc_injerr_rgrd_cnt) {
1724                         switch (op) {
1725                         case SCSI_PROT_READ_INSERT:
1726                         case SCSI_PROT_READ_STRIP:
1727                         case SCSI_PROT_READ_PASS:
1728                                 /*
1729                                  * For READ_STRIP and READ_PASS, force the
1730                                  * error on data being read off the wire. It
1731                                  * should force an IO error to the driver.
1732                                  */
1733                                 phba->lpfc_injerr_rgrd_cnt--;
1734                                 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1735                                         phba->lpfc_injerr_nportid = 0;
1736                                         phba->lpfc_injerr_lba =
1737                                                 LPFC_INJERR_LBA_OFF;
1738                                         memset(&phba->lpfc_injerr_wwpn,
1739                                                 0, sizeof(struct lpfc_name));
1740                                 }
1741
1742                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1743                                 /* Signals the caller to swap CRC->CSUM */
1744
1745                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1746                                         "0818 BLKGRD: Injecting guard error: "
1747                                         "read lba x%lx\n", (unsigned long)lba);
1748                         }
1749                 }
1750         }
1751
1752         return rc;
1753 }
1754 #endif
1755
1756 /**
1757  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1758  * the specified SCSI command.
1759  * @phba: The Hba for which this call is being executed.
1760  * @sc: The SCSI command to examine
1761  * @txop: (out) BlockGuard operation for transmitted data
1762  * @rxop: (out) BlockGuard operation for received data
1763  *
1764  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1765  *
1766  **/
1767 static int
1768 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1769                 uint8_t *txop, uint8_t *rxop)
1770 {
1771         uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1772         uint8_t ret = 0;
1773
1774         if (guard_type == SHOST_DIX_GUARD_IP) {
1775                 switch (scsi_get_prot_op(sc)) {
1776                 case SCSI_PROT_READ_INSERT:
1777                 case SCSI_PROT_WRITE_STRIP:
1778                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1779                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1780                         break;
1781
1782                 case SCSI_PROT_READ_STRIP:
1783                 case SCSI_PROT_WRITE_INSERT:
1784                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1785                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1786                         break;
1787
1788                 case SCSI_PROT_READ_PASS:
1789                 case SCSI_PROT_WRITE_PASS:
1790                         *rxop = BG_OP_IN_CRC_OUT_CSUM;
1791                         *txop = BG_OP_IN_CSUM_OUT_CRC;
1792                         break;
1793
1794                 case SCSI_PROT_NORMAL:
1795                 default:
1796                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1797                                 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1798                                         scsi_get_prot_op(sc));
1799                         ret = 1;
1800                         break;
1801
1802                 }
1803         } else {
1804                 switch (scsi_get_prot_op(sc)) {
1805                 case SCSI_PROT_READ_STRIP:
1806                 case SCSI_PROT_WRITE_INSERT:
1807                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1808                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1809                         break;
1810
1811                 case SCSI_PROT_READ_PASS:
1812                 case SCSI_PROT_WRITE_PASS:
1813                         *rxop = BG_OP_IN_CRC_OUT_CRC;
1814                         *txop = BG_OP_IN_CRC_OUT_CRC;
1815                         break;
1816
1817                 case SCSI_PROT_READ_INSERT:
1818                 case SCSI_PROT_WRITE_STRIP:
1819                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1820                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1821                         break;
1822
1823                 case SCSI_PROT_NORMAL:
1824                 default:
1825                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1826                                 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1827                                         scsi_get_prot_op(sc));
1828                         ret = 1;
1829                         break;
1830                 }
1831         }
1832
1833         return ret;
1834 }
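
/*
 * Usage sketch (illustrative): callers typically do
 *
 *	uint8_t txop, rxop;
 *
 *	if (lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop))
 *		return 1;
 *
 * and then program txop/rxop into a PDE6 or DISEED descriptor, exactly as
 * the lpfc_bg_setup_* routines below do.
 */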
1835
1836 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1837 /**
1838  * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1839  * the specified SCSI command in order to force a guard tag error.
1840  * @phba: The Hba for which this call is being executed.
1841  * @sc: The SCSI command to examine
1842  * @txop: (out) BlockGuard operation for transmitted data
1843  * @rxop: (out) BlockGuard operation for received data
1844  *
1845  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1846  *
1847  **/
1848 static int
1849 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1850                 uint8_t *txop, uint8_t *rxop)
1851 {
1852         uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1853         uint8_t ret = 0;
1854
1855         if (guard_type == SHOST_DIX_GUARD_IP) {
1856                 switch (scsi_get_prot_op(sc)) {
1857                 case SCSI_PROT_READ_INSERT:
1858                 case SCSI_PROT_WRITE_STRIP:
1859                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1860                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1861                         break;
1862
1863                 case SCSI_PROT_READ_STRIP:
1864                 case SCSI_PROT_WRITE_INSERT:
1865                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1866                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1867                         break;
1868
1869                 case SCSI_PROT_READ_PASS:
1870                 case SCSI_PROT_WRITE_PASS:
1871                         *rxop = BG_OP_IN_CSUM_OUT_CRC;
1872                         *txop = BG_OP_IN_CRC_OUT_CSUM;
1873                         break;
1874
1875                 case SCSI_PROT_NORMAL:
1876                 default:
1877                         break;
1878
1879                 }
1880         } else {
1881                 switch (scsi_get_prot_op(sc)) {
1882                 case SCSI_PROT_READ_STRIP:
1883                 case SCSI_PROT_WRITE_INSERT:
1884                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1885                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1886                         break;
1887
1888                 case SCSI_PROT_READ_PASS:
1889                 case SCSI_PROT_WRITE_PASS:
1890                         *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1891                         *txop = BG_OP_IN_CSUM_OUT_CSUM;
1892                         break;
1893
1894                 case SCSI_PROT_READ_INSERT:
1895                 case SCSI_PROT_WRITE_STRIP:
1896                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1897                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1898                         break;
1899
1900                 case SCSI_PROT_NORMAL:
1901                 default:
1902                         break;
1903                 }
1904         }
1905
1906         return ret;
1907 }
1908 #endif
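
/*
 * Note (illustrative): lpfc_bg_err_opcodes() mirrors lpfc_sc_to_bg_opcodes()
 * but swaps CSUM and CRC within each opcode pair, e.g. BG_OP_IN_CRC_OUT_CSUM
 * becomes BG_OP_IN_CSUM_OUT_CRC. Programming the swapped opcodes makes the
 * hardware compute the wrong guard tag, which is how a BG_ERR_SWAP injection
 * forces a guard error.
 */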
1909
1910 /**
1911  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1912  * @phba: The Hba for which this call is being executed.
1913  * @sc: pointer to scsi command we're working on
1914  * @bpl: pointer to buffer list for protection groups
1915  * @datasegcnt: number of segments of data that have been dma mapped
1916  *
1917  * This function sets up BPL buffer list for protection groups of
1918  * type LPFC_PG_TYPE_NO_DIF
1919  *
1920  * This is usually used when the HBA is instructed to generate
1921  * DIFs and insert them into the data stream (or strip DIFs from
1922  * the incoming data stream).
1923  *
1924  * The buffer list consists of just one protection group described
1925  * below:
1926  *                                +-------------------------+
1927  *   start of prot group  -->     |          PDE_5          |
1928  *                                +-------------------------+
1929  *                                |          PDE_6          |
1930  *                                +-------------------------+
1931  *                                |         Data BDE        |
1932  *                                +-------------------------+
1933  *                                |more Data BDE's ... (opt)|
1934  *                                +-------------------------+
1935  *
1936  *
1937  * Note: Data s/g buffers have been dma mapped
1938  *
1939  * Returns the number of BDEs added to the BPL.
1940  **/
1941 static int
1942 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1943                 struct ulp_bde64 *bpl, int datasegcnt)
1944 {
1945         struct scatterlist *sgde = NULL; /* s/g data entry */
1946         struct lpfc_pde5 *pde5 = NULL;
1947         struct lpfc_pde6 *pde6 = NULL;
1948         dma_addr_t physaddr;
1949         int i = 0, num_bde = 0, status;
1950         int datadir = sc->sc_data_direction;
1951 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1952         uint32_t rc;
1953 #endif
1954         uint32_t checking = 1;
1955         uint32_t reftag;
1956         unsigned blksize;
1957         uint8_t txop, rxop;
1958
1959         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1960         if (status)
1961                 goto out;
1962
1963         /* extract some info from the scsi command for the pde */
1964         blksize = lpfc_cmd_blksize(sc);
1965         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1966
1967 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1968         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1969         if (rc) {
1970                 if (rc & BG_ERR_SWAP)
1971                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1972                 if (rc & BG_ERR_CHECK)
1973                         checking = 0;
1974         }
1975 #endif
1976
1977         /* setup PDE5 with what we have */
1978         pde5 = (struct lpfc_pde5 *) bpl;
1979         memset(pde5, 0, sizeof(struct lpfc_pde5));
1980         bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1981
1982         /* Endianness conversion if necessary for PDE5 */
1983         pde5->word0 = cpu_to_le32(pde5->word0);
1984         pde5->reftag = cpu_to_le32(reftag);
1985
1986         /* advance bpl and increment bde count */
1987         num_bde++;
1988         bpl++;
1989         pde6 = (struct lpfc_pde6 *) bpl;
1990
1991         /* setup PDE6 with the rest of the info */
1992         memset(pde6, 0, sizeof(struct lpfc_pde6));
1993         bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1994         bf_set(pde6_optx, pde6, txop);
1995         bf_set(pde6_oprx, pde6, rxop);
1996         if (datadir == DMA_FROM_DEVICE) {
1997                 bf_set(pde6_ce, pde6, checking);
1998                 bf_set(pde6_re, pde6, checking);
1999         }
2000         bf_set(pde6_ai, pde6, 1);
2001         bf_set(pde6_ae, pde6, 0);
2002         bf_set(pde6_apptagval, pde6, 0);
2003
2004         /* Endianness conversion if necessary for PDE6 */
2005         pde6->word0 = cpu_to_le32(pde6->word0);
2006         pde6->word1 = cpu_to_le32(pde6->word1);
2007         pde6->word2 = cpu_to_le32(pde6->word2);
2008
2009         /* advance bpl and increment bde count */
2010         num_bde++;
2011         bpl++;
2012
2013         /* assumption: caller has already run dma_map_sg on command data */
2014         scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2015                 physaddr = sg_dma_address(sgde);
2016                 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2017                 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2018                 bpl->tus.f.bdeSize = sg_dma_len(sgde);
2019                 if (datadir == DMA_TO_DEVICE)
2020                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2021                 else
2022                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2023                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2024                 bpl++;
2025                 num_bde++;
2026         }
2027
2028 out:
2029         return num_bde;
2030 }
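
/*
 * Worked example (illustrative): for a command with 3 mapped data
 * segments, the code above emits PDE5 + PDE6 + 3 data BDEs into the BPL,
 * so lpfc_bg_setup_bpl() returns num_bde == 5.
 */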
2031
2032 /**
2033  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2034  * @phba: The Hba for which this call is being executed.
2035  * @sc: pointer to scsi command we're working on
2036  * @bpl: pointer to buffer list for protection groups
2037  * @datacnt: number of segments of data that have been dma mapped
2038  * @protcnt: number of segments of protection data that have been dma mapped
2039  *
2040  * This function sets up BPL buffer list for protection groups of
2041  * type LPFC_PG_TYPE_DIF
2042  *
2043  * This is usually used when DIFs are in their own buffers,
2044  * separate from the data. The HBA can then be instructed
2045  * to place the DIFs in the outgoing stream.  For read operations,
2046  * the HBA could extract the DIFs and place them in DIF buffers.
2047  *
2048  * The buffer list for this type consists of one or more of the
2049  * protection groups described below:
2050  *                                    +-------------------------+
2051  *   start of first prot group  -->   |          PDE_5          |
2052  *                                    +-------------------------+
2053  *                                    |          PDE_6          |
2054  *                                    +-------------------------+
2055  *                                    |      PDE_7 (Prot BDE)   |
2056  *                                    +-------------------------+
2057  *                                    |        Data BDE         |
2058  *                                    +-------------------------+
2059  *                                    |more Data BDE's ... (opt)|
2060  *                                    +-------------------------+
2061  *   start of new  prot group  -->    |          PDE_5          |
2062  *                                    +-------------------------+
2063  *                                    |          ...            |
2064  *                                    +-------------------------+
2065  *
2066  * Note: It is assumed that both data and protection s/g buffers have been
2067  *       mapped for DMA
2068  *
2069  * Returns the number of BDEs added to the BPL.
2070  **/
2071 static int
2072 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2073                 struct ulp_bde64 *bpl, int datacnt, int protcnt)
2074 {
2075         struct scatterlist *sgde = NULL; /* s/g data entry */
2076         struct scatterlist *sgpe = NULL; /* s/g prot entry */
2077         struct lpfc_pde5 *pde5 = NULL;
2078         struct lpfc_pde6 *pde6 = NULL;
2079         struct lpfc_pde7 *pde7 = NULL;
2080         dma_addr_t dataphysaddr, protphysaddr;
2081         unsigned short curr_data = 0, curr_prot = 0;
2082         unsigned int split_offset;
2083         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2084         unsigned int protgrp_blks, protgrp_bytes;
2085         unsigned int remainder, subtotal;
2086         int status;
2087         int datadir = sc->sc_data_direction;
2088         unsigned char pgdone = 0, alldone = 0;
2089         unsigned blksize;
2090 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2091         uint32_t rc;
2092 #endif
2093         uint32_t checking = 1;
2094         uint32_t reftag;
2095         uint8_t txop, rxop;
2096         int num_bde = 0;
2097
2098         sgpe = scsi_prot_sglist(sc);
2099         sgde = scsi_sglist(sc);
2100
2101         if (!sgpe || !sgde) {
2102                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2103                                 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2104                                 sgpe, sgde);
2105                 return 0;
2106         }
2107
2108         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2109         if (status)
2110                 goto out;
2111
2112         /* extract some info from the scsi command */
2113         blksize = lpfc_cmd_blksize(sc);
2114         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2115
2116 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2117         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2118         if (rc) {
2119                 if (rc & BG_ERR_SWAP)
2120                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2121                 if (rc & BG_ERR_CHECK)
2122                         checking = 0;
2123         }
2124 #endif
2125
2126         split_offset = 0;
2127         do {
2128                 /* setup PDE5 with what we have */
2129                 pde5 = (struct lpfc_pde5 *) bpl;
2130                 memset(pde5, 0, sizeof(struct lpfc_pde5));
2131                 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2132
2133                 /* Endianness conversion if necessary for PDE5 */
2134                 pde5->word0 = cpu_to_le32(pde5->word0);
2135                 pde5->reftag = cpu_to_le32(reftag);
2136
2137                 /* advance bpl and increment bde count */
2138                 num_bde++;
2139                 bpl++;
2140                 pde6 = (struct lpfc_pde6 *) bpl;
2141
2142                 /* setup PDE6 with the rest of the info */
2143                 memset(pde6, 0, sizeof(struct lpfc_pde6));
2144                 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2145                 bf_set(pde6_optx, pde6, txop);
2146                 bf_set(pde6_oprx, pde6, rxop);
2147                 bf_set(pde6_ce, pde6, checking);
2148                 bf_set(pde6_re, pde6, checking);
2149                 bf_set(pde6_ai, pde6, 1);
2150                 bf_set(pde6_ae, pde6, 0);
2151                 bf_set(pde6_apptagval, pde6, 0);
2152
2153                 /* Endianness conversion if necessary for PDE6 */
2154                 pde6->word0 = cpu_to_le32(pde6->word0);
2155                 pde6->word1 = cpu_to_le32(pde6->word1);
2156                 pde6->word2 = cpu_to_le32(pde6->word2);
2157
2158                 /* advance bpl and increment bde count */
2159                 num_bde++;
2160                 bpl++;
2161
2162                 /* setup the first BDE that points to protection buffer */
2163                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2164                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2165
2166                 /* must be an integer multiple of the 8-byte DIF tuple length */
2167                 BUG_ON(protgroup_len % 8);
2168
2169                 pde7 = (struct lpfc_pde7 *) bpl;
2170                 memset(pde7, 0, sizeof(struct lpfc_pde7));
2171                 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2172
2173                 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2174                 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2175
2176                 protgrp_blks = protgroup_len / 8;
2177                 protgrp_bytes = protgrp_blks * blksize;
2178
2179                 /* check if this pde crosses the 4K boundary; if so, split it */
2180                 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2181                         protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2182                         protgroup_offset += protgroup_remainder;
2183                         protgrp_blks = protgroup_remainder / 8;
2184                         protgrp_bytes = protgrp_blks * blksize;
2185                 } else {
2186                         protgroup_offset = 0;
2187                         curr_prot++;
2188                 }
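
                /*
                 * Worked example (illustrative): if the DIF buffer starts at
                 * (addrLow & 0xfff) == 0xf80 with protgroup_len == 0x100,
                 * then 0xf80 + 0x100 > 0x1000, so only 0x1000 - 0xf80 == 0x80
                 * bytes (16 tuples) are used on this pass; protgroup_offset
                 * advances by 0x80 and the same s/g entry is revisited on the
                 * next loop iteration.
                 */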
2189
2190                 num_bde++;
2191
2192                 /* setup BDE's for data blocks associated with DIF data */
2193                 pgdone = 0;
2194                 subtotal = 0; /* total bytes processed for current prot grp */
2195                 while (!pgdone) {
2196                         if (!sgde) {
2197                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2198                                         "9065 BLKGRD:%s Invalid data segment\n",
2199                                                 __func__);
2200                                 return 0;
2201                         }
2202                         bpl++;
2203                         dataphysaddr = sg_dma_address(sgde) + split_offset;
2204                         bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2205                         bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2206
2207                         remainder = sg_dma_len(sgde) - split_offset;
2208
2209                         if ((subtotal + remainder) <= protgrp_bytes) {
2210                                 /* we can use this whole buffer */
2211                                 bpl->tus.f.bdeSize = remainder;
2212                                 split_offset = 0;
2213
2214                                 if ((subtotal + remainder) == protgrp_bytes)
2215                                         pgdone = 1;
2216                         } else {
2217                                 /* must split this buffer with next prot grp */
2218                                 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2219                                 split_offset += bpl->tus.f.bdeSize;
2220                         }
2221
2222                         subtotal += bpl->tus.f.bdeSize;
2223
2224                         if (datadir == DMA_TO_DEVICE)
2225                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2226                         else
2227                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2228                         bpl->tus.w = le32_to_cpu(bpl->tus.w);
2229
2230                         num_bde++;
2231                         curr_data++;
2232
2233                         if (split_offset)
2234                                 break;
2235
2236                         /* Move to the next s/g segment if possible */
2237                         sgde = sg_next(sgde);
2238
2239                 }
2240
2241                 if (protgroup_offset) {
2242                         /* update the reference tag */
2243                         reftag += protgrp_blks;
2244                         bpl++;
2245                         continue;
2246                 }
2247
2248                 /* are we done? */
2249                 if (curr_prot == protcnt) {
2250                         alldone = 1;
2251                 } else if (curr_prot < protcnt) {
2252                         /* advance to next prot buffer */
2253                         sgpe = sg_next(sgpe);
2254                         bpl++;
2255
2256                         /* update the reference tag */
2257                         reftag += protgrp_blks;
2258                 } else {
2259                         /* if we're here, we have a bug */
2260                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2261                                 "9054 BLKGRD: bug in %s\n", __func__);
2262                 }
2263
2264         } while (!alldone);
2265 out:
2266
2267         return num_bde;
2268 }
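
/*
 * Worked example (illustrative, assuming 512-byte blocks): a protection
 * segment with sg_dma_len(sgpe) == 64 holds 64 / 8 == 8 DIF tuples, so
 * protgrp_blks == 8 and protgrp_bytes == 8 * 512 == 4096. The inner while
 * loop then emits data BDEs until exactly 4096 bytes of data have been
 * described, splitting the last data segment if it straddles the
 * protection-group boundary.
 */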
2269
2270 /**
2271  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2272  * @phba: The Hba for which this call is being executed.
2273  * @sc: pointer to scsi command we're working on
2274  * @sgl: pointer to buffer list for protection groups
2275  * @datasegcnt: number of segments of data that have been dma mapped
2276  *
2277  * This function sets up SGL buffer list for protection groups of
2278  * type LPFC_PG_TYPE_NO_DIF
2279  *
2280  * This is usually used when the HBA is instructed to generate
2281  * DIFs and insert them into the data stream (or strip DIFs from
2282  * the incoming data stream).
2283  *
2284  * The buffer list consists of just one protection group described
2285  * below:
2286  *                                +-------------------------+
2287  *   start of prot group  -->     |         DI_SEED         |
2288  *                                +-------------------------+
2289  *                                |         Data SGE        |
2290  *                                +-------------------------+
2291  *                                |more Data SGE's ... (opt)|
2292  *                                +-------------------------+
2293  *
2294  *
2295  * Note: Data s/g buffers have been dma mapped
2296  *
2297  * Returns the number of SGEs added to the SGL.
2298  **/
2299 static int
2300 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2301                 struct sli4_sge *sgl, int datasegcnt)
2302 {
2303         struct scatterlist *sgde = NULL; /* s/g data entry */
2304         struct sli4_sge_diseed *diseed = NULL;
2305         dma_addr_t physaddr;
2306         int i = 0, num_sge = 0, status;
2307         int datadir = sc->sc_data_direction;
2308         uint32_t reftag;
2309         unsigned blksize;
2310         uint8_t txop, rxop;
2311 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2312         uint32_t rc;
2313 #endif
2314         uint32_t checking = 1;
2315         uint32_t dma_len;
2316         uint32_t dma_offset = 0;
2317
2318         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2319         if (status)
2320                 goto out;
2321
2322         /* extract some info from the scsi command for the pde */
2323         blksize = lpfc_cmd_blksize(sc);
2324         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2325
2326 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2327         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2328         if (rc) {
2329                 if (rc & BG_ERR_SWAP)
2330                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2331                 if (rc & BG_ERR_CHECK)
2332                         checking = 0;
2333         }
2334 #endif
2335
2336         /* setup DISEED with what we have */
2337         diseed = (struct sli4_sge_diseed *) sgl;
2338         memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2339         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2340
2341         /* Endianness conversion if necessary */
2342         diseed->ref_tag = cpu_to_le32(reftag);
2343         diseed->ref_tag_tran = diseed->ref_tag;
2344
2345         /* setup DISEED with the rest of the info */
2346         bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2347         bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2348         if (datadir == DMA_FROM_DEVICE) {
2349                 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2350                 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2351         }
2352         bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2353         bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2354
2355         /* Endianness conversion if necessary for DISEED */
2356         diseed->word2 = cpu_to_le32(diseed->word2);
2357         diseed->word3 = cpu_to_le32(diseed->word3);
2358
2359         /* advance sgl and increment sge count */
2360         num_sge++;
2361         sgl++;
2362
2363         /* assumption: caller has already run dma_map_sg on command data */
2364         scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2365                 physaddr = sg_dma_address(sgde);
2366                 dma_len = sg_dma_len(sgde);
2367                 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2368                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2369                 if ((i + 1) == datasegcnt)
2370                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2371                 else
2372                         bf_set(lpfc_sli4_sge_last, sgl, 0);
2373                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2374                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2375
2376                 sgl->sge_len = cpu_to_le32(dma_len);
2377                 dma_offset += dma_len;
2378
2379                 sgl++;
2380                 num_sge++;
2381         }
2382
2383 out:
2384         return num_sge;
2385 }
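
/*
 * Example (illustrative): the SLI-4 layout mirrors lpfc_bg_setup_bpl()
 * above, but a single DISEED SGE replaces the PDE5/PDE6 pair; a command
 * with 3 mapped data segments therefore returns num_sge == 4, and only
 * the final data SGE has lpfc_sli4_sge_last set.
 */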
2386
2387 /**
2388  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2389  * @phba: The Hba for which this call is being executed.
2390  * @sc: pointer to scsi command we're working on
2391  * @sgl: pointer to buffer list for protection groups
2392  * @datacnt: number of segments of data that have been dma mapped
2393  * @protcnt: number of segments of protection data that have been dma mapped
2394  *
2395  * This function sets up SGL buffer list for protection groups of
2396  * type LPFC_PG_TYPE_DIF
2397  *
2398  * This is usually used when DIFs are in their own buffers,
2399  * separate from the data. The HBA can then be instructed
2400  * to place the DIFs in the outgoing stream.  For read operations,
2401  * the HBA could extract the DIFs and place them in DIF buffers.
2402  *
2403  * The buffer list for this type consists of one or more of the
2404  * protection groups described below:
2405  *                                    +-------------------------+
2406  *   start of first prot group  -->   |         DISEED          |
2407  *                                    +-------------------------+
2408  *                                    |      DIF (Prot SGE)     |
2409  *                                    +-------------------------+
2410  *                                    |        Data SGE         |
2411  *                                    +-------------------------+
2412  *                                    |more Data SGE's ... (opt)|
2413  *                                    +-------------------------+
2414  *   start of new  prot group  -->    |         DISEED          |
2415  *                                    +-------------------------+
2416  *                                    |          ...            |
2417  *                                    +-------------------------+
2418  *
2419  * Note: It is assumed that both data and protection s/g buffers have been
2420  *       mapped for DMA
2421  *
2422  * Returns the number of SGEs added to the SGL.
2423  **/
2424 static int
2425 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2426                 struct sli4_sge *sgl, int datacnt, int protcnt)
2427 {
2428         struct scatterlist *sgde = NULL; /* s/g data entry */
2429         struct scatterlist *sgpe = NULL; /* s/g prot entry */
2430         struct sli4_sge_diseed *diseed = NULL;
2431         dma_addr_t dataphysaddr, protphysaddr;
2432         unsigned short curr_data = 0, curr_prot = 0;
2433         unsigned int split_offset;
2434         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2435         unsigned int protgrp_blks, protgrp_bytes;
2436         unsigned int remainder, subtotal;
2437         int status;
2438         unsigned char pgdone = 0, alldone = 0;
2439         unsigned blksize;
2440         uint32_t reftag;
2441         uint8_t txop, rxop;
2442         uint32_t dma_len;
2443 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2444         uint32_t rc;
2445 #endif
2446         uint32_t checking = 1;
2447         uint32_t dma_offset = 0;
2448         int num_sge = 0;
2449
2450         sgpe = scsi_prot_sglist(sc);
2451         sgde = scsi_sglist(sc);
2452
2453         if (!sgpe || !sgde) {
2454                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2455                                 "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2456                                 sgpe, sgde);
2457                 return 0;
2458         }
2459
2460         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2461         if (status)
2462                 goto out;
2463
2464         /* extract some info from the scsi command */
2465         blksize = lpfc_cmd_blksize(sc);
2466         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2467
2468 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2469         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2470         if (rc) {
2471                 if (rc & BG_ERR_SWAP)
2472                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2473                 if (rc & BG_ERR_CHECK)
2474                         checking = 0;
2475         }
2476 #endif
2477
2478         split_offset = 0;
2479         do {
2480                 /* setup DISEED with what we have */
2481                 diseed = (struct sli4_sge_diseed *) sgl;
2482                 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2483                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2484
2485                 /* Endianness conversion if necessary */
2486                 diseed->ref_tag = cpu_to_le32(reftag);
2487                 diseed->ref_tag_tran = diseed->ref_tag;
2488
2489                 /* setup DISEED with the rest of the info */
2490                 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2491                 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2492                 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2493                 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2494                 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2495                 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2496
2497                 /* Endianness conversion if necessary for DISEED */
2498                 diseed->word2 = cpu_to_le32(diseed->word2);
2499                 diseed->word3 = cpu_to_le32(diseed->word3);
2500
2501                 /* advance sgl and increment bde count */
2502                 num_sge++;
2503                 sgl++;
2504
2505                 /* set up the first SGE, which points to the protection buffer */
2506                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2507                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2508
2509                 /* must be an integer multiple of the 8-byte DIF tuple size */
2510                 BUG_ON(protgroup_len % 8);
2511
2512                 /* Now setup DIF SGE */
2513                 sgl->word2 = 0;
2514                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2515                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(protphysaddr));
2516                 sgl->addr_lo = cpu_to_le32(putPaddrLow(protphysaddr));
2517                 sgl->word2 = cpu_to_le32(sgl->word2);
2518
2519                 protgrp_blks = protgroup_len / 8;
2520                 protgrp_bytes = protgrp_blks * blksize;
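                /*
                 * Example: a 64-byte protection buffer holds eight 8-byte
                 * DIF tuples, so with 512-byte blocks this protection
                 * group covers 8 * 512 = 4096 bytes of data.
                 */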
2521
2522                 /* check if DIF SGE is crossing the 4K boundary; if so split */
2523                 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2524                         protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2525                         protgroup_offset += protgroup_remainder;
2526                         protgrp_blks = protgroup_remainder / 8;
2527                         protgrp_bytes = protgrp_blks * blksize;
2528                 } else {
2529                         protgroup_offset = 0;
2530                         curr_prot++;
2531                 }
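                /*
                 * Example: a DIF SGE starting at page offset 0xf80 with
                 * protgroup_len 0x100 crosses the 4 KB boundary; only
                 * 0x80 bytes (16 tuples) are emitted now, and the rest is
                 * carried into the next pass via protgroup_offset.
                 */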
2532
2533                 num_sge++;
2534
2535                 /* setup SGE's for data blocks associated with DIF data */
2536                 pgdone = 0;
2537                 subtotal = 0; /* total bytes processed for current prot grp */
2538                 while (!pgdone) {
2539                         if (!sgde) {
2540                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2541                                         "9086 BLKGRD:%s Invalid data segment\n",
2542                                                 __func__);
2543                                 return 0;
2544                         }
2545                         sgl++;
2546                         dataphysaddr = sg_dma_address(sgde) + split_offset;
2547
2548                         remainder = sg_dma_len(sgde) - split_offset;
2549
2550                         if ((subtotal + remainder) <= protgrp_bytes) {
2551                                 /* we can use this whole buffer */
2552                                 dma_len = remainder;
2553                                 split_offset = 0;
2554
2555                                 if ((subtotal + remainder) == protgrp_bytes)
2556                                         pgdone = 1;
2557                         } else {
2558                                 /* must split this buffer with next prot grp */
2559                                 dma_len = protgrp_bytes - subtotal;
2560                                 split_offset += dma_len;
2561                         }
2562
2563                         subtotal += dma_len;
2564
2565                         sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2566                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2567                         bf_set(lpfc_sli4_sge_last, sgl, 0);
2568                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2569                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2570
2571                         sgl->sge_len = cpu_to_le32(dma_len);
2572                         dma_offset += dma_len;
2573
2574                         num_sge++;
2575                         curr_data++;
2576
2577                         if (split_offset)
2578                                 break;
2579
2580                         /* Move to the next s/g segment if possible */
2581                         sgde = sg_next(sgde);
2582                 }
2583
2584                 if (protgroup_offset) {
2585                         /* update the reference tag */
2586                         reftag += protgrp_blks;
2587                         sgl++;
2588                         continue;
2589                 }
2590
2591                 /* are we done ? */
2592                 if (curr_prot == protcnt) {
2593                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2594                         alldone = 1;
2595                 } else if (curr_prot < protcnt) {
2596                         /* advance to next prot buffer */
2597                         sgpe = sg_next(sgpe);
2598                         sgl++;
2599
2600                         /* update the reference tag */
2601                         reftag += protgrp_blks;
2602                 } else {
2603                         /* if we're here, we have a bug */
2604                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2605                                 "9085 BLKGRD: bug in %s\n", __func__);
2606                 }
2607
2608         } while (!alldone);
2609
2610 out:
2611
2612         return num_sge;
2613 }
2614
2615 /**
2616  * lpfc_prot_group_type - Get protection group type of SCSI command
2617  * @phba: The Hba for which this call is being executed.
2618  * @sc: pointer to scsi command we're working on
2619  *
2620  * Given a SCSI command that supports DIF, determine the composition of the
2621  * protection groups involved in setting up the buffer lists.
2622  *
2623  * Returns: Protection group type (with or without DIF)
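 * LPFC_PG_TYPE_NO_DIF means no separate protection buffer accompanies
 * the data (the DIF is added or removed in flight), while
 * LPFC_PG_TYPE_DIF_BUF means protection buffers are passed in and must
 * also be DMA mapped.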
2624  *
2625  **/
2626 static int
2627 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2628 {
2629         int ret = LPFC_PG_TYPE_INVALID;
2630         unsigned char op = scsi_get_prot_op(sc);
2631
2632         switch (op) {
2633         case SCSI_PROT_READ_STRIP:
2634         case SCSI_PROT_WRITE_INSERT:
2635                 ret = LPFC_PG_TYPE_NO_DIF;
2636                 break;
2637         case SCSI_PROT_READ_INSERT:
2638         case SCSI_PROT_WRITE_STRIP:
2639         case SCSI_PROT_READ_PASS:
2640         case SCSI_PROT_WRITE_PASS:
2641                 ret = LPFC_PG_TYPE_DIF_BUF;
2642                 break;
2643         default:
2644                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2645                                 "9021 Unsupported protection op:%d\n", op);
2646                 break;
2647         }
2648
2649         return ret;
2650 }
2651
2652 /**
2653  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2654  * @phba: The Hba for which this call is being executed.
2655  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2656  *
2657  * This is the protection/DIF aware version of
2658  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2659  * two functions eventually, but for now, it's here
2660  **/
2661 static int
2662 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2663                 struct lpfc_scsi_buf *lpfc_cmd)
2664 {
2665         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2666         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2667         struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2668         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2669         uint32_t num_bde = 0;
2670         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2671         int prot_group_type = 0;
2672         int diflen, fcpdl;
2673         unsigned blksize;
2674
2675         /*
2676          * Start the lpfc command prep by bumping the bpl beyond the
2677          * fcp_cmnd and fcp_rsp regions to the first data bde entry.
2678          */
2679         bpl += 2;
2680         if (scsi_sg_count(scsi_cmnd)) {
2681                 /*
2682                  * The driver stores the segment count returned from pci_map_sg
2683                  * because this is a count of dma-mappings used to map the use_sg
2684                  * pages.  They are not guaranteed to be the same for those
2685                  * architectures that implement an IOMMU.
2686                  */
2687                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2688                                         scsi_sglist(scsi_cmnd),
2689                                         scsi_sg_count(scsi_cmnd), datadir);
2690                 if (unlikely(!datasegcnt))
2691                         return 1;
2692
2693                 lpfc_cmd->seg_cnt = datasegcnt;
2694                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2695                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2696                                         "9067 BLKGRD: %s: Too many sg segments"
2697                                         " from dma_map_sg.  Config %d, seg_cnt"
2698                                         " %d\n",
2699                                         __func__, phba->cfg_sg_seg_cnt,
2700                                         lpfc_cmd->seg_cnt);
2701                         scsi_dma_unmap(scsi_cmnd);
2702                         return 1;
2703                 }
2704
2705                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2706
2707                 switch (prot_group_type) {
2708                 case LPFC_PG_TYPE_NO_DIF:
2709                         num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2710                                         datasegcnt);
2711                         /* we should have 2 or more entries in buffer list */
2712                         if (num_bde < 2)
2713                                 goto err;
2714                         break;
2715                 case LPFC_PG_TYPE_DIF_BUF:{
2716                         /*
2717                          * This type indicates that protection buffers are
2718                          * passed to the driver, so that needs to be prepared
2719                          * for DMA
2720                          */
2721                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
2722                                         scsi_prot_sglist(scsi_cmnd),
2723                                         scsi_prot_sg_count(scsi_cmnd), datadir);
2724                         if (unlikely(!protsegcnt)) {
2725                                 scsi_dma_unmap(scsi_cmnd);
2726                                 return 1;
2727                         }
2728
2729                         lpfc_cmd->prot_seg_cnt = protsegcnt;
2730                         if (lpfc_cmd->prot_seg_cnt
2731                             > phba->cfg_prot_sg_seg_cnt) {
2732                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2733                                         "9068 BLKGRD: %s: Too many prot sg "
2734                                         "segments from dma_map_sg.  Config %d, "
2735                                                 "prot_seg_cnt %d\n", __func__,
2736                                                 phba->cfg_prot_sg_seg_cnt,
2737                                                 lpfc_cmd->prot_seg_cnt);
2738                                 dma_unmap_sg(&phba->pcidev->dev,
2739                                              scsi_prot_sglist(scsi_cmnd),
2740                                              scsi_prot_sg_count(scsi_cmnd),
2741                                              datadir);
2742                                 scsi_dma_unmap(scsi_cmnd);
2743                                 return 1;
2744                         }
2745
2746                         num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2747                                         datasegcnt, protsegcnt);
2748                         /* we should have 3 or more entries in buffer list */
2749                         if (num_bde < 3)
2750                                 goto err;
2751                         break;
2752                 }
2753                 case LPFC_PG_TYPE_INVALID:
2754                 default:
2755                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2756                                         "9022 Unexpected protection group %i\n",
2757                                         prot_group_type);
2758                         return 1;
2759                 }
2760         }
2761
2762         /*
2763          * Finish initializing those IOCB fields that are dependent on the
2764          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2765          * reinitialized since all iocb memory resources are used many times
2766          * for transmit, receive, and continuation bpl's.
2767          */
2768         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2769         iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2770         iocb_cmd->ulpBdeCount = 1;
2771         iocb_cmd->ulpLe = 1;
2772
2773         fcpdl = scsi_bufflen(scsi_cmnd);
2774
2775         if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2776                 /*
2777                  * We are in DIF Type 1 mode
2778                  * Every data block has an 8 byte DIF (trailer)
2779                  * attached to it.  Must adjust the FCP data length
2780                  */
2781                 blksize = lpfc_cmd_blksize(scsi_cmnd);
2782                 diflen = (fcpdl / blksize) * 8;
2783                 fcpdl += diflen;
2784         }
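        /*
         * Example: a 4096-byte Type 1 transfer with 512-byte blocks adds
         * 8 blocks * 8 bytes = 64 bytes of DIF, so fcpdl becomes 4160.
         */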
2785         fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
2786
2787         /*
2788          * Due to difference in data length between DIF/non-DIF paths,
2789          * we need to set word 4 of IOCB here
2790          */
2791         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2792
2793         return 0;
2794 err:
2795         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2796                         "9023 Could not setup all needed BDE's "
2797                         "prot_group_type=%d, num_bde=%d\n",
2798                         prot_group_type, num_bde);
2799         return 1;
2800 }
2801
2802 /*
2803  * This function checks for BlockGuard errors detected by
2804  * the HBA.  In case of errors, the ASC/ASCQ fields in the
2805  * sense buffer will be set accordingly, paired with
2806  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2807  * detected corruption.
2808  *
2809  * Returns:
2810  *  0 - No error found
2811  *  1 - BlockGuard error found
2812  * -1 - Internal error (bad profile, ...etc)
2813  */
2814 static int
2815 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2816                         struct lpfc_iocbq *pIocbOut)
2817 {
2818         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2819         struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2820         int ret = 0;
2821         uint32_t bghm = bgf->bghm;
2822         uint32_t bgstat = bgf->bgstat;
2823         uint64_t failing_sector = 0;
2824
2825         lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2826                         " 0x%x lba 0x%llx blk cnt 0x%x "
2827                         "bgstat=0x%x bghm=0x%x\n",
2828                         cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
2829                         blk_rq_sectors(cmd->request), bgstat, bghm);
2830
2831         spin_lock(&_dump_buf_lock);
2832         if (!_dump_buf_done) {
2833                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
2834                         " Data for %u blocks to debugfs\n",
2835                                 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2836                 lpfc_debug_save_data(phba, cmd);
2837
2838                 /* If we have a prot sgl, save the DIF buffer */
2839                 if (lpfc_prot_group_type(phba, cmd) ==
2840                                 LPFC_PG_TYPE_DIF_BUF) {
2841                         lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
2842                                 "Saving DIF for %u blocks to debugfs\n",
2843                                 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2844                         lpfc_debug_save_dif(phba, cmd);
2845                 }
2846
2847                 _dump_buf_done = 1;
2848         }
2849         spin_unlock(&_dump_buf_lock);
2850
2851         if (lpfc_bgs_get_invalid_prof(bgstat)) {
2852                 cmd->result = ScsiResult(DID_ERROR, 0);
2853                 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
2854                         " BlockGuard profile. bgstat:0x%x\n",
2855                         bgstat);
2856                 ret = (-1);
2857                 goto out;
2858         }
2859
2860         if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2861                 cmd->result = ScsiResult(DID_ERROR, 0);
2862                 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
2863                                 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
2864                                 bgstat);
2865                 ret = (-1);
2866                 goto out;
2867         }
2868
2869         if (lpfc_bgs_get_guard_err(bgstat)) {
2870                 ret = 1;
2871
2872                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2873                                 0x10, 0x1);
2874                 cmd->result = DRIVER_SENSE << 24
2875                         | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2876                 phba->bg_guard_err_cnt++;
2877                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2878                         "9055 BLKGRD: guard_tag error\n");
2879         }
2880
2881         if (lpfc_bgs_get_reftag_err(bgstat)) {
2882                 ret = 1;
2883
2884                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2885                                 0x10, 0x3);
2886                 cmd->result = DRIVER_SENSE << 24
2887                         | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2888
2889                 phba->bg_reftag_err_cnt++;
2890                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2891                         "9056 BLKGRD: ref_tag error\n");
2892         }
2893
2894         if (lpfc_bgs_get_apptag_err(bgstat)) {
2895                 ret = 1;
2896
2897                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2898                                 0x10, 0x2);
2899                 cmd->result = DRIVER_SENSE << 24
2900                         | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2901
2902                 phba->bg_apptag_err_cnt++;
2903                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2904                         "9061 BLKGRD: app_tag error\n");
2905         }
2906
2907         if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2908                 /*
2909                  * setup sense data descriptor 0 per SPC-4 as an information
2910                  * field, and put the failing LBA in it.
2911                  * This code assumes there was also a guard/app/ref tag error
2912                  * indication.
2913                  */
2914                 cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
2915                 cmd->sense_buffer[8] = 0;     /* Information descriptor type */
2916                 cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
2917                 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2918
2919                 /* bghm is an "on the wire" FC frame based count */
2920                 switch (scsi_get_prot_op(cmd)) {
2921                 case SCSI_PROT_READ_INSERT:
2922                 case SCSI_PROT_WRITE_STRIP:
2923                         bghm /= cmd->device->sector_size;
2924                         break;
2925                 case SCSI_PROT_READ_STRIP:
2926                 case SCSI_PROT_WRITE_INSERT:
2927                 case SCSI_PROT_READ_PASS:
2928                 case SCSI_PROT_WRITE_PASS:
2929                         bghm /= (cmd->device->sector_size +
2930                                 sizeof(struct scsi_dif_tuple));
2931                         break;
2932                 }
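                /*
                 * Example: with 512-byte sectors, ops with no DIF on the
                 * wire (READ_INSERT/WRITE_STRIP) divide bghm by 512; ops
                 * that carry DIF on the wire divide by 520 (512 data
                 * bytes plus one 8-byte DIF tuple per block).
                 */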
2933
2934                 failing_sector = scsi_get_lba(cmd);
2935                 failing_sector += bghm;
2936
2937                 /* Descriptor Information */
2938                 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2939         }
2940
2941         if (!ret) {
2942                 /* No error was reported - problem in FW? */
2943                 cmd->result = ScsiResult(DID_ERROR, 0);
2944                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2945                         "9057 BLKGRD: Unknown error reported!\n");
2946         }
2947
2948 out:
2949         return ret;
2950 }
2951
2952 /**
2953  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
2954  * @phba: The Hba for which this call is being executed.
2955  * @lpfc_cmd: The scsi buffer which is going to be mapped.
2956  *
2957  * This routine does the pci dma mapping for the scatter-gather list of the
2958  * scsi cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec.
2959  *
2960  * Return codes:
2961  *      1 - Error
2962  *      0 - Success
2963  **/
2964 static int
2965 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2966 {
2967         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2968         struct scatterlist *sgel = NULL;
2969         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2970         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
2971         struct sli4_sge *first_data_sgl;
2972         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2973         dma_addr_t physaddr;
2974         uint32_t num_bde = 0;
2975         uint32_t dma_len;
2976         uint32_t dma_offset = 0;
2977         int nseg;
2978         struct ulp_bde64 *bde;
2979
2980         /*
2981          * There are three possibilities here - use scatter-gather segment, use
2982          * the single mapping, or neither.  Start the lpfc command prep by
2983          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2984          * data bde entry.
2985          */
2986         if (scsi_sg_count(scsi_cmnd)) {
2987                 /*
2988                  * The driver stores the segment count returned from pci_map_sg
2989                  * because this is a count of dma-mappings used to map the use_sg
2990                  * pages.  They are not guaranteed to be the same for those
2991                  * architectures that implement an IOMMU.
2992                  */
2993
2994                 nseg = scsi_dma_map(scsi_cmnd);
2995                 if (unlikely(!nseg))
2996                         return 1;
2997                 sgl += 1;
2998                 /* clear the last flag in the fcp_rsp map entry */
2999                 sgl->word2 = le32_to_cpu(sgl->word2);
3000                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3001                 sgl->word2 = cpu_to_le32(sgl->word2);
3002                 sgl += 1;
3003                 first_data_sgl = sgl;
3004                 lpfc_cmd->seg_cnt = nseg;
3005                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3006                         lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3007                                 " %s: Too many sg segments from "
3008                                 "dma_map_sg.  Config %d, seg_cnt %d\n",
3009                                 __func__, phba->cfg_sg_seg_cnt,
3010                                lpfc_cmd->seg_cnt);
3011                         scsi_dma_unmap(scsi_cmnd);
3012                         return 1;
3013                 }
3014
3015                 /*
3016                  * The driver established a maximum scatter-gather segment count
3017                  * during probe that limits the number of sg elements in any
3018                  * single scsi command.  Just run through the seg_cnt and format
3019                  * the sge's.
3020                  * When using SLI-3 the driver will try to fit all the BDEs into
3021                  * the IOCB. If it can't then the BDEs get added to a BPL as it
3022                  * does for SLI-2 mode.
3023                  */
3024                 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3025                         physaddr = sg_dma_address(sgel);
3026                         dma_len = sg_dma_len(sgel);
3027                         sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3028                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3029                         sgl->word2 = le32_to_cpu(sgl->word2);
3030                         if ((num_bde + 1) == nseg)
3031                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3032                         else
3033                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3034                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3035                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3036                         sgl->word2 = cpu_to_le32(sgl->word2);
3037                         sgl->sge_len = cpu_to_le32(dma_len);
3038                         dma_offset += dma_len;
3039                         sgl++;
3040                 }
3041                 /* setup the performance hint (first data BDE) if enabled */
3042                 if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3043                         bde = (struct ulp_bde64 *)
3044                                         &(iocb_cmd->unsli3.sli3Words[5]);
3045                         bde->addrLow = first_data_sgl->addr_lo;
3046                         bde->addrHigh = first_data_sgl->addr_hi;
3047                         bde->tus.f.bdeSize =
3048                                         le32_to_cpu(first_data_sgl->sge_len);
3049                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3050                         bde->tus.w = cpu_to_le32(bde->tus.w);
3051                 }
3052         } else {
3053                 sgl += 1;
3054                 /* clear the last flag in the fcp_rsp map entry */
3055                 sgl->word2 = le32_to_cpu(sgl->word2);
3056                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3057                 sgl->word2 = cpu_to_le32(sgl->word2);
3058         }
3059
3060         /*
3061          * Finish initializing those IOCB fields that are dependent on the
3062          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3063          * explicitly reinitialized since all iocb memory resources
3064          * are reused.
3065          */
3066         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3067
3068         /*
3069          * Due to difference in data length between DIF/non-DIF paths,
3070          * we need to set word 4 of IOCB here
3071          */
3072         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3073         return 0;
3074 }
3075
3076 /**
3077  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3078  * @phba: The Hba for which this call is being executed.
3079  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3080  *
3081  * Adjust the data length to account for how much data
3082  * is actually on the wire.
3083  *
3084  * returns the adjusted data length
3085  **/
3086 static int
3087 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3088                 struct lpfc_scsi_buf *lpfc_cmd)
3089 {
3090         struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3091         int diflen, fcpdl;
3092         unsigned blksize;
3093
3094         fcpdl = scsi_bufflen(sc);
3095
3096         /* Check if there is protection data on the wire */
3097         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3098                 /* Read */
3099                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
3100                         return fcpdl;
3101
3102         } else {
3103                 /* Write */
3104                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
3105                         return fcpdl;
3106         }
3107
3108         /* If protection data is on the wire, adjust the count accordingly */
3109         blksize = lpfc_cmd_blksize(sc);
3110         diflen = (fcpdl / blksize) * 8;
3111         fcpdl += diflen;
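        /* e.g. fcpdl 4096 with 512-byte blocks: diflen 64, adjusted 4160 */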
3112         return fcpdl;
3113 }
3114
3115 /**
3116  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3117  * @phba: The Hba for which this call is being executed.
3118  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3119  *
3120  * This is the protection/DIF aware version of
3121  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3122  * two functions eventually, but for now, it's here
3123  **/
3124 static int
3125 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3126                 struct lpfc_scsi_buf *lpfc_cmd)
3127 {
3128         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3129         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3130         struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3131         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3132         uint32_t num_bde = 0;
3133         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3134         int prot_group_type = 0;
3135         int fcpdl;
3136
3137         /*
3138          * Start the lpfc command prep by bumping the sgl beyond the
3139          * fcp_cmnd and fcp_rsp regions to the first data bde entry.
3140          */
3141         if (scsi_sg_count(scsi_cmnd)) {
3142                 /*
3143                  * The driver stores the segment count returned from pci_map_sg
3144                  * because this is a count of dma-mappings used to map the use_sg
3145                  * pages.  They are not guaranteed to be the same for those
3146                  * architectures that implement an IOMMU.
3147                  */
3148                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3149                                         scsi_sglist(scsi_cmnd),
3150                                         scsi_sg_count(scsi_cmnd), datadir);
3151                 if (unlikely(!datasegcnt))
3152                         return 1;
3153
3154                 sgl += 1;
3155                 /* clear the last flag in the fcp_rsp map entry */
3156                 sgl->word2 = le32_to_cpu(sgl->word2);
3157                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3158                 sgl->word2 = cpu_to_le32(sgl->word2);
3159
3160                 sgl += 1;
3161                 lpfc_cmd->seg_cnt = datasegcnt;
3162                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3163                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3164                                         "9087 BLKGRD: %s: Too many sg segments"
3165                                         " from dma_map_sg.  Config %d, seg_cnt"
3166                                         " %d\n",
3167                                         __func__, phba->cfg_sg_seg_cnt,
3168                                         lpfc_cmd->seg_cnt);
3169                         scsi_dma_unmap(scsi_cmnd);
3170                         return 1;
3171                 }
3172
3173                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3174
3175                 switch (prot_group_type) {
3176                 case LPFC_PG_TYPE_NO_DIF:
3177                         num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3178                                         datasegcnt);
3179                         /* we should have 2 or more entries in buffer list */
3180                         if (num_bde < 2)
3181                                 goto err;
3182                         break;
3183                 case LPFC_PG_TYPE_DIF_BUF:{
3184                         /*
3185                          * This type indicates that protection buffers are
3186                          * passed to the driver, so they also need to be
3187                          * prepared for DMA.
3188                          */
3189                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
3190                                         scsi_prot_sglist(scsi_cmnd),
3191                                         scsi_prot_sg_count(scsi_cmnd), datadir);
3192                         if (unlikely(!protsegcnt)) {
3193                                 scsi_dma_unmap(scsi_cmnd);
3194                                 return 1;
3195                         }
3196
3197                         lpfc_cmd->prot_seg_cnt = protsegcnt;
3198                         if (lpfc_cmd->prot_seg_cnt
3199                             > phba->cfg_prot_sg_seg_cnt) {
3200                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3201                                         "9088 BLKGRD: %s: Too many prot sg "
3202                                         "segments from dma_map_sg.  Config %d, "
3203                                                 "prot_seg_cnt %d\n", __func__,
3204                                                 phba->cfg_prot_sg_seg_cnt,
3205                                                 lpfc_cmd->prot_seg_cnt);
3206                                 dma_unmap_sg(&phba->pcidev->dev,
3207                                              scsi_prot_sglist(scsi_cmnd),
3208                                              scsi_prot_sg_count(scsi_cmnd),
3209                                              datadir);
3210                                 scsi_dma_unmap(scsi_cmnd);
3211                                 return 1;
3212                         }
3213
3214                         num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3215                                         datasegcnt, protsegcnt);
3216                         /* we should have 3 or more entries in buffer list */
3217                         if (num_bde < 3)
3218                                 goto err;
3219                         break;
3220                 }
3221                 case LPFC_PG_TYPE_INVALID:
3222                 default:
3223                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3224                                         "9083 Unexpected protection group %i\n",
3225                                         prot_group_type);
3226                         return 1;
3227                 }
3228         }
3229
3230         switch (scsi_get_prot_op(scsi_cmnd)) {
3231         case SCSI_PROT_WRITE_STRIP:
3232         case SCSI_PROT_READ_STRIP:
3233                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3234                 break;
3235         case SCSI_PROT_WRITE_INSERT:
3236         case SCSI_PROT_READ_INSERT:
3237                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3238                 break;
3239         case SCSI_PROT_WRITE_PASS:
3240         case SCSI_PROT_READ_PASS:
3241                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3242                 break;
3243         }
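        /*
         * The iocb_flag bits above record the chosen DIF mode; the SLI-4
         * WQE build path is expected to use them to program the matching
         * hardware DIF operation.
         */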
3244
3245         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3246
3247         fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3248
3249         /*
3250          * Due to difference in data length between DIF/non-DIF paths,
3251          * we need to set word 4 of IOCB here
3252          */
3253         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3254
3255         return 0;
3256 err:
3257         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3258                         "9084 Could not setup all needed BDE's "
3259                         "prot_group_type=%d, num_bde=%d\n",
3260                         prot_group_type, num_bde);
3261         return 1;
3262 }
3263
3264 /**
3265  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3266  * @phba: The Hba for which this call is being executed.
3267  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3268  *
3269  * This routine wraps the actual DMA mapping function pointer from the
3270  * lpfc_hba struct.
3271  *
3272  * Return codes:
3273  *      1 - Error
3274  *      0 - Success
3275  **/
3276 static inline int
3277 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3278 {
3279         return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3280 }
3281
3282 /**
3283  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3284  * using BlockGuard.
3285  * @phba: The Hba for which this call is being executed.
3286  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3287  *
3288  * This routine wraps the actual DMA mapping function pointer from the
3289  * lpfc_hba struct.
3290  *
3291  * Return codes:
3292  *      1 - Error
3293  *      0 - Success
3294  **/
3295 static inline int
3296 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3297 {
3298         return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3299 }
3300
3301 /**
3302  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3303  * @phba: Pointer to hba context object.
3304  * @vport: Pointer to vport object.
3305  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3306  * @rsp_iocb: Pointer to response iocb object which reported error.
3307  *
3308  * This function posts an event when there is a SCSI command reporting
3309  * error from the scsi device.
3310  **/
3311 static void
3312 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3313                 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
3314         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3315         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3316         uint32_t resp_info = fcprsp->rspStatus2;
3317         uint32_t scsi_status = fcprsp->rspStatus3;
3318         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3319         struct lpfc_fast_path_event *fast_path_evt = NULL;
3320         struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3321         unsigned long flags;
3322
3323         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3324                 return;
3325
3326         /* If there is a queue-full or busy condition, send a scsi event */
3327         if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3328                 (cmnd->result == SAM_STAT_BUSY)) {
3329                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3330                 if (!fast_path_evt)
3331                         return;
3332                 fast_path_evt->un.scsi_evt.event_type =
3333                         FC_REG_SCSI_EVENT;
3334                 fast_path_evt->un.scsi_evt.subcategory =
3335                 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3336                 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3337                 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3338                 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3339                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3340                 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3341                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3342         } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3343                 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3344                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3345                 if (!fast_path_evt)
3346                         return;
3347                 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3348                         FC_REG_SCSI_EVENT;
3349                 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3350                         LPFC_EVENT_CHECK_COND;
3351                 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3352                         cmnd->device->lun;
3353                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3354                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3355                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3356                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3357                 fast_path_evt->un.check_cond_evt.sense_key =
3358                         cmnd->sense_buffer[2] & 0xf;
3359                 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3360                 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3361         } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3362                      fcpi_parm &&
3363                      ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3364                         ((scsi_status == SAM_STAT_GOOD) &&
3365                         !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3366                 /*
3367                  * If fcpi_parm is valid and the resid does not match it, or the
3368                  * status is good with no residual flags, it is a read_check error.
3369                  */
3370                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3371                 if (!fast_path_evt)
3372                         return;
3373                 fast_path_evt->un.read_check_error.header.event_type =
3374                         FC_REG_FABRIC_EVENT;
3375                 fast_path_evt->un.read_check_error.header.subcategory =
3376                         LPFC_EVENT_FCPRDCHKERR;
3377                 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3378                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3379                 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3380                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3381                 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3382                 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3383                 fast_path_evt->un.read_check_error.fcpiparam =
3384                         fcpi_parm;
3385         } else
3386                 return;
3387
3388         fast_path_evt->vport = vport;
3389         spin_lock_irqsave(&phba->hbalock, flags);
3390         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3391         spin_unlock_irqrestore(&phba->hbalock, flags);
3392         lpfc_worker_wake_up(phba);
3393         return;
3394 }
3395
3396 /**
3397  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3398  * @phba: The HBA for which this call is being executed.
3399  * @psb: The scsi buffer which is going to be un-mapped.
3400  *
3401  * This routine does DMA un-mapping of the scatter gather lists of the
3402  * scsi command held in @psb.
3403  **/
3404 static void
3405 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3406 {
3407         /*
3408          * There are only two special cases to consider.  (1) the scsi command
3409          * requested scatter-gather usage or (2) the scsi command allocated
3410          * a request buffer, but did not request use_sg.  There is a third
3411          * case, but it does not require resource deallocation.
3412          */
3413         if (psb->seg_cnt > 0)
3414                 scsi_dma_unmap(psb->pCmd);
3415         if (psb->prot_seg_cnt > 0)
3416                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3417                                 scsi_prot_sg_count(psb->pCmd),
3418                                 psb->pCmd->sc_data_direction);
3419 }
3420
3421 /**
3422  * lpfc_handle_fcp_err - FCP response handler
3423  * @vport: The virtual port for which this call is being executed.
3424  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3425  * @rsp_iocb: The response IOCB which contains FCP error.
3426  *
3427  * This routine is called to process a response IOCB with status field
3428  * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
3429  * based upon the SCSI and FCP error.
3430  **/
3431 static void
3432 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3433                     struct lpfc_iocbq *rsp_iocb)
3434 {
3435         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3436         struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3437         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3438         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3439         uint32_t resp_info = fcprsp->rspStatus2;
3440         uint32_t scsi_status = fcprsp->rspStatus3;
3441         uint32_t *lp;
3442         uint32_t host_status = DID_OK;
3443         uint32_t rsplen = 0;
3444         uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3445
3446
3447         /*
3448          *  If this is a task management command, there is no
3449          *  scsi packet associated with this lpfc_cmd.  The driver
3450          *  consumes it.
3451          */
3452         if (fcpcmd->fcpCntl2) {
3453                 scsi_status = 0;
3454                 goto out;
3455         }
3456
3457         if (resp_info & RSP_LEN_VALID) {
3458                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3459                 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3460                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3461                                  "2719 Invalid response length: "
3462                                  "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3463                                  cmnd->device->id,
3464                                  cmnd->device->lun, cmnd->cmnd[0],
3465                                  rsplen);
3466                         host_status = DID_ERROR;
3467                         goto out;
3468                 }
3469                 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3470                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3471                                  "2757 Protocol failure detected during "
3472                                  "processing of FCP I/O op: "
3473                                  "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3474                                  cmnd->device->id,
3475                                  cmnd->device->lun, cmnd->cmnd[0],
3476                                  fcprsp->rspInfo3);
3477                         host_status = DID_ERROR;
3478                         goto out;
3479                 }
3480         }
3481
3482         if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3483                 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3484                 if (snslen > SCSI_SENSE_BUFFERSIZE)
3485                         snslen = SCSI_SENSE_BUFFERSIZE;
3486
3487                 if (resp_info & RSP_LEN_VALID)
3488                         rsplen = be32_to_cpu(fcprsp->rspRspLen);
3489                 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3490         }
3491         lp = (uint32_t *)cmnd->sense_buffer;
3492
3493         /* special handling for under run conditions */
3494         if (!scsi_status && (resp_info & RESID_UNDER)) {
3495                 /* don't log under runs if fcp set... */
3496                 if (vport->cfg_log_verbose & LOG_FCP)
3497                         logit = LOG_FCP_ERROR;
3498                 /* unless operator says so */
3499                 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3500                         logit = LOG_FCP_UNDER;
3501         }
3502
3503         lpfc_printf_vlog(vport, KERN_WARNING, logit,
3504                          "9024 FCP command x%x failed: x%x SNS x%x x%x "
3505                          "Data: x%x x%x x%x x%x x%x\n",
3506                          cmnd->cmnd[0], scsi_status,
3507                          be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3508                          be32_to_cpu(fcprsp->rspResId),
3509                          be32_to_cpu(fcprsp->rspSnsLen),
3510                          be32_to_cpu(fcprsp->rspRspLen),
3511                          fcprsp->rspInfo3);
3512
3513         scsi_set_resid(cmnd, 0);
3514         if (resp_info & RESID_UNDER) {
3515                 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3516
3517                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3518                                  "9025 FCP Read Underrun, expected %d, "
3519                                  "residual %d Data: x%x x%x x%x\n",
3520                                  be32_to_cpu(fcpcmd->fcpDl),
3521                                  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3522                                  cmnd->underflow);
3523
3524                 /*
3525                  * If there is an underrun, check whether the underrun reported
3526                  * by the storage array matches the underrun reported by the HBA.
3527                  * If they do not match, a frame was dropped.
3528                  */
3529                 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3530                         fcpi_parm &&
3531                         (scsi_get_resid(cmnd) != fcpi_parm)) {
3532                         lpfc_printf_vlog(vport, KERN_WARNING,
3533                                          LOG_FCP | LOG_FCP_ERROR,
3534                                          "9026 FCP Read Check Error "
3535                                          "and Underrun Data: x%x x%x x%x x%x\n",
3536                                          be32_to_cpu(fcpcmd->fcpDl),
3537                                          scsi_get_resid(cmnd), fcpi_parm,
3538                                          cmnd->cmnd[0]);
3539                         scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3540                         host_status = DID_ERROR;
3541                 }
3542                 /*
3543                  * The cmnd->underflow is the minimum number of bytes that must
3544                  * be transferred for this command.  Provided a sense condition
3545                  * is not present, make sure the actual amount transferred is at
3546                  * least the underflow value or fail.
3547                  */
3548                 if (!(resp_info & SNS_LEN_VALID) &&
3549                     (scsi_status == SAM_STAT_GOOD) &&
3550                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3551                      < cmnd->underflow)) {
3552                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3553                                          "9027 FCP command x%x residual "
3554                                          "underrun converted to error "
3555                                          "Data: x%x x%x x%x\n",
3556                                          cmnd->cmnd[0], scsi_bufflen(cmnd),
3557                                          scsi_get_resid(cmnd), cmnd->underflow);
3558                         host_status = DID_ERROR;
3559                 }
3560         } else if (resp_info & RESID_OVER) {
3561                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3562                                  "9028 FCP command x%x residual overrun error. "
3563                                  "Data: x%x x%x\n", cmnd->cmnd[0],
3564                                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3565                 host_status = DID_ERROR;
3566
3567         /*
3568          * Check SLI validation that the entire transfer was actually done
3569          * (fcpi_parm should be zero).
3570          */
3571         } else if (fcpi_parm) {
3572                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3573                                  "9029 FCP Data Transfer Check Error: "
3574                                  "x%x x%x x%x x%x x%x\n",
3575                                  be32_to_cpu(fcpcmd->fcpDl),
3576                                  be32_to_cpu(fcprsp->rspResId),
3577                                  fcpi_parm, cmnd->cmnd[0], scsi_status);
3578                 switch (scsi_status) {
3579                 case SAM_STAT_GOOD:
3580                 case SAM_STAT_CHECK_CONDITION:
3581                         /* Fabric dropped a data frame. Fail any successful
3582                          * command in which we detected dropped frames.
3583                          * A status of good or some check conditions could
3584                          * be considered a successful command.
3585                          */
3586                         host_status = DID_ERROR;
3587                         break;
3588                 }
3589                 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3590         }
3591
3592  out:
3593         cmnd->result = ScsiResult(host_status, scsi_status);
3594         lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3595 }
3596
3597 /**
3598  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3599  * @phba: The Hba for which this call is being executed.
3600  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3601  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3602  *
3603  * This routine assigns the scsi command result by examining the response
3604  * IOCB status field. It also handles the QUEUE FULL condition by ramping
3605  * down the device queue depth.
3606  **/
3607 static void
3608 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3609                         struct lpfc_iocbq *pIocbOut)
3610 {
3611         struct lpfc_scsi_buf *lpfc_cmd =
3612                 (struct lpfc_scsi_buf *) pIocbIn->context1;
3613         struct lpfc_vport      *vport = pIocbIn->vport;
3614         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3615         struct lpfc_nodelist *pnode = rdata->pnode;
3616         struct scsi_cmnd *cmd;
3617         int result;
3618         struct scsi_device *tmp_sdev;
3619         int depth;
3620         unsigned long flags;
3621         struct lpfc_fast_path_event *fast_path_evt;
3622         struct Scsi_Host *shost;
3623         uint32_t queue_depth, scsi_id;
3624         uint32_t logit = LOG_FCP;
3625
3626         /* Sanity check on return of outstanding command */
3627         if (!(lpfc_cmd->pCmd))
3628                 return;
3629         cmd = lpfc_cmd->pCmd;
3630         shost = cmd->device->host;
3631
3632         lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3633         lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3634         /* pick up SLI4 exchange busy status from HBA */
3635         lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3636
3637 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3638         if (lpfc_cmd->prot_data_type) {
3639                 struct scsi_dif_tuple *src = NULL;
3640
3641                 src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3642                 /*
3643                  * Used to restore any changes to protection
3644                  * data for error injection.
3645                  */
3646                 switch (lpfc_cmd->prot_data_type) {
3647                 case LPFC_INJERR_REFTAG:
3648                         src->ref_tag =
3649                                 lpfc_cmd->prot_data;
3650                         break;
3651                 case LPFC_INJERR_APPTAG:
3652                         src->app_tag =
3653                                 (uint16_t)lpfc_cmd->prot_data;
3654                         break;
3655                 case LPFC_INJERR_GUARD:
3656                         src->guard_tag =
3657                                 (uint16_t)lpfc_cmd->prot_data;
3658                         break;
3659                 default:
3660                         break;
3661                 }
3662
3663                 lpfc_cmd->prot_data = 0;
3664                 lpfc_cmd->prot_data_type = 0;
3665                 lpfc_cmd->prot_data_segment = NULL;
3666         }
3667 #endif
3668         if (pnode && NLP_CHK_NODE_ACT(pnode))
3669                 atomic_dec(&pnode->cmd_pending);
3670
3671         if (lpfc_cmd->status) {
3672                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3673                     (lpfc_cmd->result & IOERR_DRVR_MASK))
3674                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3675                 else if (lpfc_cmd->status >= IOSTAT_CNT)
3676                         lpfc_cmd->status = IOSTAT_DEFAULT;
3677                 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3678                     !lpfc_cmd->fcp_rsp->rspStatus3 &&
3679                     (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3680                     !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3681                         logit = 0;
3682                 else
3683                         logit = LOG_FCP | LOG_FCP_UNDER;
3684                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3685                          "9030 FCP cmd x%x failed <%d/%d> "
3686                          "status: x%x result: x%x "
3687                          "sid: x%x did: x%x oxid: x%x "
3688                          "Data: x%x x%x\n",
3689                          cmd->cmnd[0],
3690                          cmd->device ? cmd->device->id : 0xffff,
3691                          cmd->device ? cmd->device->lun : 0xffff,
3692                          lpfc_cmd->status, lpfc_cmd->result,
3693                          vport->fc_myDID, pnode ? pnode->nlp_DID : 0,
3694                          phba->sli_rev == LPFC_SLI_REV4 ?
3695                              lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3696                          pIocbOut->iocb.ulpContext,
3697                          lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3698
3699                 switch (lpfc_cmd->status) {
3700                 case IOSTAT_FCP_RSP_ERROR:
3701                         /* Call FCP RSP handler to determine result */
3702                         lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3703                         break;
3704                 case IOSTAT_NPORT_BSY:
3705                 case IOSTAT_FABRIC_BSY:
3706                         cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
3707                         fast_path_evt = lpfc_alloc_fast_evt(phba);
3708                         if (!fast_path_evt)
3709                                 break;
3710                         fast_path_evt->un.fabric_evt.event_type =
3711                                 FC_REG_FABRIC_EVENT;
3712                         fast_path_evt->un.fabric_evt.subcategory =
3713                                 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3714                                 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3715                         if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3716                                 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3717                                         &pnode->nlp_portname,
3718                                         sizeof(struct lpfc_name));
3719                                 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3720                                         &pnode->nlp_nodename,
3721                                         sizeof(struct lpfc_name));
3722                         }
3723                         fast_path_evt->vport = vport;
3724                         fast_path_evt->work_evt.evt =
3725                                 LPFC_EVT_FASTPATH_MGMT_EVT;
3726                         spin_lock_irqsave(&phba->hbalock, flags);
3727                         list_add_tail(&fast_path_evt->work_evt.evt_listp,
3728                                 &phba->work_list);
3729                         spin_unlock_irqrestore(&phba->hbalock, flags);
3730                         lpfc_worker_wake_up(phba);
3731                         break;
3732                 case IOSTAT_LOCAL_REJECT:
3733                 case IOSTAT_REMOTE_STOP:
3734                         if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3735                             lpfc_cmd->result ==
3736                                         IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3737                             lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3738                             lpfc_cmd->result ==
3739                                         IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3740                                 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
3741                                 break;
3742                         }
3743                         if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3744                             lpfc_cmd->result == IOERR_NO_RESOURCES ||
3745                             lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3746                             lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3747                                 cmd->result = ScsiResult(DID_REQUEUE, 0);
3748                                 break;
3749                         }
3750                         if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3751                              lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3752                              pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3753                                 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3754                                         /*
3755                                          * This is a response for a BG enabled
3756                                          * cmd. Parse BG error
3757                                          */
3758                                         lpfc_parse_bg_err(phba, lpfc_cmd,
3759                                                         pIocbOut);
3760                                         break;
3761                                 } else {
3762                                         lpfc_printf_vlog(vport, KERN_WARNING,
3763                                                         LOG_BG,
3764                                                         "9031 non-zero BGSTAT "
3765                                                         "on unprotected cmd\n");
3766                                 }
3767                         }
3768                         if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3769                                 && (phba->sli_rev == LPFC_SLI_REV4)
3770                                 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3771                 /* This IO was aborted by the target; we don't
3772                  * know the rxid, and because we did not send the
3773                  * ABTS we cannot generate an RRQ.
3774                                  */
3775                                 lpfc_set_rrq_active(phba, pnode,
3776                                         lpfc_cmd->cur_iocbq.sli4_lxritag,
3777                                         0, 0);
3778                         }
3779                 /* else: fall through */
3780                 default:
3781                         cmd->result = ScsiResult(DID_ERROR, 0);
3782                         break;
3783                 }
3784
3785                 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3786                     || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3787                         cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
3788                                                  SAM_STAT_BUSY);
3789         } else
3790                 cmd->result = ScsiResult(DID_OK, 0);
3791
3792         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3793                 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3794
3795                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3796                                  "0710 Iodone <%d/%d> cmd %p, error "
3797                                  "x%x SNS x%x x%x Data: x%x x%x\n",
3798                                  cmd->device->id, cmd->device->lun, cmd,
3799                                  cmd->result, *lp, *(lp + 3), cmd->retries,
3800                                  scsi_get_resid(cmd));
3801         }
3802
3803         lpfc_update_stats(phba, lpfc_cmd);
3804         result = cmd->result;
3805         if (vport->cfg_max_scsicmpl_time &&
3806            time_after(jiffies, lpfc_cmd->start_time +
3807                 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
3808                 spin_lock_irqsave(shost->host_lock, flags);
3809                 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3810                         if (pnode->cmd_qdepth >
3811                                 atomic_read(&pnode->cmd_pending) &&
3812                                 (atomic_read(&pnode->cmd_pending) >
3813                                 LPFC_MIN_TGT_QDEPTH) &&
3814                                 ((cmd->cmnd[0] == READ_10) ||
3815                                 (cmd->cmnd[0] == WRITE_10)))
3816                                 pnode->cmd_qdepth =
3817                                         atomic_read(&pnode->cmd_pending);
3818
3819                         pnode->last_change_time = jiffies;
3820                 }
3821                 spin_unlock_irqrestore(shost->host_lock, flags);
3822         } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3823                 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
3824                    time_after(jiffies, pnode->last_change_time +
3825                               msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
3826                         spin_lock_irqsave(shost->host_lock, flags);
3827                         depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
3828                                 / 100;
3829                         depth = depth ? depth : 1;
3830                         pnode->cmd_qdepth += depth;
3831                         if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
3832                                 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
3833                         pnode->last_change_time = jiffies;
3834                         spin_unlock_irqrestore(shost->host_lock, flags);
3835                 }
3836         }
3837
3838         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
3839
3840         /* The sdev is not guaranteed to be valid post scsi_done upcall. */
3841         queue_depth = cmd->device->queue_depth;
3842         scsi_id = cmd->device->id;
3843         cmd->scsi_done(cmd);
3844
3845         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3846                 spin_lock_irqsave(&phba->hbalock, flags);
3847                 lpfc_cmd->pCmd = NULL;
3848                 spin_unlock_irqrestore(&phba->hbalock, flags);
3849
3850                 /*
3851                  * If there is a thread waiting for command completion
3852                  * wake up the thread.
3853                  */
3854                 spin_lock_irqsave(shost->host_lock, flags);
3855                 if (lpfc_cmd->waitq)
3856                         wake_up(lpfc_cmd->waitq);
3857                 spin_unlock_irqrestore(shost->host_lock, flags);
3858                 lpfc_release_scsi_buf(phba, lpfc_cmd);
3859                 return;
3860         }
3861
3862         if (!result)
3863                 lpfc_rampup_queue_depth(vport, queue_depth);
3864
3865         /*
3866          * Check for queue full.  If the lun is reporting queue full, then
3867          * back off the lun queue depth to prevent target overloads.
3868          */
3869         if (result == SAM_STAT_TASK_SET_FULL && pnode &&
3870             NLP_CHK_NODE_ACT(pnode)) {
3871                 shost_for_each_device(tmp_sdev, shost) {
3872                         if (tmp_sdev->id != scsi_id)
3873                                 continue;
3874                         depth = scsi_track_queue_full(tmp_sdev,
3875                                                       tmp_sdev->queue_depth-1);
3876                         if (depth <= 0)
3877                                 continue;
3878                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3879                                          "0711 detected queue full - lun queue "
3880                                          "depth adjusted to %d.\n", depth);
3881                         lpfc_send_sdev_queuedepth_change_event(phba, vport,
3882                                                                pnode,
3883                                                                tmp_sdev->lun,
3884                                                                depth+1, depth);
3885                 }
3886         }
3887
3888         spin_lock_irqsave(&phba->hbalock, flags);
3889         lpfc_cmd->pCmd = NULL;
3890         spin_unlock_irqrestore(&phba->hbalock, flags);
3891
3892         /*
3893          * If there is a thread waiting for command completion
3894          * wake up the thread.
3895          */
3896         spin_lock_irqsave(shost->host_lock, flags);
3897         if (lpfc_cmd->waitq)
3898                 wake_up(lpfc_cmd->waitq);
3899         spin_unlock_irqrestore(shost->host_lock, flags);
3900
3901         lpfc_release_scsi_buf(phba, lpfc_cmd);
3902 }
3903
3904 /**
3905  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
3906  * @data: A pointer to the immediate command data portion of the IOCB.
3907  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
3908  *
3909  * The routine copies the entire FCP command from @fcp_cmnd to @data while
3910  * byte swapping the data to big endian format for transmission on the wire.
3911  **/
3912 static void
3913 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3914 {
3915         int i, j;
3916         for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3917              i += sizeof(uint32_t), j++) {
3918                 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3919         }
3920 }
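
/*
 * Illustrative sketch only (the values are made up): on a little-endian
 * host the cpu_to_be32() in the loop above swaps each 32-bit word of
 * the FCP command into wire (big-endian) order, e.g.
 *
 *	uint32_t host_word = 0x11223344;
 *	uint32_t wire_word = cpu_to_be32(host_word);
 *	// memory order of wire_word is now 0x11 0x22 0x33 0x44
 *
 * On a big-endian host cpu_to_be32() is a no-op, so the copy degrades
 * to a plain word-wise memcpy().
 */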
3921
3922 /**
3923  * lpfc_scsi_prep_cmnd - Wrapper func to convert a scsi cmnd to an FCP info unit
3924  * @vport: The virtual port for which this call is being executed.
3925  * @lpfc_cmd: The scsi command which needs to be sent.
3926  * @pnode: Pointer to lpfc_nodelist.
3927  *
3928  * This routine initializes the fcp_cmnd and iocb data structures from the
3929  * scsi command for transfer to a device with the SLI-3 interface spec.
3930  **/
3931 static void
3932 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3933                     struct lpfc_nodelist *pnode)
3934 {
3935         struct lpfc_hba *phba = vport->phba;
3936         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3937         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3938         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3939         struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
3940         int datadir = scsi_cmnd->sc_data_direction;
3941         char tag[2];
3942         uint8_t *ptr;
3943         bool sli4;
3944
3945         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3946                 return;
3947
3948         lpfc_cmd->fcp_rsp->rspSnsLen = 0;
3949         /* clear task management bits */
3950         lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
3951
3952         int_to_scsilun(lpfc_cmd->pCmd->device->lun,
3953                         &lpfc_cmd->fcp_cmnd->fcp_lun);
3954
3955         ptr = &fcp_cmnd->fcpCdb[0];
3956         memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3957         if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
3958                 ptr += scsi_cmnd->cmd_len;
3959                 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
3960         }
3961
3962         if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
3963                 switch (tag[0]) {
3964                 case HEAD_OF_QUEUE_TAG:
3965                         fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
3966                         break;
3967                 case ORDERED_QUEUE_TAG:
3968                         fcp_cmnd->fcpCntl1 = ORDERED_Q;
3969                         break;
3970                 default:
3971                         fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3972                         break;
3973                 }
3974         } else
3975                 fcp_cmnd->fcpCntl1 = 0;
3976
3977         sli4 = (phba->sli_rev == LPFC_SLI_REV4);
3978
3979         /*
3980          * There are three possibilities here - use scatter-gather segment, use
3981          * the single mapping, or neither.  Start the lpfc command prep by
3982          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3983          * data bde entry.
3984          */
3985         if (scsi_sg_count(scsi_cmnd)) {
3986                 if (datadir == DMA_TO_DEVICE) {
3987                         iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
3988                         if (sli4)
3989                                 iocb_cmd->ulpPU = PARM_READ_CHECK;
3990                         else {
3991                                 iocb_cmd->un.fcpi.fcpi_parm = 0;
3992                                 iocb_cmd->ulpPU = 0;
3993                         }
3994                         fcp_cmnd->fcpCntl3 = WRITE_DATA;
3995                         phba->fc4OutputRequests++;
3996                 } else {
3997                         iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
3998                         iocb_cmd->ulpPU = PARM_READ_CHECK;
3999                         fcp_cmnd->fcpCntl3 = READ_DATA;
4000                         phba->fc4InputRequests++;
4001                 }
4002         } else {
4003                 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4004                 iocb_cmd->un.fcpi.fcpi_parm = 0;
4005                 iocb_cmd->ulpPU = 0;
4006                 fcp_cmnd->fcpCntl3 = 0;
4007                 phba->fc4ControlRequests++;
4008         }
4009         if (phba->sli_rev == 3 &&
4010             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4011                 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4012         /*
4013          * Finish initializing those IOCB fields that are independent
4014          * of the scsi_cmnd request_buffer
4015          */
4016         piocbq->iocb.ulpContext = pnode->nlp_rpi;
4017         if (sli4)
4018                 piocbq->iocb.ulpContext =
4019                   phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4020         if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4021                 piocbq->iocb.ulpFCP2Rcvy = 1;
4022         else
4023                 piocbq->iocb.ulpFCP2Rcvy = 0;
4024
4025         piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4026         piocbq->context1  = lpfc_cmd;
4027         piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4028         piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4029         piocbq->vport = vport;
4030 }
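
/*
 * Sketch of the LUN encoding performed by int_to_scsilun() above
 * (assumed behaviour of the generic SCSI helper for a small,
 * flat-addressed LUN; shown for illustration only):
 *
 *	struct scsi_lun fcplun;
 *	int_to_scsilun(5, &fcplun);
 *	// fcplun.scsi_lun[] == { 0x00, 0x05, 0x00, 0x00,
 *	//                        0x00, 0x00, 0x00, 0x00 }
 *
 * i.e. the first level carries the LUN big endian and the remaining
 * levels are zeroed, which is what the FCP_CMND fcp_lun field expects.
 */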
4031
4032 /**
4033  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4034  * @vport: The virtual port for which this call is being executed.
4035  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4036  * @lun: Logical unit number.
4037  * @task_mgmt_cmd: SCSI task management command.
4038  *
4039  * This routine creates an FCP information unit corresponding to
4040  * @task_mgmt_cmd for a device with the SLI-3 interface spec.
4041  *
4042  * Return codes:
4043  *   0 - Error
4044  *   1 - Success
4045  **/
4046 static int
4047 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4048                              struct lpfc_scsi_buf *lpfc_cmd,
4049                              unsigned int lun,
4050                              uint8_t task_mgmt_cmd)
4051 {
4052         struct lpfc_iocbq *piocbq;
4053         IOCB_t *piocb;
4054         struct fcp_cmnd *fcp_cmnd;
4055         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4056         struct lpfc_nodelist *ndlp = rdata->pnode;
4057
4058         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4059             ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4060                 return 0;
4061
4062         piocbq = &(lpfc_cmd->cur_iocbq);
4063         piocbq->vport = vport;
4064
4065         piocb = &piocbq->iocb;
4066
4067         fcp_cmnd = lpfc_cmd->fcp_cmnd;
4068         /* Clear out any old data in the FCP command area */
4069         memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4070         int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4071         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4072         if (vport->phba->sli_rev == 3 &&
4073             !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4074                 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4075         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4076         piocb->ulpContext = ndlp->nlp_rpi;
4077         if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4078                 piocb->ulpContext =
4079                   vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4080         }
4081         if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4082                 piocb->ulpFCP2Rcvy = 1;
4083         }
4084         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4085
4086         /* ulpTimeout is only one byte */
4087         if (lpfc_cmd->timeout > 0xff) {
4088                 /*
4089                  * Do not timeout the command at the firmware level.
4090                  * The driver will provide the timeout mechanism.
4091                  */
4092                 piocb->ulpTimeout = 0;
4093         } else
4094                 piocb->ulpTimeout = lpfc_cmd->timeout;
4095
4096         if (vport->phba->sli_rev == LPFC_SLI_REV4)
4097                 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4098
4099         return 1;
4100 }
4101
4102 /**
4103  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4104  * @phba: The hba struct for which this call is being executed.
4105  * @dev_grp: The HBA PCI-Device group number.
4106  *
4107  * This routine sets up the SCSI interface API function jump table in @phba
4108  * struct.
4109  * Returns: 0 - success, -ENODEV - failure.
4110  **/
4111 int
4112 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4113 {
4115         phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4116         phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4117
4118         switch (dev_grp) {
4119         case LPFC_PCI_DEV_LP:
4120                 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4121                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4122                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4123                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4124                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4125                 break;
4126         case LPFC_PCI_DEV_OC:
4127                 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4128                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4129                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4130                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4131                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4132                 break;
4133         default:
4134                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4135                                 "1418 Invalid HBA PCI-device group: 0x%x\n",
4136                                 dev_grp);
4137                 return -ENODEV;
4139         }
4140         phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4141         phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4142         return 0;
4143 }
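
/*
 * For orientation -- a sketch, assuming the wrapper style lpfc uses
 * elsewhere, of how the jump table above is consumed, so the SLI-3 /
 * SLI-4 decision is made once at setup time rather than on every I/O:
 *
 *	static struct lpfc_scsi_buf *
 *	lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 *	{
 *		return phba->lpfc_get_scsi_buf(phba, ndlp);
 *	}
 */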
4144
4145 /**
4146  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4147  * @phba: The Hba for which this call is being executed.
4148  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4149  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4150  *
4151  * This routine is the IOCB completion routine for the device reset and
4152  * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
4153  **/
4154 static void
4155 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4156                         struct lpfc_iocbq *cmdiocbq,
4157                         struct lpfc_iocbq *rspiocbq)
4158 {
4159         struct lpfc_scsi_buf *lpfc_cmd =
4160                 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4161         if (lpfc_cmd)
4162                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4163         return;
4164 }
4165
4166 /**
4167  * lpfc_info - Info entry point of scsi_host_template data structure
4168  * @host: The scsi host for which this call is being executed.
4169  *
4170  * This routine provides module information about the hba.
4171  *
4172  * Return code:
4173  *   Pointer to char - Success.
4174  **/
4175 const char *
4176 lpfc_info(struct Scsi_Host *host)
4177 {
4178         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4179         struct lpfc_hba   *phba = vport->phba;
4180         int len, link_speed = 0;
4181         static char  lpfcinfobuf[384];
4182
4183         memset(lpfcinfobuf, 0, 384);
4184         if (phba && phba->pcidev) {
4185                 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4186                 len = strlen(lpfcinfobuf);
4187                 snprintf(lpfcinfobuf + len,
4188                         384-len,
4189                         " on PCI bus %02x device %02x irq %d",
4190                         phba->pcidev->bus->number,
4191                         phba->pcidev->devfn,
4192                         phba->pcidev->irq);
4193                 len = strlen(lpfcinfobuf);
4194                 if (phba->Port[0]) {
4195                         snprintf(lpfcinfobuf + len,
4196                                  384-len,
4197                                  " port %s",
4198                                  phba->Port);
4199                 }
4200                 len = strlen(lpfcinfobuf);
4201                 if (phba->sli_rev <= LPFC_SLI_REV3) {
4202                         link_speed = lpfc_sli_port_speed_get(phba);
4203                 } else {
4204                         if (phba->sli4_hba.link_state.logical_speed)
4205                                 link_speed =
4206                                       phba->sli4_hba.link_state.logical_speed;
4207                         else
4208                                 link_speed = phba->sli4_hba.link_state.speed;
4209                 }
4210                 if (link_speed != 0)
4211                         snprintf(lpfcinfobuf + len, 384-len,
4212                                  " Logical Link Speed: %d Mbps", link_speed);
4213         }
4214         return lpfcinfobuf;
4215 }
4216
4217 /**
4218  * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
4219  * @phba: The Hba for which this call is being executed.
4220  *
4221  * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
4222  * The default value of cfg_poll_tmo is 10 milliseconds.
4223  **/
4224 static inline void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
4225 {
4226         unsigned long  poll_tmo_expires =
4227                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4228
4229         if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
4230                 mod_timer(&phba->fcp_poll_timer,
4231                           poll_tmo_expires);
4232 }
4233
4234 /**
4235  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4236  * @phba: The Hba for which this call is being executed.
4237  *
4238  * This routine starts the fcp_poll_timer of @phba.
4239  **/
4240 void lpfc_poll_start_timer(struct lpfc_hba *phba)
4241 {
4242         lpfc_poll_rearm_timer(phba);
4243 }
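
/*
 * A sketch (assuming the classic timer API of this kernel generation)
 * of the one-time initialization, done during HBA setup, that pairs
 * with the rearm/start helpers above:
 *
 *	init_timer(&phba->fcp_poll_timer);
 *	phba->fcp_poll_timer.function = lpfc_poll_timeout;
 *	phba->fcp_poll_timer.data = (unsigned long)phba;
 *
 * after which lpfc_poll_start_timer()/lpfc_poll_rearm_timer() only
 * ever need mod_timer() to (re)arm it.
 */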
4244
4245 /**
4246  * lpfc_poll_timeout - Restart polling timer
4247  * @ptr: Map to lpfc_hba data structure pointer.
4248  *
4249  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4250  * and the FCP ring interrupt is disabled.
4251  **/
4252
4253 void lpfc_poll_timeout(unsigned long ptr)
4254 {
4255         struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4256
4257         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4258                 lpfc_sli_handle_fast_ring_event(phba,
4259                         &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4260
4261                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4262                         lpfc_poll_rearm_timer(phba);
4263         }
4264 }
4265
4266 /**
4267  * lpfc_queuecommand - scsi_host_template queuecommand entry point
4268  * @shost: Pointer to Scsi_Host data structure.
4269  * @cmnd: Pointer to scsi_cmnd data structure.
4270  *
4271  * Driver registers this routine with the scsi midlayer to submit a @cmnd
4272  * for processing. This routine prepares an IOCB from the scsi command and
4273  * provides it to firmware; cmnd->scsi_done is invoked on completion.
4274  *
4275  * Return value :
4276  *   0 - Success
4277  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4278  **/
4279 static int
4280 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4281 {
4282         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4283         struct lpfc_hba   *phba = vport->phba;
4284         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4285         struct lpfc_nodelist *ndlp;
4286         struct lpfc_scsi_buf *lpfc_cmd;
4287         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4288         int err;
4289
4290         err = fc_remote_port_chkready(rport);
4291         if (err) {
4292                 cmnd->result = err;
4293                 goto out_fail_command;
4294         }
4295         ndlp = rdata->pnode;
4296
4297         if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4298                 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4299
4300                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4301                                 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4302                                 " op:%02x str=%s without registering for"
4303                                 " BlockGuard - Rejecting command\n",
4304                                 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4305                                 dif_op_str[scsi_get_prot_op(cmnd)]);
4306                 goto out_fail_command;
4307         }
4308
4309         /*
4310          * Catch race where our node has transitioned, but the
4311          * transport is still transitioning.
4312          */
4313         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4314                 goto out_tgt_busy;
4315         if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4316                 goto out_tgt_busy;
4317
4318         lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4319         if (lpfc_cmd == NULL) {
4320                 lpfc_rampdown_queue_depth(phba);
4321
4322                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4323                                  "0707 driver's buffer pool is empty, "
4324                                  "IO busied\n");
4325                 goto out_host_busy;
4326         }
4327
4328         /*
4329          * Store the midlayer's command structure for the completion phase
4330          * and complete the command initialization.
4331          */
4332         lpfc_cmd->pCmd  = cmnd;
4333         lpfc_cmd->rdata = rdata;
4334         lpfc_cmd->timeout = 0;
4335         lpfc_cmd->start_time = jiffies;
4336         cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4337
4338         if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4339                 if (vport->phba->cfg_enable_bg) {
4340                         lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4341                                          "9033 BLKGRD: rcvd %s cmd:x%x "
4342                                          "sector x%llx cnt %u pt %x\n",
4343                                          dif_op_str[scsi_get_prot_op(cmnd)],
4344                                          cmnd->cmnd[0],
4345                                          (unsigned long long)scsi_get_lba(cmnd),
4346                                          blk_rq_sectors(cmnd->request),
4347                                          (cmnd->cmnd[1]>>5));
4348                 }
4349                 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4350         } else {
4351                 if (vport->phba->cfg_enable_bg) {
4352                         lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4353                                          "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4354                                          "x%x sector x%llx cnt %u pt %x\n",
4355                                          cmnd->cmnd[0],
4356                                          (unsigned long long)scsi_get_lba(cmnd),
4357                                          blk_rq_sectors(cmnd->request),
4358                                          (cmnd->cmnd[1]>>5));
4359                 }
4360                 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4361         }
4362
4363         if (err)
4364                 goto out_host_busy_free_buf;
4365
4366         lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4367
4368         atomic_inc(&ndlp->cmd_pending);
4369         err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4370                                   &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4371         if (err) {
4372                 atomic_dec(&ndlp->cmd_pending);
4373                 goto out_host_busy_free_buf;
4374         }
4375         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4376                 lpfc_sli_handle_fast_ring_event(phba,
4377                         &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4378
4379                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4380                         lpfc_poll_rearm_timer(phba);
4381         }
4382
4383         return 0;
4384
4385  out_host_busy_free_buf:
4386         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4387         lpfc_release_scsi_buf(phba, lpfc_cmd);
4388  out_host_busy:
4389         return SCSI_MLQUEUE_HOST_BUSY;
4390
4391  out_tgt_busy:
4392         return SCSI_MLQUEUE_TARGET_BUSY;
4393
4394  out_fail_command:
4395         cmnd->scsi_done(cmnd);
4396         return 0;
4397 }
4398
4399
4400 /**
4401  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4402  * @cmnd: Pointer to scsi_cmnd data structure.
4403  *
4404  * This routine aborts @cmnd pending in base driver.
4405  *
4406  * Return code :
4407  *   0x2003 - Error
4408  *   0x2002 - Success
4409  **/
4410 static int
4411 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4412 {
4413         struct Scsi_Host  *shost = cmnd->device->host;
4414         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4415         struct lpfc_hba   *phba = vport->phba;
4416         struct lpfc_iocbq *iocb;
4417         struct lpfc_iocbq *abtsiocb;
4418         struct lpfc_scsi_buf *lpfc_cmd;
4419         IOCB_t *cmd, *icmd;
4420         int ret = SUCCESS, status = 0;
4421         unsigned long flags;
4422         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4423
4424         status = fc_block_scsi_eh(cmnd);
4425         if (status != 0 && status != SUCCESS)
4426                 return status;
4427
4428         spin_lock_irqsave(&phba->hbalock, flags);
4429         /* driver queued commands are in process of being flushed */
4430         if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4431                 spin_unlock_irqrestore(&phba->hbalock, flags);
4432                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4433                         "3168 SCSI Layer abort requested I/O has been "
4434                         "flushed by LLD.\n");
4435                 return FAILED;
4436         }
4437
4438         lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4439         if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4440                 spin_unlock_irqrestore(&phba->hbalock, flags);
4441                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4442                          "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4443                          "x%x ID %d LUN %d\n",
4444                          SUCCESS, cmnd->device->id, cmnd->device->lun);
4445                 return SUCCESS;
4446         }
4447
4448         iocb = &lpfc_cmd->cur_iocbq;
4449         /* the command is in the process of being cancelled */
4450         if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4451                 spin_unlock_irqrestore(&phba->hbalock, flags);
4452                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4453                         "3169 SCSI Layer abort requested I/O has been "
4454                         "cancelled by LLD.\n");
4455                 return FAILED;
4456         }
4457         /*
4458          * If pCmd field of the corresponding lpfc_scsi_buf structure
4459          * points to a different SCSI command, then the driver has
4460          * already completed this command, but the midlayer did not
4461          * see the completion before the eh fired. Just return SUCCESS.
4462          */
4463         if (lpfc_cmd->pCmd != cmnd) {
4464                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4465                         "3170 SCSI Layer abort requested I/O has been "
4466                         "completed by LLD.\n");
4467                 goto out_unlock;
4468         }
4469
4470         BUG_ON(iocb->context1 != lpfc_cmd);
4471
4472         abtsiocb = __lpfc_sli_get_iocbq(phba);
4473         if (abtsiocb == NULL) {
4474                 ret = FAILED;
4475                 goto out_unlock;
4476         }
4477
4478         /*
4479          * The scsi command cannot be in the txq and is still in flight because
4480          * pCmd is still pointing at the SCSI command we have to abort. There
4481          * is no need to search the txcmplq. Just send an abort to the FW.
4482          */
4483
4484         cmd = &iocb->iocb;
4485         icmd = &abtsiocb->iocb;
4486         icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4487         icmd->un.acxri.abortContextTag = cmd->ulpContext;
4488         if (phba->sli_rev == LPFC_SLI_REV4)
4489                 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4490         else
4491                 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4492
4493         icmd->ulpLe = 1;
4494         icmd->ulpClass = cmd->ulpClass;
4495
4496         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4497         abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4498         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4499
4500         if (lpfc_is_link_up(phba))
4501                 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4502         else
4503                 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4504
4505         abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4506         abtsiocb->vport = vport;
4507         /* no longer need the lock after this point */
4508         spin_unlock_irqrestore(&phba->hbalock, flags);
4509
4510         if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4511             IOCB_ERROR) {
4512                 lpfc_sli_release_iocbq(phba, abtsiocb);
4513                 ret = FAILED;
4514                 goto out;
4515         }
4516
4517         if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4518                 lpfc_sli_handle_fast_ring_event(phba,
4519                         &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4520
4521         lpfc_cmd->waitq = &waitq;
4522         /* Wait for abort to complete */
4523         wait_event_timeout(waitq,
4524                           (lpfc_cmd->pCmd != cmnd),
4525                            (2*vport->cfg_devloss_tmo*HZ));
4526         lpfc_cmd->waitq = NULL;
4527
4528         if (lpfc_cmd->pCmd == cmnd) {
4529                 ret = FAILED;
4530                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4531                                  "0748 abort handler timed out waiting "
4532                                  "for aborting I/O (xri:x%x) to complete: "
4533                                  "ret %#x, ID %d, LUN %d\n",
4534                                  iocb->sli4_xritag, ret,
4535                                  cmnd->device->id, cmnd->device->lun);
4536         }
4537         goto out;
4538
4539 out_unlock:
4540         spin_unlock_irqrestore(&phba->hbalock, flags);
4541 out:
4542         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4543                          "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4544                          "LUN %d\n", ret, cmnd->device->id,
4545                          cmnd->device->lun);
4546         return ret;
4547 }
4548
4549 static char *
4550 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4551 {
4552         switch (task_mgmt_cmd) {
4553         case FCP_ABORT_TASK_SET:
4554                 return "FCP_ABORT_TASK_SET";
4555         case FCP_CLEAR_TASK_SET:
4556                 return "FCP_CLEAR_TASK_SET";
4557         case FCP_BUS_RESET:
4558                 return "FCP_BUS_RESET";
4559         case FCP_LUN_RESET:
4560                 return "FCP_LUN_RESET";
4561         case FCP_TARGET_RESET:
4562                 return "FCP_TARGET_RESET";
4563         case FCP_CLEAR_ACA:
4564                 return "FCP_CLEAR_ACA";
4565         case FCP_TERMINATE_TASK:
4566                 return "FCP_TERMINATE_TASK";
4567         default:
4568                 return "unknown";
4569         }
4570 }
4571
4572 /**
4573  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4574  * @vport: The virtual port for which this call is being executed.
4575  * @rdata: Pointer to remote port local data
4576  * @tgt_id: Target ID of remote device.
4577  * @lun_id: Lun number for the TMF
4578  * @task_mgmt_cmd: type of TMF to send
4579  *
4580  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4581  * a remote port.
4582  *
4583  * Return Code:
4584  *   0x2003 - Error
4585  *   0x2002 - Success.
4586  **/
4587 static int
4588 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4589                     unsigned tgt_id, unsigned int lun_id,
4590                     uint8_t task_mgmt_cmd)
4591 {
4592         struct lpfc_hba   *phba = vport->phba;
4593         struct lpfc_scsi_buf *lpfc_cmd;
4594         struct lpfc_iocbq *iocbq;
4595         struct lpfc_iocbq *iocbqrsp;
4596         struct lpfc_nodelist *pnode = rdata->pnode;
4597         int ret;
4598         int status;
4599
4600         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4601                 return FAILED;
4602
4603         lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
4604         if (lpfc_cmd == NULL)
4605                 return FAILED;
4606         lpfc_cmd->timeout = 60;
4607         lpfc_cmd->rdata = rdata;
4608
4609         status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4610                                            task_mgmt_cmd);
4611         if (!status) {
4612                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4613                 return FAILED;
4614         }
4615
4616         iocbq = &lpfc_cmd->cur_iocbq;
4617         iocbqrsp = lpfc_sli_get_iocbq(phba);
4618         if (iocbqrsp == NULL) {
4619                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4620                 return FAILED;
4621         }
4622
4623         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4624                          "0702 Issue %s to TGT %d LUN %d "
4625                          "rpi x%x nlp_flag x%x Data: x%x x%x\n",
4626                          lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
4627                          pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
4628                          iocbq->iocb_flag);
4629
4630         status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
4631                                           iocbq, iocbqrsp, lpfc_cmd->timeout);
4632         if (status != IOCB_SUCCESS) {
4633                 if (status == IOCB_TIMEDOUT) {
4634                         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
4635                         ret = TIMEOUT_ERROR;
4636                 } else
4637                         ret = FAILED;
4638                 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4639                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4640                          "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
4641                          "iocb_flag x%x\n",
4642                          lpfc_taskmgmt_name(task_mgmt_cmd),
4643                          tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
4644                          iocbqrsp->iocb.un.ulpWord[4],
4645                          iocbq->iocb_flag);
4646         } else if (status == IOCB_BUSY)
4647                 ret = FAILED;
4648         else
4649                 ret = SUCCESS;
4650
4651         lpfc_sli_release_iocbq(phba, iocbqrsp);
4652
4653         if (ret != TIMEOUT_ERROR)
4654                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4655
4656         return ret;
4657 }
4658
4659 /**
4660  * lpfc_chk_tgt_mapped - Wait for the scsi target of @cmnd to become mapped
4661  * @vport: The virtual port to check on
4662  * @cmnd: Pointer to scsi_cmnd data structure.
4663  *
4664  * This routine delays until the scsi target (aka rport) for the
4665  * command exists (is present and logged in) or we declare it non-existent.
4666  *
4667  * Return code :
4668  *  0x2003 - Error
4669  *  0x2002 - Success
4670  **/
4671 static int
4672 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4673 {
4674         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4675         struct lpfc_nodelist *pnode;
4676         unsigned long later;
4677
4678         if (!rdata) {
4679                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4680                         "0797 Tgt Map rport failure: rdata x%p\n", rdata);
4681                 return FAILED;
4682         }
4683         pnode = rdata->pnode;
4684         /*
4685          * If target is not in a MAPPED state, delay until
4686          * target is rediscovered or devloss timeout expires.
4687          */
4688         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4689         while (time_after(later, jiffies)) {
4690                 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4691                         return FAILED;
4692                 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4693                         return SUCCESS;
4694                 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4695                 rdata = cmnd->device->hostdata;
4696                 if (!rdata)
4697                         return FAILED;
4698                 pnode = rdata->pnode;
4699         }
4700         if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4701             (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4702                 return FAILED;
4703         return SUCCESS;
4704 }
4705
4706 /**
4707  * lpfc_reset_flush_io_context - Flush orphaned I/O contexts after a reset
4708  * @vport: The virtual port (scsi_host) for the flush context
4709  * @tgt_id: If aborting by Target context - specifies the target id
4710  * @lun_id: If aborting by Lun context - specifies the lun id
4711  * @context: specifies the context level to flush at.
4712  *
4713  * After a reset condition via TMF, we need to flush orphaned i/o
4714  * contexts from the adapter. This routine aborts any contexts
4715  * outstanding, then waits for their completions. The wait is
4716  * bounded by devloss_tmo though.
4717  *
4718  * Return code :
4719  *  0x2003 - Error
4720  *  0x2002 - Success
4721  **/
4722 static int
4723 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
4724                         uint64_t lun_id, lpfc_ctx_cmd context)
4725 {
4726         struct lpfc_hba   *phba = vport->phba;
4727         unsigned long later;
4728         int cnt;
4729
4730         cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
4731         if (cnt)
4732                 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
4733                                     tgt_id, lun_id, context);
4734         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4735         while (time_after(later, jiffies) && cnt) {
4736                 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
4737                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
4738         }
4739         if (cnt) {
4740                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4741                         "0724 I/O flush failure for context %s : cnt x%x\n",
4742                         ((context == LPFC_CTX_LUN) ? "LUN" :
4743                          ((context == LPFC_CTX_TGT) ? "TGT" :
4744                           ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
4745                         cnt);
4746                 return FAILED;
4747         }
4748         return SUCCESS;
4749 }
4750
4751 /**
4752  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
4753  * @cmnd: Pointer to scsi_cmnd data structure.
4754  *
4755  * This routine does a device reset by sending a LUN_RESET task management
4756  * command.
4757  *
4758  * Return code :
4759  *  0x2003 - Error
4760  *  0x2002 - Success
4761  **/
4762 static int
4763 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4764 {
4765         struct Scsi_Host  *shost = cmnd->device->host;
4766         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4767         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4768         struct lpfc_nodelist *pnode;
4769         unsigned tgt_id = cmnd->device->id;
4770         unsigned int lun_id = cmnd->device->lun;
4771         struct lpfc_scsi_event_header scsi_event;
4772         int status, ret = SUCCESS;
4773
4774         if (!rdata) {
4775                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4776                         "0798 Device Reset rport failure: rdata x%p\n", rdata);
4777                 return FAILED;
4778         }
4779         pnode = rdata->pnode;
4780         status = fc_block_scsi_eh(cmnd);
4781         if (status != 0 && status != SUCCESS)
4782                 return status;
4783
4784         status = lpfc_chk_tgt_mapped(vport, cmnd);
4785         if (status == FAILED) {
4786                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4787                         "0721 Device Reset rport failure: rdata x%p\n", rdata);
4788                 return FAILED;
4789         }
4790
4791         scsi_event.event_type = FC_REG_SCSI_EVENT;
4792         scsi_event.subcategory = LPFC_EVENT_LUNRESET;
4793         scsi_event.lun = lun_id;
4794         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4795         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4796
4797         fc_host_post_vendor_event(shost, fc_get_event_number(),
4798                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4799
4800         status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4801                                                 FCP_LUN_RESET);
4802
4803         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4804                          "0713 SCSI layer issued Device Reset (%d, %d) "
4805                          "return x%x\n", tgt_id, lun_id, status);
4806
4807         /*
4808          * We have to clean up the i/o as it may be orphaned by the TMF,
4809          * or, if the TMF failed, it may be in an indeterminate state.
4810          * So, continue on.
4811          * We will report success if all the i/o aborts successfully.
4812          */
4813         ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4814                                                 LPFC_CTX_LUN);
4815         return ret;
4816 }
4817
4818 /**
4819  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
4820  * @cmnd: Pointer to scsi_cmnd data structure.
4821  *
4822  * This routine does a target reset by sending a TARGET_RESET task management
4823  * command.
4824  *
4825  * Return code :
4826  *  0x2003 - Error
4827  *  0x2002 - Success
4828  **/
4829 static int
4830 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4831 {
4832         struct Scsi_Host  *shost = cmnd->device->host;
4833         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4834         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4835         struct lpfc_nodelist *pnode;
4836         unsigned tgt_id = cmnd->device->id;
4837         unsigned int lun_id = cmnd->device->lun;
4838         struct lpfc_scsi_event_header scsi_event;
4839         int status, ret = SUCCESS;
4840
4841         if (!rdata) {
4842                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4843                         "0799 Target Reset rport failure: rdata x%p\n", rdata);
4844                 return FAILED;
4845         }
4846         pnode = rdata->pnode;
4847         status = fc_block_scsi_eh(cmnd);
4848         if (status != 0 && status != SUCCESS)
4849                 return status;
4850
4851         status = lpfc_chk_tgt_mapped(vport, cmnd);
4852         if (status == FAILED) {
4853                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4854                         "0722 Target Reset rport failure: rdata x%p\n", rdata);
4855                 return FAILED;
4856         }
4857
4858         scsi_event.event_type = FC_REG_SCSI_EVENT;
4859         scsi_event.subcategory = LPFC_EVENT_TGTRESET;
4860         scsi_event.lun = 0;
4861         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4862         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4863
4864         fc_host_post_vendor_event(shost, fc_get_event_number(),
4865                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4866
4867         status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4868                                         FCP_TARGET_RESET);
4869
4870         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4871                          "0723 SCSI layer issued Target Reset (%d, %d) "
4872                          "return x%x\n", tgt_id, lun_id, status);
4873
4874         /*
4875          * We have to clean up the i/o as it may be orphaned by the TMF,
4876          * or, if the TMF failed, it may be in an indeterminate state.
4877          * So, continue on.
4878          * We will report success if all the i/o aborts successfully.
4879          */
4880         ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4881                                           LPFC_CTX_TGT);
4882         return ret;
4883 }
4884
4885 /**
4886  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
4887  * @cmnd: Pointer to scsi_cmnd data structure.
4888  *
4889  * This routine does a target reset to all targets on @cmnd->device->host.
4890  * This emulates Parallel SCSI Bus Reset Semantics.
4891  *
4892  * Return code :
4893  *  0x2003 - Error
4894  *  0x2002 - Success
4895  **/
4896 static int
4897 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
4898 {
4899         struct Scsi_Host  *shost = cmnd->device->host;
4900         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4901         struct lpfc_nodelist *ndlp = NULL;
4902         struct lpfc_scsi_event_header scsi_event;
4903         int match;
4904         int ret = SUCCESS, status, i;
4905
4906         scsi_event.event_type = FC_REG_SCSI_EVENT;
4907         scsi_event.subcategory = LPFC_EVENT_BUSRESET;
4908         scsi_event.lun = 0;
4909         memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
4910         memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
4911
4912         fc_host_post_vendor_event(shost, fc_get_event_number(),
4913                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4914
4915         status = fc_block_scsi_eh(cmnd);
4916         if (status != 0 && status != SUCCESS)
4917                 return status;
4918
4919         /*
4920          * Since the driver manages a single bus device, reset all
4921          * targets known to the driver.  Should any target reset
4922          * fail, this routine returns failure to the midlayer.
4923          */
4924         for (i = 0; i < LPFC_MAX_TARGET; i++) {
4925                 /* Search for mapped node by target ID */
4926                 match = 0;
4927                 spin_lock_irq(shost->host_lock);
4928                 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4929                         if (!NLP_CHK_NODE_ACT(ndlp))
4930                                 continue;
4931                         if (vport->phba->cfg_fcp2_no_tgt_reset &&
4932                             (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
4933                                 continue;
4934                         if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
4935                             ndlp->nlp_sid == i &&
4936                             ndlp->rport) {
4937                                 match = 1;
4938                                 break;
4939                         }
4940                 }
4941                 spin_unlock_irq(shost->host_lock);
4942                 if (!match)
4943                         continue;
4944
4945                 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
4946                                         i, 0, FCP_TARGET_RESET);
4947
4948                 if (status != SUCCESS) {
4949                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4950                                          "0700 Bus Reset on target %d failed\n",
4951                                          i);
4952                         ret = FAILED;
4953                 }
4954         }
4955         /*
4956          * We have to clean up the I/O in any case: it may have been
4957          * orphaned by the TMFs above, or left in an indeterminate
4958          * state if any of the TMFs failed.
4959          * We report success only if all the I/O aborts successfully.
4960          */
4961
4962         status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
4963         if (status != SUCCESS)
4964                 ret = FAILED;
4965
4966         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4967                          "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
4968         return ret;
4969 }
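/*
 * The loop above only resets nodes that pass a multi-part match.  Below is
 * the same test pulled out as a standalone predicate for readability; the
 * helper name is hypothetical, but the fields and macros are the driver's
 * own (see the list_for_each_entry() walk above).
 */
#if 0
static int bus_reset_target_match(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp, int tgt)
{
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;
	/* Optionally leave FCP-2 (e.g. tape-class) devices alone. */
	if (vport->phba->cfg_fcp2_no_tgt_reset &&
	    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
		return 0;
	return ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
	       ndlp->nlp_sid == tgt && ndlp->rport != NULL;
}
#endif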
4970
4971 /**
4972  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry point
4973  * @cmnd: Pointer to scsi_cmnd data structure.
4974  *
4975  * This routine resets the adapter port. It brings the HBA offline,
4976  * performs a board restart, and then brings the board back online.
4977  * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
4978  * all outstanding SCSI commands on the host, returning the errors back
4979  * to the SCSI mid-level. As this is the SCSI mid-level's last resort of
4980  * error handling, it returns an error only if resetting the adapter
4981  * fails; in all other cases it returns success.
4982  *
4983  * Return code :
4984  *  0x2003 - Error
4985  *  0x2002 - Success
4986  **/
4987 static int
4988 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
4989 {
4990         struct Scsi_Host *shost = cmnd->device->host;
4991         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4992         struct lpfc_hba *phba = vport->phba;
4993         int rc, ret = SUCCESS;
4994
4995         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4996         lpfc_offline(phba);
4997         rc = lpfc_sli_brdrestart(phba);
4998         if (rc)
4999                 ret = FAILED;
5000         lpfc_online(phba);
5001         lpfc_unblock_mgmt_io(phba);
5002
5003         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
5004                         "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
5005         return ret;
5006 }
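/*
 * User-space model of the recovery sequence above.  Only a failed board
 * restart is reported as FAILED; the port is brought back online either
 * way.  The three helpers are hypothetical stand-ins for
 * lpfc_offline_prep() plus lpfc_offline(), lpfc_sli_brdrestart(), and
 * lpfc_online() plus lpfc_unblock_mgmt_io().
 */
#if 0
static void take_port_offline(void) { }			/* stub */
static int  restart_board(void)     { return 0; }	/* stub */
static void bring_port_online(void) { }			/* stub */

static int host_reset_model(void)
{
	int ret = 0x2002;			/* SUCCESS */

	take_port_offline();
	if (restart_board() != 0)		/* only failure that counts */
		ret = 0x2003;			/* FAILED */
	bring_port_online();			/* come back up regardless */
	return ret;
}
#endif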
5007
5008 /**
5009  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5010  * @sdev: Pointer to scsi_device.
5011  *
5012  * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
5013  * globally available list of scsi buffers. It also makes sure no more
5014  * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
5015  * This list of scsi buffers exists for the lifetime of the driver.
5016  *
5017  * Return codes:
5018  *   non-0 - Error
5019  *   0 - Success
5020  **/
5021 static int
5022 lpfc_slave_alloc(struct scsi_device *sdev)
5023 {
5024         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5025         struct lpfc_hba   *phba = vport->phba;
5026         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5027         uint32_t total = 0;
5028         uint32_t num_to_alloc = 0;
5029         int num_allocated = 0;
5030         uint32_t sdev_cnt;
5031
5032         if (!rport || fc_remote_port_chkready(rport))
5033                 return -ENXIO;
5034
5035         sdev->hostdata = rport->dd_data;
5036         sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5037
5038         /*
5039          * Populate cmds_per_lun + 2 scsi_bufs into this host's globally
5040          * available list of scsi buffers.  Don't allocate more than the
5041          * HBA limit conveyed to the midlayer via the host structure.  The
5042          * formula accounts for lun_queue_depth, plus one buffer for the
5043          * error handlers and one extra.  The list lives for the driver's lifetime.
5044          */
5045         total = phba->total_scsi_bufs;
5046         num_to_alloc = vport->cfg_lun_queue_depth + 2;
5047
5048         /* If allocated buffers are enough do nothing */
5049         if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5050                 return 0;
5051
5052         /* Always keep some exchanges available to complete discovery */
5053         if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5054                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5055                                  "0704 At limitation of %d preallocated "
5056                                  "command buffers\n", total);
5057                 return 0;
5058         /* Trim the request so discovery exchanges stay available */
5059         } else if (total + num_to_alloc >
5060                 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5061                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5062                                  "0705 Allocation request of %d "
5063                                  "command buffers will exceed max of %d.  "
5064                                  "Reducing allocation request to %d.\n",
5065                                  num_to_alloc, phba->cfg_hba_queue_depth,
5066                                  (phba->cfg_hba_queue_depth - total));
5067                 num_to_alloc = phba->cfg_hba_queue_depth - total;
5068         }
5069         num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5070         if (num_to_alloc != num_allocated) {
5071                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5072                                  "0708 Allocation request of %d "
5073                                  "command buffers did not succeed.  "
5074                                  "Allocated %d buffers.\n",
5075                                  num_to_alloc, num_allocated);
5076         }
5077         if (num_allocated > 0)
5078                 phba->total_scsi_bufs += num_allocated;
5079         return 0;
5080 }
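/*
 * Runnable, user-space illustration of the clamping arithmetic above:
 * request lun_queue_depth + 2 buffers, skip allocation once the total
 * reaches hba_queue_depth minus the discovery reserve, and otherwise trim
 * the request to hba_queue_depth - total, exactly as the driver does.
 * All values below are made up.
 */
#if 0
#include <stdio.h>

static unsigned int clamp_num_to_alloc(unsigned int total,
				       unsigned int lun_queue_depth,
				       unsigned int hba_queue_depth,
				       unsigned int disc_reserve)
{
	unsigned int num_to_alloc = lun_queue_depth + 2;

	if (total >= hba_queue_depth - disc_reserve)
		return 0;			/* already at the limit */
	if (total + num_to_alloc > hba_queue_depth - disc_reserve)
		num_to_alloc = hba_queue_depth - total;
	return num_to_alloc;
}

int main(void)
{
	/* 8170 preallocated, LUN depth 30, HBA depth 8192, 20 reserved */
	printf("%u\n", clamp_num_to_alloc(8170, 30, 8192, 20));  /* -> 22 */
	return 0;
}
#endif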
5081
5082 /**
5083  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5084  * @sdev: Pointer to scsi_device.
5085  *
5086  * This routine configures the following items:
5087  *   - Tag command queuing support for @sdev if supported.
5088  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5089  *
5090  * Return codes:
5091  *   0 - Success
5092  **/
5093 static int
5094 lpfc_slave_configure(struct scsi_device *sdev)
5095 {
5096         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5097         struct lpfc_hba   *phba = vport->phba;
5098
5099         if (sdev->tagged_supported)
5100                 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5101         else
5102                 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5103
5104         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5105                 lpfc_sli_handle_fast_ring_event(phba,
5106                         &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5107                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5108                         lpfc_poll_rearm_timer(phba);
5109         }
5110
5111         return 0;
5112 }
5113
5114 /**
5115  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5116  * @sdev: Pointer to scsi_device.
5117  *
5118  * This routine sets the @sdev hostdata field to NULL.
5119  **/
5120 static void
5121 lpfc_slave_destroy(struct scsi_device *sdev)
5122 {
5123         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5124         struct lpfc_hba   *phba = vport->phba;
5125         atomic_dec(&phba->sdev_cnt);
5126         sdev->hostdata = NULL;
5127         return;
5128 }
5129
5130
5131 struct scsi_host_template lpfc_template = {
5132         .module                 = THIS_MODULE,
5133         .name                   = LPFC_DRIVER_NAME,
5134         .info                   = lpfc_info,
5135         .queuecommand           = lpfc_queuecommand,
5136         .eh_abort_handler       = lpfc_abort_handler,
5137         .eh_device_reset_handler = lpfc_device_reset_handler,
5138         .eh_target_reset_handler = lpfc_target_reset_handler,
5139         .eh_bus_reset_handler   = lpfc_bus_reset_handler,
5140         .eh_host_reset_handler  = lpfc_host_reset_handler,
5141         .slave_alloc            = lpfc_slave_alloc,
5142         .slave_configure        = lpfc_slave_configure,
5143         .slave_destroy          = lpfc_slave_destroy,
5144         .scan_finished          = lpfc_scan_finished,
5145         .this_id                = -1,
5146         .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
5147         .cmd_per_lun            = LPFC_CMD_PER_LUN,
5148         .use_clustering         = ENABLE_CLUSTERING,
5149         .shost_attrs            = lpfc_hba_attrs,
5150         .max_sectors            = 0xFFFF,
5151         .vendor_id              = LPFC_NL_VENDOR_ID,
5152         .change_queue_depth     = lpfc_change_queue_depth,
5153 };
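/*
 * Hedged sketch of how a scsi_host_template such as the one above is
 * typically consumed: allocate a Scsi_Host from it, register it with the
 * midlayer, and scan (which in turn drives slave_alloc/slave_configure).
 * These are the generic SCSI host APIs; lpfc's real attach path lives in
 * lpfc_init.c and does considerably more setup.
 */
#if 0
static int example_attach(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	int error;

	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
	if (!shost)
		return -ENOMEM;

	error = scsi_add_host(shost, &pdev->dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}

	scsi_scan_host(shost);	/* triggers slave_alloc, slave_configure */
	return 0;
}
#endif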
5154
5155 struct scsi_host_template lpfc_vport_template = {
5156         .module                 = THIS_MODULE,
5157         .name                   = LPFC_DRIVER_NAME,
5158         .info                   = lpfc_info,
5159         .queuecommand           = lpfc_queuecommand,
5160         .eh_abort_handler       = lpfc_abort_handler,
5161         .eh_device_reset_handler = lpfc_device_reset_handler,
5162         .eh_target_reset_handler = lpfc_target_reset_handler,
5163         .eh_bus_reset_handler   = lpfc_bus_reset_handler,
5164         .slave_alloc            = lpfc_slave_alloc,
5165         .slave_configure        = lpfc_slave_configure,
5166         .slave_destroy          = lpfc_slave_destroy,
5167         .scan_finished          = lpfc_scan_finished,
5168         .this_id                = -1,
5169         .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
5170         .cmd_per_lun            = LPFC_CMD_PER_LUN,
5171         .use_clustering         = ENABLE_CLUSTERING,
5172         .shost_attrs            = lpfc_vport_attrs,
5173         .max_sectors            = 0xFFFF,
5174         .change_queue_depth     = lpfc_change_queue_depth,
5175 };