/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
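
/*
 * Worked example (illustrative, not from the original source): for
 * dsds == 17, the command IOCB holds 3 DSDs and the remaining 14 need
 * ceil(14 / 7) == 2 Continuation Type 0 IOCBs, so 3 is returned.
 */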

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
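
/*
 * Worked example (illustrative, not from the original source): for
 * dsds == 12, the command IOCB holds 2 DSDs and the remaining 10 need
 * ceil(10 / 5) == 2 Continuation Type 1 IOCBs, so 3 is returned.
 */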

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
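
/*
 * Worked example (illustrative, not from the original source): with
 * req->length == 2048 and req->ring_index == 2047, the increment in
 * the helpers above wraps the index to 0 and resets req->ring_ptr to
 * the base of the ring; otherwise the pointer just advances one slot.
 */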

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
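
/*
 * Illustrative mapping (derived from the switch above): a WRITE_PASS
 * command on a host whose DIX guard includes SHOST_DIX_GUARD_IP maps
 * to PO_MODE_DIF_TCP_CKSUM, the same command with a CRC-only guard
 * maps to PO_MODE_DIF_PASS, STRIP operations map to PO_MODE_DIF_REMOVE,
 * and INSERT operations map to PO_MODE_DIF_INSERT.
 */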

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
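
/*
 * Layout sketch (illustrative, not from the original source): each
 * 32-bit DSD written above is an (address, length) pair of two
 * little-endian 32-bit words, so a command with 5 segments fills the
 * 3 in-IOCB DSDs and spills 2 into a Continuation Type 0 IOCB.
 */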

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
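
/*
 * Layout sketch (illustrative, not from the original source): a 64-bit
 * DSD is a triplet of little-endian 32-bit words -- address low,
 * address high, length -- so LSD()/MSD() above split the dma_addr_t
 * across the first two words.
 */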

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
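
/*
 * Worked example for the free-slot math above (illustrative, not from
 * the original source): with req->length == 2048, req->ring_index ==
 * 2000 and a hardware out pointer cnt == 100, the producer has wrapped
 * relative to the consumer, so req->cnt = 2048 - (2000 - 100) = 148
 * request entries remain.
 */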

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_QLA82XX(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct mrk_entry_fx00 *mrkfx = NULL;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLAFX00(ha)) {
                        mrkfx = (struct mrk_entry_fx00 *) mrk;
                        mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
                        mrkfx->handle_hi = 0;
                        mrkfx->tgt_id = cpu_to_le16(loop_id);
                        mrkfx->lun[1] = LSB(lun);
                        mrkfx->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
                } else if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}
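
/*
 * Usage sketch (illustrative, not from the original source): a caller
 * already holding hardware_lock passes ha_locked == 1 so the locked
 * variant is used directly, e.g.
 *
 *      spin_lock_irqsave(&ha->hardware_lock, flags);
 *      ...
 *      qla2x00_issue_marker(vha, 1);
 *      ...
 *      spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */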

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
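
/*
 * Worked example (illustrative, assuming QLA_DSDS_PER_IOCB == 37):
 * dsds == 75 yields 75 / 37 == 2 full lists plus one more for the
 * remaining segment, so 3 is returned.
 */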

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
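
/*
 * Illustrative example (not from the original source): for a Type 1
 * protected command at LBA 0x12345, the helper above seeds ref_tag
 * with the low 32 bits of the LBA (0x00012345), clears the app tag and
 * its mask, and, when HBA error checking is enabled, opens all four
 * ref tag mask bytes (0xff) so the firmware validates every byte of
 * the reference tag.
 */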

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};
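
/*
 * Usage sketch (illustrative, not from the original source): callers
 * zero a struct qla2_sgx, set tot_bytes to scsi_bufflen() and cur_sg
 * to scsi_sglist(), then call qla24xx_get_one_block_sg() repeatedly;
 * each call emits one DMA chunk (dma_addr/dma_len) clipped to the
 * protection interval, as qla24xx_walk_and_build_sglist_no_difb()
 * does below.
 */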

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
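
/*
 * Worked example (illustrative, not from the original source): with a
 * 512-byte protection interval and a 1280-byte scatterlist element,
 * successive calls yield chunks of 512, 512 and 256 bytes; the final
 * 256-byte chunk sets *partial, and the next call takes the remaining
 * 256 bytes from the following element to complete the interval.
 */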

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int;
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        prot_int = cmd->device->sector_size;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        sgx.tot_bytes = scsi_bufflen(cmd);
        sgx.cur_sg = scsi_sglist(cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(cmd);

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        cmd = GET_CMD_SP(sp);
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection information
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        int                     sgc;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = GET_CMD_SP(sp);

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                    break;
                case ORDERED_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_ORDERED;
                    break;
                default:
                    fcp_cmnd->task_attribute = 0;
                    break;
                }
        } else {
                fcp_cmnd->task_attribute = 0;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }
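
        /*
         * Worked example (illustrative, not from the original source):
         * a 64 KB WRITE_PASS on 512-byte sectors carries 65536 / 512 ==
         * 128 protection intervals, so dif_bytes = 128 * 8 = 1024 and
         * the fibre channel byte count becomes 65536 + 1024 = 66560.
         */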

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walks data segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                    cur_dsd, tot_dsds))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
1419 int
1420 qla24xx_start_scsi(srb_t *sp)
1421 {
1422         int             ret, nseg;
1423         unsigned long   flags;
1424         uint32_t        *clr_ptr;
1425         uint32_t        index;
1426         uint32_t        handle;
1427         struct cmd_type_7 *cmd_pkt;
1428         uint16_t        cnt;
1429         uint16_t        req_cnt;
1430         uint16_t        tot_dsds;
1431         struct req_que *req = NULL;
1432         struct rsp_que *rsp = NULL;
1433         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1434         struct scsi_qla_host *vha = sp->fcport->vha;
1435         struct qla_hw_data *ha = vha->hw;
1436         char            tag[2];
1437
1438         /* Setup device pointers. */
1439         ret = 0;
1440
1441         qla25xx_set_que(sp, &rsp);
1442         req = vha->req;
1443
1444         /* So we know we haven't pci_map'ed anything yet */
1445         tot_dsds = 0;
1446
1447         /* Send marker if required */
1448         if (vha->marker_needed != 0) {
1449                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1450                     QLA_SUCCESS)
1451                         return QLA_FUNCTION_FAILED;
1452                 vha->marker_needed = 0;
1453         }
1454
1455         /* Acquire ring specific lock */
1456         spin_lock_irqsave(&ha->hardware_lock, flags);
1457
1458         /* Check for room in outstanding command list. */
1459         handle = req->current_outstanding_cmd;
1460         for (index = 1; index < req->num_outstanding_cmds; index++) {
1461                 handle++;
1462                 if (handle == req->num_outstanding_cmds)
1463                         handle = 1;
1464                 if (!req->outstanding_cmds[handle])
1465                         break;
1466         }
1467         if (index == req->num_outstanding_cmds)
1468                 goto queuing_error;
1469
1470         /* Map the sg table so we have an accurate count of sg entries needed */
1471         if (scsi_sg_count(cmd)) {
1472                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1473                     scsi_sg_count(cmd), cmd->sc_data_direction);
1474                 if (unlikely(!nseg))
1475                         goto queuing_error;
1476         } else
1477                 nseg = 0;
1478
1479         tot_dsds = nseg;
1480         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1481         if (req->cnt < (req_cnt + 2)) {
1482                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1483
1484                 if (req->ring_index < cnt)
1485                         req->cnt = cnt - req->ring_index;
1486                 else
1487                         req->cnt = req->length -
1488                                 (req->ring_index - cnt);
1489                 if (req->cnt < (req_cnt + 2))
1490                         goto queuing_error;
1491         }
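        /*
         * Free-space arithmetic sketch (hypothetical numbers): with
         * req->length = 2048, ring_index = 2000 and a hardware out
         * pointer cnt = 100, the free count is 2048 - (2000 - 100) =
         * 148 entries; the "+ 2" above keeps a small safety margin so
         * the ring never fills completely.
         */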
1492
1493         /* Build command packet. */
1494         req->current_outstanding_cmd = handle;
1495         req->outstanding_cmds[handle] = sp;
1496         sp->handle = handle;
1497         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1498         req->cnt -= req_cnt;
1499
1500         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1501         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1502
1503         /* Zero out remaining portion of packet. */
1504         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1505         clr_ptr = (uint32_t *)cmd_pkt + 2;
1506         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1507         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1508
1509         /* Set NPORT-ID and LUN number*/
1510         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1511         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1512         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1513         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1514         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1515
1516         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1517         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1518
1519         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1520         if (scsi_populate_tag_msg(cmd, tag)) {
1521                 switch (tag[0]) {
1522                 case HEAD_OF_QUEUE_TAG:
1523                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1524                         break;
1525                 case ORDERED_QUEUE_TAG:
1526                         cmd_pkt->task = TSK_ORDERED;
1527                         break;
1528                 }
1529         }
1530
1531         /* Load SCSI command packet. */
1532         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1533         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1534
1535         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1536
1537         /* Build IOCB segments */
1538         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1539
1540         /* Set total data segment count. */
1541         cmd_pkt->entry_count = (uint8_t)req_cnt;
1542         /* Specify response queue number where completion should happen */
1543         cmd_pkt->entry_status = (uint8_t) rsp->id;
1544         wmb();
1545         /* Adjust ring index. */
1546         req->ring_index++;
1547         if (req->ring_index == req->length) {
1548                 req->ring_index = 0;
1549                 req->ring_ptr = req->ring;
1550         } else
1551                 req->ring_ptr++;
1552
1553         sp->flags |= SRB_DMA_VALID;
1554
1555         /* Set chip new ring index. */
1556         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1557         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1558
1559         /* Manage unprocessed RIO/ZIO commands in response queue. */
1560         if (vha->flags.process_response_queue &&
1561                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1562                 qla24xx_process_response_queue(vha, rsp);
1563
1564         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1565         return QLA_SUCCESS;
1566
1567 queuing_error:
1568         if (tot_dsds)
1569                 scsi_dma_unmap(cmd);
1570
1571         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1572
1573         return QLA_FUNCTION_FAILED;
1574 }
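
/*
 * Usage sketch (assumed caller, for illustration): the midlayer entry
 * point typically reaches this routine through the isp_ops method
 * table, e.g.
 *
 *	rval = ha->isp_ops->start_scsi(sp);
 *	if (rval != QLA_SUCCESS)
 *		goto done_free_sp;	// hypothetical error label
 */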
1575
1576 /**
1577  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1578  * @sp: command to send to the ISP
1579  *
1580  * Returns non-zero if a failure occurred, else zero.
1581  */
1582 int
1583 qla24xx_dif_start_scsi(srb_t *sp)
1584 {
1585         int                     nseg;
1586         unsigned long           flags;
1587         uint32_t                *clr_ptr;
1588         uint32_t                index;
1589         uint32_t                handle;
1590         uint16_t                cnt;
1591         uint16_t                req_cnt = 0;
1592         uint16_t                tot_dsds;
1593         uint16_t                tot_prot_dsds;
1594         uint16_t                fw_prot_opts = 0;
1595         struct req_que          *req = NULL;
1596         struct rsp_que          *rsp = NULL;
1597         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1598         struct scsi_qla_host    *vha = sp->fcport->vha;
1599         struct qla_hw_data      *ha = vha->hw;
1600         struct cmd_type_crc_2   *cmd_pkt;
1601         uint32_t                status = 0;
1602
1603 #define QDSS_GOT_Q_SPACE        BIT_0
1604
1605         /* Only process protection commands or >16 byte CDBs in this routine */
1606         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1607                 if (cmd->cmd_len <= 16)
1608                         return qla24xx_start_scsi(sp);
1609         }
1610
1611         /* Setup device pointers. */
1612
1613         qla25xx_set_que(sp, &rsp);
1614         req = vha->req;
1615
1616         /* So we know we haven't pci_map'ed anything yet */
1617         tot_dsds = 0;
1618
1619         /* Send marker if required */
1620         if (vha->marker_needed != 0) {
1621                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1622                     QLA_SUCCESS)
1623                         return QLA_FUNCTION_FAILED;
1624                 vha->marker_needed = 0;
1625         }
1626
1627         /* Acquire ring specific lock */
1628         spin_lock_irqsave(&ha->hardware_lock, flags);
1629
1630         /* Check for room in outstanding command list. */
1631         handle = req->current_outstanding_cmd;
1632         for (index = 1; index < req->num_outstanding_cmds; index++) {
1633                 handle++;
1634                 if (handle == req->num_outstanding_cmds)
1635                         handle = 1;
1636                 if (!req->outstanding_cmds[handle])
1637                         break;
1638         }
1639
1640         if (index == req->num_outstanding_cmds)
1641                 goto queuing_error;
1642
1643         /* Compute number of required data segments */
1644         /* Map the sg table so we have an accurate count of sg entries needed */
1645         if (scsi_sg_count(cmd)) {
1646                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1647                     scsi_sg_count(cmd), cmd->sc_data_direction);
1648                 if (unlikely(!nseg))
1649                         goto queuing_error;
1650                 else
1651                         sp->flags |= SRB_DMA_VALID;
1652
1653                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1654                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1655                         struct qla2_sgx sgx;
1656                         uint32_t        partial;
1657
1658                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1659                         sgx.tot_bytes = scsi_bufflen(cmd);
1660                         sgx.cur_sg = scsi_sglist(cmd);
1661                         sgx.sp = sp;
1662
1663                         nseg = 0;
1664                         while (qla24xx_get_one_block_sg(
1665                             cmd->device->sector_size, &sgx, &partial))
1666                                 nseg++;
1667                 }
1668         } else
1669                 nseg = 0;
1670
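        /*
         * Example (hypothetical values): for SCSI_PROT_READ_INSERT with
         * a 4 KB buffer and a 512-byte sector size, the loop above
         * recounts nseg as 4096 / 512 = 8, one DSD per block, so
         * protection data can be interleaved on a per-block basis.
         */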
1671         /* number of required data segments */
1672         tot_dsds = nseg;
1673
1674         /* Compute number of required protection segments */
1675         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1676                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1677                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1678                 if (unlikely(!nseg))
1679                         goto queuing_error;
1680                 else
1681                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1682
1683                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1684                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1685                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1686                 }
1687         } else {
1688                 nseg = 0;
1689         }
1690
1691         req_cnt = 1;
1692         /* Total Data and protection sg segment(s) */
1693         tot_prot_dsds = nseg;
1694         tot_dsds += nseg;
1695         if (req->cnt < (req_cnt + 2)) {
1696                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1697
1698                 if (req->ring_index < cnt)
1699                         req->cnt = cnt - req->ring_index;
1700                 else
1701                         req->cnt = req->length -
1702                                 (req->ring_index - cnt);
1703                 if (req->cnt < (req_cnt + 2))
1704                         goto queuing_error;
1705         }
1706
1707         status |= QDSS_GOT_Q_SPACE;
1708
1709         /* Build header part of command packet (excluding the OPCODE). */
1710         req->current_outstanding_cmd = handle;
1711         req->outstanding_cmds[handle] = sp;
1712         sp->handle = handle;
1713         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1714         req->cnt -= req_cnt;
1715
1716         /* Fill-in common area */
1717         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1718         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1719
1720         clr_ptr = (uint32_t *)cmd_pkt + 2;
1721         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1722
1723         /* Set NPORT-ID and LUN number*/
1724         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1725         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1726         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1727         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1728
1729         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1730         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1731
1732         /* Total Data and protection segment(s) */
1733         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1734
1735         /* Build IOCB segments and adjust for data protection segments */
1736         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1737             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1738                 QLA_SUCCESS)
1739                 goto queuing_error;
1740
1741         cmd_pkt->entry_count = (uint8_t)req_cnt;
1742         /* Specify response queue number where completion should happen */
1743         cmd_pkt->entry_status = (uint8_t) rsp->id;
1744         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1745         wmb();
1746
1747         /* Adjust ring index. */
1748         req->ring_index++;
1749         if (req->ring_index == req->length) {
1750                 req->ring_index = 0;
1751                 req->ring_ptr = req->ring;
1752         } else
1753                 req->ring_ptr++;
1754
1755         /* Set chip new ring index. */
1756         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1757         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1758
1759         /* Manage unprocessed RIO/ZIO commands in response queue. */
1760         if (vha->flags.process_response_queue &&
1761             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1762                 qla24xx_process_response_queue(vha, rsp);
1763
1764         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1765
1766         return QLA_SUCCESS;
1767
1768 queuing_error:
1769         if (status & QDSS_GOT_Q_SPACE) {
1770                 req->outstanding_cmds[handle] = NULL;
1771                 req->cnt += req_cnt;
1772         }
1773         /* Cleanup will be performed by the caller (queuecommand) */
1774
1775         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1776         return QLA_FUNCTION_FAILED;
1777 }
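
/*
 * Note (illustration, assumed setup): this path is only reached when
 * the host was registered with protection capabilities, e.g. via
 *
 *	scsi_host_set_prot(host, SHOST_DIF_TYPE1_PROTECTION);
 *
 * otherwise scsi_get_prot_op() returns SCSI_PROT_NORMAL and short CDBs
 * fall back to qla24xx_start_scsi() above.
 */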
1778
1779
1780 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1781 {
1782         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1783         struct qla_hw_data *ha = sp->fcport->vha->hw;
1784         int affinity = cmd->request->cpu;
1785
1786         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1787                 affinity < ha->max_rsp_queues - 1)
1788                 *rsp = ha->rsp_q_map[affinity + 1];
1789          else
1790                 *rsp = ha->rsp_q_map[0];
1791 }
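
/*
 * Mapping example (hypothetical values): with cpu_affinity_enabled and
 * max_rsp_queues = 4, a command issued on CPU 0 completes on
 * rsp_q_map[1], CPU 1 on rsp_q_map[2], CPU 2 on rsp_q_map[3]; anything
 * else falls back to the default queue rsp_q_map[0].
 */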
1792
1793 /* Generic Control-SRB manipulation functions. */
1794 void *
1795 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1796 {
1797         struct qla_hw_data *ha = vha->hw;
1798         struct req_que *req = ha->req_q_map[0];
1799         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1800         uint32_t index, handle;
1801         request_t *pkt;
1802         uint16_t cnt, req_cnt;
1803
1804         pkt = NULL;
1805         req_cnt = 1;
1806         handle = 0;
1807
1808         if (!sp)
1809                 goto skip_cmd_array;
1810
1811         /* Check for room in outstanding command list. */
1812         handle = req->current_outstanding_cmd;
1813         for (index = 1; index < req->num_outstanding_cmds; index++) {
1814                 handle++;
1815                 if (handle == req->num_outstanding_cmds)
1816                         handle = 1;
1817                 if (!req->outstanding_cmds[handle])
1818                         break;
1819         }
1820         if (index == req->num_outstanding_cmds) {
1821                 ql_log(ql_log_warn, vha, 0x700b,
1822                     "No room on outstanding cmd array.\n");
1823                 goto queuing_error;
1824         }
1825
1826         /* Prep command array. */
1827         req->current_outstanding_cmd = handle;
1828         req->outstanding_cmds[handle] = sp;
1829         sp->handle = handle;
1830
1831         /* Adjust entry-counts as needed. */
1832         if (sp->type != SRB_SCSI_CMD)
1833                 req_cnt = sp->iocbs;
1834
1835 skip_cmd_array:
1836         /* Check for room on request queue. */
1837         if (req->cnt < req_cnt) {
1838                 if (ha->mqenable || IS_QLA83XX(ha))
1839                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1840                 else if (IS_QLA82XX(ha))
1841                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1842                 else if (IS_FWI2_CAPABLE(ha))
1843                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1844                 else if (IS_QLAFX00(ha))
1845                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1846                 else
1847                         cnt = qla2x00_debounce_register(
1848                             ISP_REQ_Q_OUT(ha, &reg->isp));
1849
1850                 if (req->ring_index < cnt)
1851                         req->cnt = cnt - req->ring_index;
1852                 else
1853                         req->cnt = req->length -
1854                             (req->ring_index - cnt);
1855         }
1856         if (req->cnt < req_cnt)
1857                 goto queuing_error;
1858
1859         /* Prep packet */
1860         req->cnt -= req_cnt;
1861         pkt = req->ring_ptr;
1862         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1863         if (IS_QLAFX00(ha)) {
1864                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1865                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1866         } else {
1867                 pkt->entry_count = req_cnt;
1868                 pkt->handle = handle;
1869         }
1870
1871 queuing_error:
1872         return pkt;
1873 }
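
/*
 * Usage sketch: callers reserve a zeroed request entry and then fill
 * in the type-specific fields, as qla2x00_start_sp() does below:
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		goto done;	// ring or command array full
 */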
1874
1875 static void
1876 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1877 {
1878         struct srb_iocb *lio = &sp->u.iocb_cmd;
1879
1880         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1881         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1882         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1883                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1884         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1885                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1886         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1887         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1888         logio->port_id[1] = sp->fcport->d_id.b.area;
1889         logio->port_id[2] = sp->fcport->d_id.b.domain;
1890         logio->vp_index = sp->fcport->vha->vp_idx;
1891 }
1892
1893 static void
1894 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1895 {
1896         struct qla_hw_data *ha = sp->fcport->vha->hw;
1897         struct srb_iocb *lio = &sp->u.iocb_cmd;
1898         uint16_t opts;
1899
1900         mbx->entry_type = MBX_IOCB_TYPE;
1901         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1902         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1903         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1904         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1905         if (HAS_EXTENDED_IDS(ha)) {
1906                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1907                 mbx->mb10 = cpu_to_le16(opts);
1908         } else {
1909                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1910         }
1911         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1912         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1913             sp->fcport->d_id.b.al_pa);
1914         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1915 }
1916
1917 static void
1918 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1919 {
1920         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1921         logio->control_flags =
1922             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1923         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1924         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1925         logio->port_id[1] = sp->fcport->d_id.b.area;
1926         logio->port_id[2] = sp->fcport->d_id.b.domain;
1927         logio->vp_index = sp->fcport->vha->vp_idx;
1928 }
1929
1930 static void
1931 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1932 {
1933         struct qla_hw_data *ha = sp->fcport->vha->hw;
1934
1935         mbx->entry_type = MBX_IOCB_TYPE;
1936         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1937         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1938         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1939             cpu_to_le16(sp->fcport->loop_id):
1940             cpu_to_le16(sp->fcport->loop_id << 8);
1941         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1942         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1943             sp->fcport->d_id.b.al_pa);
1944         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1945         /* Implicit: mbx->mbx10 = 0. */
1946 }
1947
1948 static void
1949 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1950 {
1951         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1952         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1953         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1954         logio->vp_index = sp->fcport->vha->vp_idx;
1955 }
1956
1957 static void
1958 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1959 {
1960         struct qla_hw_data *ha = sp->fcport->vha->hw;
1961
1962         mbx->entry_type = MBX_IOCB_TYPE;
1963         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1964         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1965         if (HAS_EXTENDED_IDS(ha)) {
1966                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1967                 mbx->mb10 = cpu_to_le16(BIT_0);
1968         } else {
1969                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1970         }
1971         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1972         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1973         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1974         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1975         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1976 }
1977
1978 static void
1979 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1980 {
1981         uint32_t flags;
1982         unsigned int lun;
1983         struct fc_port *fcport = sp->fcport;
1984         scsi_qla_host_t *vha = fcport->vha;
1985         struct qla_hw_data *ha = vha->hw;
1986         struct srb_iocb *iocb = &sp->u.iocb_cmd;
1987         struct req_que *req = vha->req;
1988
1989         flags = iocb->u.tmf.flags;
1990         lun = iocb->u.tmf.lun;
1991
1992         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1993         tsk->entry_count = 1;
1994         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1995         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1996         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1997         tsk->control_flags = cpu_to_le32(flags);
1998         tsk->port_id[0] = fcport->d_id.b.al_pa;
1999         tsk->port_id[1] = fcport->d_id.b.area;
2000         tsk->port_id[2] = fcport->d_id.b.domain;
2001         tsk->vp_index = fcport->vha->vp_idx;
2002
2003         if (flags == TCF_LUN_RESET) {
2004                 int_to_scsilun(lun, &tsk->lun);
2005                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2006                         sizeof(tsk->lun));
2007         }
2008 }
2009
2010 static void
2011 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2012 {
2013         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2014
2015         els_iocb->entry_type = ELS_IOCB_TYPE;
2016         els_iocb->entry_count = 1;
2017         els_iocb->sys_define = 0;
2018         els_iocb->entry_status = 0;
2019         els_iocb->handle = sp->handle;
2020         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2021         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2022         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2023         els_iocb->sof_type = EST_SOFI3;
2024         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2025
2026         els_iocb->opcode =
2027             sp->type == SRB_ELS_CMD_RPT ?
2028             bsg_job->request->rqst_data.r_els.els_code :
2029             bsg_job->request->rqst_data.h_els.command_code;
2030         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2031         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2032         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2033         els_iocb->control_flags = 0;
2034         els_iocb->rx_byte_count =
2035             cpu_to_le32(bsg_job->reply_payload.payload_len);
2036         els_iocb->tx_byte_count =
2037             cpu_to_le32(bsg_job->request_payload.payload_len);
2038
2039         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2040             (bsg_job->request_payload.sg_list)));
2041         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2042             (bsg_job->request_payload.sg_list)));
2043         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2044             (bsg_job->request_payload.sg_list));
2045
2046         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2047             (bsg_job->reply_payload.sg_list)));
2048         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2049             (bsg_job->reply_payload.sg_list)));
2050         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2051             (bsg_job->reply_payload.sg_list));
2052 }
2053
2054 static void
2055 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2056 {
2057         uint16_t        avail_dsds;
2058         uint32_t        *cur_dsd;
2059         struct scatterlist *sg;
2060         int index;
2061         uint16_t tot_dsds;
2062         scsi_qla_host_t *vha = sp->fcport->vha;
2063         struct qla_hw_data *ha = vha->hw;
2064         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2065         int loop_iteration = 0;
2066         int cont_iocb_prsnt = 0;
2067         int entry_count = 1;
2068
2069         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2070         ct_iocb->entry_type = CT_IOCB_TYPE;
2071         ct_iocb->entry_status = 0;
2072         ct_iocb->handle1 = sp->handle;
2073         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2074         ct_iocb->status = __constant_cpu_to_le16(0);
2075         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2076         ct_iocb->timeout = 0;
2077         ct_iocb->cmd_dsd_count =
2078             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2079         ct_iocb->total_dsd_count =
2080             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2081         ct_iocb->req_bytecount =
2082             cpu_to_le32(bsg_job->request_payload.payload_len);
2083         ct_iocb->rsp_bytecount =
2084             cpu_to_le32(bsg_job->reply_payload.payload_len);
2085
2086         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2087             (bsg_job->request_payload.sg_list)));
2088         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2089             (bsg_job->request_payload.sg_list)));
2090         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2091
2092         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2093             (bsg_job->reply_payload.sg_list)));
2094         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2095             (bsg_job->reply_payload.sg_list)));
2096         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2097
2098         avail_dsds = 1;
2099         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2100         index = 0;
2101         tot_dsds = bsg_job->reply_payload.sg_cnt;
2102
2103         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2104                 dma_addr_t       sle_dma;
2105                 cont_a64_entry_t *cont_pkt;
2106
2107                 /* Allocate additional continuation packets? */
2108                 if (avail_dsds == 0) {
2109                         /*
2110                          * Five DSDs are available in the Cont.
2111                          * Type 1 IOCB.
2112                          */
2113                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2114                             vha->hw->req_q_map[0]);
2115                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2116                         avail_dsds = 5;
2117                         cont_iocb_prsnt = 1;
2118                         entry_count++;
2119                 }
2120
2121                 sle_dma = sg_dma_address(sg);
2122                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2123                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2124                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2125                 loop_iteration++;
2126                 avail_dsds--;
2127         }
2128         ct_iocb->entry_count = entry_count;
2129 }
2130
2131 static void
2132 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2133 {
2134         uint16_t        avail_dsds;
2135         uint32_t        *cur_dsd;
2136         struct scatterlist *sg;
2137         int index;
2138         uint16_t tot_dsds;
2139         scsi_qla_host_t *vha = sp->fcport->vha;
2140         struct qla_hw_data *ha = vha->hw;
2141         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2142         int loop_iteration = 0;
2143         int cont_iocb_prsnt = 0;
2144         int entry_count = 1;
2145
2146         ct_iocb->entry_type = CT_IOCB_TYPE;
2147         ct_iocb->entry_status = 0;
2148         ct_iocb->sys_define = 0;
2149         ct_iocb->handle = sp->handle;
2150
2151         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2152         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2153         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2154
2155         ct_iocb->cmd_dsd_count =
2156             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2157         ct_iocb->timeout = 0;
2158         ct_iocb->rsp_dsd_count =
2159             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2160         ct_iocb->rsp_byte_count =
2161             cpu_to_le32(bsg_job->reply_payload.payload_len);
2162         ct_iocb->cmd_byte_count =
2163             cpu_to_le32(bsg_job->request_payload.payload_len);
2164         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2165             (bsg_job->request_payload.sg_list)));
2166         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2167            (bsg_job->request_payload.sg_list)));
2168         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2169             (bsg_job->request_payload.sg_list));
2170
2171         avail_dsds = 1;
2172         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2173         index = 0;
2174         tot_dsds = bsg_job->reply_payload.sg_cnt;
2175
2176         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2177                 dma_addr_t       sle_dma;
2178                 cont_a64_entry_t *cont_pkt;
2179
2180                 /* Allocate additional continuation packets? */
2181                 if (avail_dsds == 0) {
2182                         /*
2183                          * Five DSDs are available in the Cont.
2184                          * Type 1 IOCB.
2185                          */
2186                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2187                             ha->req_q_map[0]);
2188                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2189                         avail_dsds = 5;
2190                         cont_iocb_prsnt = 1;
2191                         entry_count++;
2192                 }
2193
2194                 sle_dma = sg_dma_address(sg);
2195                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2196                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2197                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2198                 loop_iteration++;
2199                 avail_dsds--;
2200         }
2201         ct_iocb->entry_count = entry_count;
2202 }
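
/*
 * Entry-count example (hypothetical): the command IOCB embeds one
 * reply DSD and each Continuation Type 1 IOCB holds five more, so a
 * reply scatterlist of 8 entries yields entry_count = 1 + 2
 * continuation IOCBs = 3.
 */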
2203
2204 /**
2205  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2206  * @sp: command to send to the ISP
2207  *
2208  * Returns non-zero if a failure occurred, else zero.
2209  */
2210 int
2211 qla82xx_start_scsi(srb_t *sp)
2212 {
2213         int             ret, nseg;
2214         unsigned long   flags;
2215         struct scsi_cmnd *cmd;
2216         uint32_t        *clr_ptr;
2217         uint32_t        index;
2218         uint32_t        handle;
2219         uint16_t        cnt;
2220         uint16_t        req_cnt;
2221         uint16_t        tot_dsds;
2222         struct device_reg_82xx __iomem *reg;
2223         uint32_t dbval;
2224         uint32_t *fcp_dl;
2225         uint8_t additional_cdb_len;
2226         struct ct6_dsd *ctx;
2227         struct scsi_qla_host *vha = sp->fcport->vha;
2228         struct qla_hw_data *ha = vha->hw;
2229         struct req_que *req = NULL;
2230         struct rsp_que *rsp = NULL;
2231         char tag[2];
2232
2233         /* Setup device pointers. */
2234         ret = 0;
2235         reg = &ha->iobase->isp82;
2236         cmd = GET_CMD_SP(sp);
2237         req = vha->req;
2238         rsp = ha->rsp_q_map[0];
2239
2240         /* So we know we haven't pci_map'ed anything yet */
2241         tot_dsds = 0;
2242
2243         dbval = 0x04 | (ha->portnum << 5);
2244
2245         /* Send marker if required */
2246         if (vha->marker_needed != 0) {
2247                 if (qla2x00_marker(vha, req,
2248                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2249                         ql_log(ql_log_warn, vha, 0x300c,
2250                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2251                         return QLA_FUNCTION_FAILED;
2252                 }
2253                 vha->marker_needed = 0;
2254         }
2255
2256         /* Acquire ring specific lock */
2257         spin_lock_irqsave(&ha->hardware_lock, flags);
2258
2259         /* Check for room in outstanding command list. */
2260         handle = req->current_outstanding_cmd;
2261         for (index = 1; index < req->num_outstanding_cmds; index++) {
2262                 handle++;
2263                 if (handle == req->num_outstanding_cmds)
2264                         handle = 1;
2265                 if (!req->outstanding_cmds[handle])
2266                         break;
2267         }
2268         if (index == req->num_outstanding_cmds)
2269                 goto queuing_error;
2270
2271         /* Map the sg table so we have an accurate count of sg entries needed */
2272         if (scsi_sg_count(cmd)) {
2273                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2274                     scsi_sg_count(cmd), cmd->sc_data_direction);
2275                 if (unlikely(!nseg))
2276                         goto queuing_error;
2277         } else
2278                 nseg = 0;
2279
2280         tot_dsds = nseg;
2281
2282         if (tot_dsds > ql2xshiftctondsd) {
2283                 struct cmd_type_6 *cmd_pkt;
2284                 uint16_t more_dsd_lists = 0;
2285                 struct dsd_dma *dsd_ptr;
2286                 uint16_t i;
2287
2288                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2289                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2290                         ql_dbg(ql_dbg_io, vha, 0x300d,
2291                             "Num of DSD list %d is more than %d for cmd=%p.\n",
2292                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2293                             cmd);
2294                         goto queuing_error;
2295                 }
2296
2297                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2298                         goto sufficient_dsds;
2299                 else
2300                         more_dsd_lists -= ha->gbl_dsd_avail;
2301
2302                 for (i = 0; i < more_dsd_lists; i++) {
2303                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2304                         if (!dsd_ptr) {
2305                                 ql_log(ql_log_fatal, vha, 0x300e,
2306                                     "Failed to allocate memory for dsd_dma "
2307                                     "for cmd=%p.\n", cmd);
2308                                 goto queuing_error;
2309                         }
2310
2311                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2312                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2313                         if (!dsd_ptr->dsd_addr) {
2314                                 kfree(dsd_ptr);
2315                                 ql_log(ql_log_fatal, vha, 0x300f,
2316                                     "Failed to allocate memory for dsd_addr "
2317                                     "for cmd=%p.\n", cmd);
2318                                 goto queuing_error;
2319                         }
2320                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2321                         ha->gbl_dsd_avail++;
2322                 }
2323
2324 sufficient_dsds:
2325                 req_cnt = 1;
2326
2327                 if (req->cnt < (req_cnt + 2)) {
2328                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2329                                 &reg->req_q_out[0]);
2330                         if (req->ring_index < cnt)
2331                                 req->cnt = cnt - req->ring_index;
2332                         else
2333                                 req->cnt = req->length -
2334                                         (req->ring_index - cnt);
2335                         if (req->cnt < (req_cnt + 2))
2336                                 goto queuing_error;
2337                 }
2338
2339                 ctx = sp->u.scmd.ctx =
2340                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2341                 if (!ctx) {
2342                         ql_log(ql_log_fatal, vha, 0x3010,
2343                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2344                         goto queuing_error;
2345                 }
2346
2347                 memset(ctx, 0, sizeof(struct ct6_dsd));
2348                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2349                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2350                 if (!ctx->fcp_cmnd) {
2351                         ql_log(ql_log_fatal, vha, 0x3011,
2352                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2353                         goto queuing_error;
2354                 }
2355
2356                 /* Initialize the DSD list and dma handle */
2357                 INIT_LIST_HEAD(&ctx->dsd_list);
2358                 ctx->dsd_use_cnt = 0;
2359
2360                 if (cmd->cmd_len > 16) {
2361                         additional_cdb_len = cmd->cmd_len - 16;
2362                         if ((cmd->cmd_len % 4) != 0) {
2363                         /* A SCSI command bigger than 16 bytes must be
2364                          * a multiple of 4 bytes in length.
2365                          */
2366                                 ql_log(ql_log_warn, vha, 0x3012,
2367                                     "scsi cmd len %d not multiple of 4 "
2368                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2369                                 goto queuing_error_fcp_cmnd;
2370                         }
2371                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2372                 } else {
2373                         additional_cdb_len = 0;
2374                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2375                 }
2376
2377                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2378                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2379
2380                 /* Zero out remaining portion of packet. */
2381                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2382                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2383                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2384                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2385
2386                 /* Set NPORT-ID and LUN number*/
2387                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2388                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2389                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2390                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2391                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2392
2393                 /* Build IOCB segments */
2394                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2395                         goto queuing_error_fcp_cmnd;
2396
2397                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2398                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2399
2400                 /* build FCP_CMND IU */
2401                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2402                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2403                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2404
2405                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2406                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2407                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2408                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2409
2410                 /*
2411                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2412                  */
2413                 if (scsi_populate_tag_msg(cmd, tag)) {
2414                         switch (tag[0]) {
2415                         case HEAD_OF_QUEUE_TAG:
2416                                 ctx->fcp_cmnd->task_attribute =
2417                                     TSK_HEAD_OF_QUEUE;
2418                                 break;
2419                         case ORDERED_QUEUE_TAG:
2420                                 ctx->fcp_cmnd->task_attribute =
2421                                     TSK_ORDERED;
2422                                 break;
2423                         }
2424                 }
2425
2426                 /* Populate the FCP_PRIO. */
2427                 if (ha->flags.fcp_prio_enabled)
2428                         ctx->fcp_cmnd->task_attribute |=
2429                             sp->fcport->fcp_prio << 3;
2430
2431                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2432
2433                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2434                     additional_cdb_len);
2435                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2436
2437                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2438                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2439                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2440                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2441                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2442
2443                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2444                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2445                 /* Set total data segment count. */
2446                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2447                 /* Specify response queue number where
2448                  * completion should happen
2449                  */
2450                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2451         } else {
2452                 struct cmd_type_7 *cmd_pkt;
2453                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2454                 if (req->cnt < (req_cnt + 2)) {
2455                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2456                             &reg->req_q_out[0]);
2457                         if (req->ring_index < cnt)
2458                                 req->cnt = cnt - req->ring_index;
2459                         else
2460                                 req->cnt = req->length -
2461                                         (req->ring_index - cnt);
2462                 }
2463                 if (req->cnt < (req_cnt + 2))
2464                         goto queuing_error;
2465
2466                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2467                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2468
2469                 /* Zero out remaining portion of packet. */
2470                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2471                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2472                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2473                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2474
2475                 /* Set NPORT-ID and LUN number*/
2476                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2477                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2478                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2479                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2480                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2481
2482                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2483                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2484                     sizeof(cmd_pkt->lun));
2485
2486                 /*
2487                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2488                  */
2489                 if (scsi_populate_tag_msg(cmd, tag)) {
2490                         switch (tag[0]) {
2491                         case HEAD_OF_QUEUE_TAG:
2492                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2493                                 break;
2494                         case ORDERED_QUEUE_TAG:
2495                                 cmd_pkt->task = TSK_ORDERED;
2496                                 break;
2497                         }
2498                 }
2499
2500                 /* Populate the FCP_PRIO. */
2501                 if (ha->flags.fcp_prio_enabled)
2502                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2503
2504                 /* Load SCSI command packet. */
2505                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2506                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2507
2508                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2509
2510                 /* Build IOCB segments */
2511                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2512
2513                 /* Set total data segment count. */
2514                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2515                 /* Specify response queue number where
2516                  * completion should happen.
2517                  */
2518                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2519
2520         }
2521         /* Build command packet. */
2522         req->current_outstanding_cmd = handle;
2523         req->outstanding_cmds[handle] = sp;
2524         sp->handle = handle;
2525         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2526         req->cnt -= req_cnt;
2527         wmb();
2528
2529         /* Adjust ring index. */
2530         req->ring_index++;
2531         if (req->ring_index == req->length) {
2532                 req->ring_index = 0;
2533                 req->ring_ptr = req->ring;
2534         } else
2535                 req->ring_ptr++;
2536
2537         sp->flags |= SRB_DMA_VALID;
2538
2539         /* Set chip new ring index. */
2540         /* write, read and verify logic */
2541         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2542         if (ql2xdbwr)
2543                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2544         else {
2545                 WRT_REG_DWORD(
2546                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2547                         dbval);
2548                 wmb();
2549                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2550                         WRT_REG_DWORD(
2551                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2552                                 dbval);
2553                         wmb();
2554                 }
2555         }
2556
2557         /* Manage unprocessed RIO/ZIO commands in response queue. */
2558         if (vha->flags.process_response_queue &&
2559             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2560                 qla24xx_process_response_queue(vha, rsp);
2561
2562         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2563         return QLA_SUCCESS;
2564
2565 queuing_error_fcp_cmnd:
2566         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2567 queuing_error:
2568         if (tot_dsds)
2569                 scsi_dma_unmap(cmd);
2570
2571         if (sp->u.scmd.ctx) {
2572                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2573                 sp->u.scmd.ctx = NULL;
2574         }
2575         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2576
2577         return QLA_FUNCTION_FAILED;
2578 }
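
/*
 * Note on the split above: ql2xshiftctondsd is a module parameter;
 * when the mapped segment count exceeds it, the command is built as a
 * Command Type 6 entry whose DSDs live in externally chained lists
 * (the dl_dma_pool allocations), otherwise a plain Command Type 7
 * entry is used.
 */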
2579
2580 int
2581 qla2x00_start_sp(srb_t *sp)
2582 {
2583         int rval;
2584         struct qla_hw_data *ha = sp->fcport->vha->hw;
2585         void *pkt;
2586         unsigned long flags;
2587
2588         rval = QLA_FUNCTION_FAILED;
2589         spin_lock_irqsave(&ha->hardware_lock, flags);
2590         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2591         if (!pkt) {
2592                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2593                     "qla2x00_alloc_iocbs failed.\n");
2594                 goto done;
2595         }
2596
2597         rval = QLA_SUCCESS;
2598         switch (sp->type) {
2599         case SRB_LOGIN_CMD:
2600                 IS_FWI2_CAPABLE(ha) ?
2601                     qla24xx_login_iocb(sp, pkt) :
2602                     qla2x00_login_iocb(sp, pkt);
2603                 break;
2604         case SRB_LOGOUT_CMD:
2605                 IS_FWI2_CAPABLE(ha) ?
2606                     qla24xx_logout_iocb(sp, pkt) :
2607                     qla2x00_logout_iocb(sp, pkt);
2608                 break;
2609         case SRB_ELS_CMD_RPT:
2610         case SRB_ELS_CMD_HST:
2611                 qla24xx_els_iocb(sp, pkt);
2612                 break;
2613         case SRB_CT_CMD:
2614                 IS_FWI2_CAPABLE(ha) ?
2615                     qla24xx_ct_iocb(sp, pkt) :
2616                     qla2x00_ct_iocb(sp, pkt);
2617                 break;
2618         case SRB_ADISC_CMD:
2619                 IS_FWI2_CAPABLE(ha) ?
2620                     qla24xx_adisc_iocb(sp, pkt) :
2621                     qla2x00_adisc_iocb(sp, pkt);
2622                 break;
2623         case SRB_TM_CMD:
2624                 IS_QLAFX00(ha) ?
2625                     qlafx00_tm_iocb(sp, pkt) :
2626                     qla24xx_tm_iocb(sp, pkt);
2627                 break;
2628         case SRB_FXIOCB_DCMD:
2629         case SRB_FXIOCB_BCMD:
2630                 qlafx00_fxdisc_iocb(sp, pkt);
2631                 break;
2632         case SRB_ABT_CMD:
2633                 qlafx00_abort_iocb(sp, pkt);
2634                 break;
2635         default:
2636                 break;
2637         }
2638
2639         wmb();
2640         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2641 done:
2642         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2643         return rval;
2644 }
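
/*
 * Usage sketch (assumed caller, for illustration): an async login
 * helper typically prepares an srb and hands it off here, e.g.
 *
 *	sp->type = SRB_LOGIN_CMD;
 *	sp->name = "login";
 *	rval = qla2x00_start_sp(sp);
 */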
2645
2646 static void
2647 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2648                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2649 {
2650         uint16_t avail_dsds;
2651         uint32_t *cur_dsd;
2652         uint32_t req_data_len = 0;
2653         uint32_t rsp_data_len = 0;
2654         struct scatterlist *sg;
2655         int index;
2656         int entry_count = 1;
2657         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2658
2659         /* Update entry type to indicate a bidir command */
2660         *((uint32_t *)(&cmd_pkt->entry_type)) =
2661                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2662
2663         /* Set the transfer direction; for a bidir command both flags
2664          * are set.  Also set the BD_WRAP_BACK flag; the firmware takes
2665          * care of assigning DID=SID for outgoing pkts.
2666          */
2667         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2668         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2669         cmd_pkt->control_flags =
2670                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2671                                                         BD_WRAP_BACK);
2672
2673         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2674         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2675         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2676         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2677
2678         vha->bidi_stats.transfer_bytes += req_data_len;
2679         vha->bidi_stats.io_count++;
2680
2681         /* Only one DSD is available in the bidirectional IOCB; the
2682          * remaining DSDs are bundled in continuation IOCBs.
2683          */
2684         avail_dsds = 1;
2685         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2686
2687         index = 0;
2688
2689         for_each_sg(bsg_job->request_payload.sg_list, sg,
2690                                 bsg_job->request_payload.sg_cnt, index) {
2691                 dma_addr_t sle_dma;
2692                 cont_a64_entry_t *cont_pkt;
2693
2694                 /* Allocate additional continuation packets */
2695                 if (avail_dsds == 0) {
2696                         /* A Continuation Type 1 IOCB can accommodate
2697                          * 5 DSDs.
2698                          */
2699                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2700                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2701                         avail_dsds = 5;
2702                         entry_count++;
2703                 }
2704                 sle_dma = sg_dma_address(sg);
2705                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2706                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2707                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2708                 avail_dsds--;
2709         }
2710         /* For a read request the DSDs always go to a continuation IOCB
2711          * and follow the write DSDs.  If there is room on the current
2712          * IOCB they are added there; otherwise a new continuation IOCB
2713          * is allocated.
2714          */
2715         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2716                                 bsg_job->reply_payload.sg_cnt, index) {
2717                 dma_addr_t sle_dma;
2718                 cont_a64_entry_t *cont_pkt;
2719
2720                 /* Allocate additional continuation packets */
2721                 if (avail_dsds == 0) {
2722                         /* A Continuation Type 1 IOCB can accommodate
2723                          * 5 DSDs.
2724                          */
2725                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2726                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2727                         avail_dsds = 5;
2728                         entry_count++;
2729                 }
2730                 sle_dma = sg_dma_address(sg);
2731                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2732                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2733                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2734                 avail_dsds--;
2735         }
2736         /* This value should be the same as the number of IOCBs required for this cmd. */
2737         cmd_pkt->entry_count = entry_count;
2738 }
2739
2740 int
2741 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2742 {
2744         struct qla_hw_data *ha = vha->hw;
2745         unsigned long flags;
2746         uint32_t handle;
2747         uint32_t index;
2748         uint16_t req_cnt;
2749         uint16_t cnt;
2750         uint32_t *clr_ptr;
2751         struct cmd_bidir *cmd_pkt = NULL;
2752         struct rsp_que *rsp;
2753         struct req_que *req;
2754         int rval = EXT_STATUS_OK;
2757
2758         rsp = ha->rsp_q_map[0];
2759         req = vha->req;
2760
2761         /* Send marker if required */
2762         if (vha->marker_needed != 0) {
2763                 if (qla2x00_marker(vha, req,
2764                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2765                         return EXT_STATUS_MAILBOX;
2766                 vha->marker_needed = 0;
2767         }
2768
2769         /* Acquire ring specific lock */
2770         spin_lock_irqsave(&ha->hardware_lock, flags);
2771
2772         /* Check for room in outstanding command list. */
2773         handle = req->current_outstanding_cmd;
2774         for (index = 1; index < req->num_outstanding_cmds; index++) {
2775                 handle++;
2776                 if (handle == req->num_outstanding_cmds)
2777                         handle = 1;
2778                 if (!req->outstanding_cmds[handle])
2779                         break;
2780         }
2781
2782         if (index == req->num_outstanding_cmds) {
2783                 rval = EXT_STATUS_BUSY;
2784                 goto queuing_error;
2785         }
2786
2787         /* Calculate number of IOCB required */
2788         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2789
2790         /* Check for room on request queue. */
2791         if (req->cnt < req_cnt + 2) {
2792                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2793
2794                 if (req->ring_index < cnt)
2795                         req->cnt = cnt - req->ring_index;
2796                 else
2797                         req->cnt = req->length -
2798                                 (req->ring_index - cnt);
2799         }
2800         if (req->cnt < req_cnt + 2) {
2801                 rval = EXT_STATUS_BUSY;
2802                 goto queuing_error;
2803         }
2804
2805         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2806         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2807
2808         /* Zero out remaining portion of packet. */
2809         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2810         clr_ptr = (uint32_t *)cmd_pkt + 2;
2811         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2812
2813         /* Set NPORT-ID  (of vha)*/
2814         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2815         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2816         cmd_pkt->port_id[1] = vha->d_id.b.area;
2817         cmd_pkt->port_id[2] = vha->d_id.b.domain;
2818
2819         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2820         cmd_pkt->entry_status = (uint8_t) rsp->id;
2821         /* Build command packet. */
2822         req->current_outstanding_cmd = handle;
2823         req->outstanding_cmds[handle] = sp;
2824         sp->handle = handle;
2825         req->cnt -= req_cnt;
2826
2827         /* Send the command to the firmware */
2828         wmb();
2829         qla2x00_start_iocbs(vha, req);
2830 queuing_error:
2831         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2832         return rval;
2833 }
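
/*
 * Usage note (assumed caller): the bidirectional path is driven from
 * the BSG interface; a sketch of the hand-off, with hypothetical
 * locals:
 *
 *	tot_dsds = req_sg_cnt + rsp_sg_cnt;
 *	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
 */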