/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

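/*
 * Worked example (editor's sketch): for dsds = 24, three DSDs ride in the
 * Command Type 2 IOCB itself and each Continuation Type 0 IOCB carries
 * seven more, so iocbs = 1 + (24 - 3) / 7 = 4 entries; the modulo test
 * above rounds the division up whenever a partial continuation remains.
 */
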
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

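/*
 * Editor's note: same ceiling division as above, but with two inline DSDs
 * and five per Continuation Type 1 IOCB; e.g. dsds = 12 needs exactly
 * 1 + (12 - 2) / 5 = 3 IOCB entries.
 */
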
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

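/*
 * Editor's note: both prep helpers above treat the request queue as a
 * circular ring -- ring_index wraps to 0 and ring_ptr back to the ring
 * base once the index reaches req->length -- so a continuation packet is
 * simply the next slot in the ring, consumed under the same hardware lock
 * as the command IOCB it extends.
 */
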
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

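/*
 * Editor's sketch of the mapping above: on READ_INSERT/WRITE_INSERT the
 * HBA generates and inserts protection data (PO_MODE_DIF_INSERT), on
 * READ_STRIP/WRITE_STRIP it verifies and removes it (PO_MODE_DIF_REMOVE),
 * and on the PASS ops protection travels end-to-end, with an IP-checksum
 * guard selected when the SCSI host advertises SHOST_DIX_GUARD_IP.
 */
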
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

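/*
 * Editor's note: a 32-bit DSD is just the two words written above --
 * {dma address, length} -- which is why seven fit in a Continuation
 * Type 0 IOCB, while the 64-bit variant below needs three words per
 * segment and fits only five.
 */
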
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

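/*
 * Editor's note: the 64-bit DSD written above is the three-word form
 * {address low (LSD), address high (MSD), length}, allowing DMA above
 * 4 GB at the cost of one extra word per segment.
 */
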
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return (QLA_FUNCTION_FAILED);
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;
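
        /*
         * Editor's note: handles are 1-based and searched ring-buffer
         * style -- the scan above starts just past the last handle
         * issued, wraps at num_outstanding_cmds, and gives up only after
         * a full lap finds no free outstanding_cmds[] slot.
         */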

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }
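
        /*
         * Worked example (editor's sketch): on a 128-entry ring with
         * ring_index == 120 and a hardware out-pointer cnt == 10, the
         * consumer has wrapped, so free space is
         * length - (ring_index - cnt) = 128 - 110 = 18 entries; the
         * "+ 2" keeps headroom so the ring never appears completely full.
         */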

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number*/
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_QLA82XX(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                            req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

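/*
 * Editor's note: each write of the request-queue in-pointer above is
 * followed by a read of the same (or a related) register; the read is
 * there purely to flush the posted PCI write so the new ring index is
 * visible to the chip before the driver proceeds.
 */
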
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct mrk_entry_fx00 *mrkfx = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLAFX00(ha)) {
                        mrkfx = (struct mrk_entry_fx00 *) mrk;
                        mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
                        mrkfx->handle_hi = 0;
                        mrkfx->tgt_id = cpu_to_le16(loop_id);
                        mrkfx->lun[1] = LSB(lun);
                        mrkfx->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
                } else if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                    MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                    MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

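/*
 * Editor's note: unlike the Type 2/3 builders above, Command Type 6 does
 * not embed data DSDs in the IOCB itself. Each pass of the loop above
 * borrows a pre-allocated DSD list from ha->gbl_dsd_list, points the IOCB
 * (first pass) or the tail of the previous list (later passes) at it, and
 * then fills it with up to QLA_DSDS_PER_IOCB segment descriptors.
 */
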
/**
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

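/*
 * Editor's sketch: this is ceil(dsds / QLA_DSDS_PER_IOCB); assuming the
 * usual QLA_DSDS_PER_IOCB of 37, dsds = 40 yields 40 / 37 = 1 full list
 * plus one more for the 3 left over, i.e. 2 DSD lists.
 */
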
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                    0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

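/*
 * Editor's note: an 8-byte T10 DIF tuple is a 2-byte guard (CRC or IP
 * checksum), a 2-byte application tag and a 4-byte reference tag. The
 * masks set above tell the firmware which tag bytes to check or replace:
 * Type 3 defines no reference-tag contents, so all four ref-tag mask
 * bytes are cleared, while Types 0/1/2 check the full reference tag
 * against the CDB's starting LBA.
 */
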
struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for book keeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

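/*
 * Editor's note: the walker above hands back at most one protection
 * interval (blk_sz bytes) per call, splitting scatterlist elements that
 * cross an interval boundary; *partial is raised for a piece that has
 * not yet completed an interval, which the no-bundling DIF path below
 * uses to decide when to interleave the 8-byte protection tuple.
 */
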
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;

        uint32_t prot_int; /* protection interval */
        uint32_t partial;
        struct qla2_sgx sgx;
        dma_addr_t sle_dma;
        uint32_t sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        prot_int = cmd->device->sector_size;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        sgx.tot_bytes = scsi_bufflen(cmd);
        sgx.cur_sg = scsi_sglist(cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(cmd);

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                            QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

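/*
 * Editor's note: this "no DIF bundling" variant emits one combined
 * stream -- each blk_sz-sized data piece is followed immediately by its
 * 8-byte tuple from the protection scatterlist, which is what the
 * alloc_and_fill re-entry above implements. The bundled mode used by the
 * PASS operations keeps data and protection in two separate streams
 * instead.
 */
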
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int i;
        uint16_t used_dsds = tot_dsds;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                            QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

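/*
 * Editor's sketch of the arithmetic above: each DSD entry occupies 12
 * bytes (8-byte address plus 4-byte length), and dsd_list_len reserves
 * one entry beyond avail_dsds for the chain/terminator slot, hence
 * (avail_dsds + 1) * 12.
 */
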
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;

        cmd = GET_CMD_SP(sp);
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                            QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }
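
        /*
         * Editor's sketch: the "12 + CDB + 4" arithmetic above is the
         * FCP_CMND IU layout -- an 8-byte LUN plus 4 bytes of task
         * attribute/management/direction fields, then the CDB (16 bytes,
         * or the 4-byte-aligned extended length), and finally the 4-byte
         * FCP_DL transfer count.
         */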

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_ORDERED;
                        break;
                default:
                        fcp_cmnd->task_attribute = 0;
                        break;
                }
        } else {
                fcp_cmnd->task_attribute = 0;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;
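
        /*
         * Worked example (editor's sketch): 8 protection bytes per block,
         * so a 64 KB transfer with 512-byte sectors carries
         * (65536 / 512) * 8 = 1024 DIF bytes on the wire.
         */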

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                break;
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                    tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);
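
        /*
         * Editor's note: fcp_dl points at the 4-byte FCP_DL field that
         * sits right after the (possibly extended) CDB in the FCP_CMND
         * IU; it is written with htonl() because FCP payload fields are
         * big-endian, unlike the little-endian IOCB fields filled in with
         * cpu_to_le32() above.
         */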

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walks data segments */

        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                    cur_dsd, tot_dsds))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |=
                    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        char            tag[2];

        /* Setup device pointers. */
        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
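
        /*
         * Editor's note: int_to_scsilun() builds the 8-byte SAM LUN and
         * host_to_fcp_swap() converts it to the big-endian byte order the
         * FCP header expects -- the same pattern applied to every LUN and
         * CDB copied into an FWI2 IOCB in this file.
         */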

        /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->task = TSK_HEAD_OF_QUEUE;
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->task = TSK_ORDERED;
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
        int                     nseg;
        unsigned long           flags;
        uint32_t                *clr_ptr;
        uint32_t                index;
        uint32_t                handle;
        uint16_t                cnt;
        uint16_t                req_cnt = 0;
        uint16_t                tot_dsds;
        uint16_t                tot_prot_dsds;
        uint16_t                fw_prot_opts = 0;
        struct req_que          *req = NULL;
        struct rsp_que          *rsp = NULL;
        struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host    *vha = sp->fcport->vha;
        struct qla_hw_data      *ha = vha->hw;
        struct cmd_type_crc_2   *cmd_pkt;
        uint32_t                status = 0;

#define QDSS_GOT_Q_SPACE        BIT_0

        /* Only process protection or >16 cdb in this routine */
        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
                if (cmd->cmd_len <= 16)
                        return qla24xx_start_scsi(sp);
        }

        /* Setup device pointers. */
        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Compute number of required data segments */
        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
                else
                        sp->flags |= SRB_DMA_VALID;

                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
                        struct qla2_sgx sgx;
                        uint32_t        partial;

                        memset(&sgx, 0, sizeof(struct qla2_sgx));
                        sgx.tot_bytes = scsi_bufflen(cmd);
                        sgx.cur_sg = scsi_sglist(cmd);
                        sgx.sp = sp;

                        nseg = 0;
                        while (qla24xx_get_one_block_sg(
                            cmd->device->sector_size, &sgx, &partial))
                                nseg++;
                }
        } else
                nseg = 0;

        /* number of required data segments */
        tot_dsds = nseg;
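
        /*
         * Editor's note: for HBA-side INSERT/STRIP the transfer was
         * re-counted above in protection-interval (sector-size) pieces
         * rather than raw dma_map_sg() segments, since each interval
         * becomes its own DSD once the 8-byte tuples are interleaved.
         */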

        /* Compute number of required protection segments */
        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
                else
                        sp->flags |= SRB_CRC_PROT_DMA_VALID;

                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
                }
        } else {
                nseg = 0;
        }

        req_cnt = 1;
        /* Total Data and protection sg segment(s) */
        tot_prot_dsds = nseg;
        tot_dsds += nseg;
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        status |= QDSS_GOT_Q_SPACE;

        /* Build header part of command packet (excluding the OPCODE). */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        /* Fill-in common area */
        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Total Data and protection segment(s) */
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Build IOCB segments and adjust for data protection segments */
        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
            QLA_SUCCESS)
                goto queuing_error;

        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        cmd_pkt->timeout = __constant_cpu_to_le16(0);
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_SUCCESS;

queuing_error:
        if (status & QDSS_GOT_Q_SPACE) {
                req->outstanding_cmds[handle] = NULL;
                req->cnt += req_cnt;
        }
        /* Cleanup will be performed by the caller (queuecommand) */

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_FUNCTION_FAILED;
}

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        int affinity = cmd->request->cpu;

        if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
            affinity < ha->max_rsp_queues - 1)
                *rsp = ha->rsp_q_map[affinity + 1];
        else
                *rsp = ha->rsp_q_map[0];
}

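/*
 * Editor's note: with CPU affinity enabled, the completing response queue
 * is chosen from the submitting CPU (queues 1..max_rsp_queues-1) so that
 * completion processing tends to land on the CPU that issued the I/O;
 * queue 0 is the default/fallback.
 */
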
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        uint32_t index, handle;
        request_t *pkt;
        uint16_t cnt, req_cnt;

        pkt = NULL;
        req_cnt = 1;
        handle = 0;

        if (!sp)
                goto skip_cmd_array;

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x700b,
                    "No room on outstanding cmd array.\n");
                goto queuing_error;
        }

        /* Prep command array. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;

        /* Adjust entry-counts as needed. */
        if (sp->type != SRB_SCSI_CMD)
                req_cnt = sp->iocbs;

skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt) {
                if (ha->mqenable || IS_QLA83XX(ha))
                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
                else if (IS_QLA82XX(ha))
                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
                else if (IS_FWI2_CAPABLE(ha))
                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
                else if (IS_QLAFX00(ha))
                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
                else
                        cnt = qla2x00_debounce_register(
                            ISP_REQ_Q_OUT(ha, &reg->isp));

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < req_cnt)
                goto queuing_error;

        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
        memset(pkt, 0, REQUEST_ENTRY_SIZE);
        if (IS_QLAFX00(ha)) {
                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
        } else {
                pkt->entry_count = req_cnt;
                pkt->handle = handle;
        }

queuing_error:
        return pkt;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        struct srb_iocb *lio = &sp->u.iocb_cmd;

        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vha->vp_idx;
}

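/*
 * Editor's note: port_id[] above stores the 24-bit FC destination ID
 * least-significant byte first -- AL_PA, then area, then domain -- the
 * byte order the little-endian IOCB expects.
 */
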
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
        uint16_t opts;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
        if (HAS_EXTENDED_IDS(ha)) {
                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
                mbx->mb10 = cpu_to_le16(opts);
        } else {
                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
        }
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
        mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

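/*
 * Editor's note: without extended IDs the legacy mailbox form above packs
 * the loop ID into the high byte of mb1 and the PLOGI option bits into
 * the low byte; with extended IDs the loop ID gets all of mb1 and the
 * options move to mb10.
 */
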
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags =
            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
            cpu_to_le16(sp->fcport->loop_id) :
            cpu_to_le16(sp->fcport->loop_id << 8);
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
        mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
        /* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
        if (HAS_EXTENDED_IDS(ha)) {
                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
                mbx->mb10 = cpu_to_le16(BIT_0);
        } else {
                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
        }
        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
        mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

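/*
 * Editor's note: mb2/mb3 and mb6/mb7 above carry the 64-bit async_pd_dma
 * buffer address as four 16-bit words -- MSW/LSW of the low dword and
 * MSW/LSW of the high dword respectively -- the usual way DMA addresses
 * are passed through mailbox registers in this driver.
 */
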
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
        uint32_t flags;
        unsigned int lun;
        struct fc_port *fcport = sp->fcport;
        scsi_qla_host_t *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct srb_iocb *iocb = &sp->u.iocb_cmd;
        struct req_que *req = vha->req;

        flags = iocb->u.tmf.flags;
        lun = iocb->u.tmf.lun;

        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
        tsk->entry_count = 1;
        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
        tsk->control_flags = cpu_to_le32(flags);
        tsk->port_id[0] = fcport->d_id.b.al_pa;
        tsk->port_id[1] = fcport->d_id.b.area;
        tsk->port_id[2] = fcport->d_id.b.domain;
        tsk->vp_index = fcport->vha->vp_idx;

        if (flags == TCF_LUN_RESET) {
                int_to_scsilun(lun, &tsk->lun);
                host_to_fcp_swap((uint8_t *)&tsk->lun,
                    sizeof(tsk->lun));
        }
}

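/*
 * Editor's note (assumption about units): r_a_tov is kept in tenths of a
 * second, so the timeout above is two R_A_TOV periods expressed in
 * seconds -- the standard FC recovery window for a task-management IOCB.
 */
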
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;

        els_iocb->entry_type = ELS_IOCB_TYPE;
        els_iocb->entry_count = 1;
        els_iocb->sys_define = 0;
        els_iocb->entry_status = 0;
        els_iocb->handle = sp->handle;
        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        els_iocb->tx_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
        els_iocb->vp_index = sp->fcport->vha->vp_idx;
        els_iocb->sof_type = EST_SOFI3;
        els_iocb->rx_dsd_count =
            __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

        els_iocb->opcode =
            sp->type == SRB_ELS_CMD_RPT ?
            bsg_job->request->rqst_data.r_els.els_code :
            bsg_job->request->rqst_data.h_els.command_code;
        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
        els_iocb->control_flags = 0;
        els_iocb->rx_byte_count =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
        els_iocb->tx_byte_count =
            cpu_to_le32(bsg_job->request_payload.payload_len);

        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        els_iocb->tx_len = cpu_to_le32(sg_dma_len
            (bsg_job->request_payload.sg_list));

        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        els_iocb->rx_len = cpu_to_le32(sg_dma_len
            (bsg_job->reply_payload.sg_list));
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
        int loop_iteration = 0;
        int cont_iocb_prsnt = 0;
        int entry_count = 1;

        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
        ct_iocb->entry_type = CT_IOCB_TYPE;
        ct_iocb->entry_status = 0;
        ct_iocb->handle1 = sp->handle;
        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
        ct_iocb->status = __constant_cpu_to_le16(0);
        ct_iocb->control_flags = __constant_cpu_to_le16(0);
        ct_iocb->timeout = 0;
        ct_iocb->cmd_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->total_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
        ct_iocb->req_bytecount =
            cpu_to_le32(bsg_job->request_payload.payload_len);
        ct_iocb->rsp_bytecount =
            cpu_to_le32(bsg_job->reply_payload.payload_len);

        ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

        ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

        avail_dsds = 1;
        cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
        tot_dsds = bsg_job->reply_payload.sg_cnt;

        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
                dma_addr_t       sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                            vha->hw->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
                        entry_count++;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
        int loop_iteration = 0;
        int cont_iocb_prsnt = 0;
        int entry_count = 1;

        ct_iocb->entry_type = CT_IOCB_TYPE;
        ct_iocb->entry_status = 0;
        ct_iocb->sys_define = 0;
        ct_iocb->handle = sp->handle;

        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        ct_iocb->vp_index = sp->fcport->vha->vp_idx;
        ct_iocb->comp_status = __constant_cpu_to_le16(0);

        ct_iocb->cmd_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->timeout = 0;
        ct_iocb->rsp_dsd_count =
            __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
        ct_iocb->rsp_byte_count =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
        ct_iocb->cmd_byte_count =
            cpu_to_le32(bsg_job->request_payload.payload_len);
        ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
            (bsg_job->request_payload.sg_list));

        avail_dsds = 1;
        cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
        tot_dsds = bsg_job->reply_payload.sg_cnt;

        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
                dma_addr_t       sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                            ha->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
                        entry_count++;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
}

2205 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2206 * @sp: command to send to the ISP
2208 * Returns non-zero if a failure occurred, else zero.
2211 qla82xx_start_scsi(srb_t *sp)
2214 unsigned long flags;
2215 struct scsi_cmnd *cmd;
2222 struct device_reg_82xx __iomem *reg;
2225 uint8_t additional_cdb_len;
2226 struct ct6_dsd *ctx;
2227 struct scsi_qla_host *vha = sp->fcport->vha;
2228 struct qla_hw_data *ha = vha->hw;
2229 struct req_que *req = NULL;
2230 struct rsp_que *rsp = NULL;
2233 /* Setup device pointers. */
2235 reg = &ha->iobase->isp82;
2236 cmd = GET_CMD_SP(sp);
2238 rsp = ha->rsp_q_map[0];
2240 /* So we know we haven't pci_map'ed anything yet */
2243 dbval = 0x04 | (ha->portnum << 5);
2245 /* Send marker if required */
2246 if (vha->marker_needed != 0) {
2247 if (qla2x00_marker(vha, req,
2248 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2249 ql_log(ql_log_warn, vha, 0x300c,
2250 "qla2x00_marker failed for cmd=%p.\n", cmd);
2251 return QLA_FUNCTION_FAILED;
2253 vha->marker_needed = 0;
2256 /* Acquire ring specific lock */
2257 spin_lock_irqsave(&ha->hardware_lock, flags);
2259 /* Check for room in outstanding command list. */
2260 handle = req->current_outstanding_cmd;
2261 for (index = 1; index < req->num_outstanding_cmds; index++) {
2263 if (handle == req->num_outstanding_cmds)
2265 if (!req->outstanding_cmds[handle])
2268 if (index == req->num_outstanding_cmds)
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}
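
		/*
		 * Editor's note -- qla24xx_calc_dsd_lists() (defined earlier
		 * in this file) rounds tot_dsds up to whole DSD lists.
		 * Assuming the usual QLA_DSDS_PER_IOCB of 37, tot_dsds = 80
		 * needs ceil(80/37) = 3 lists; if ha->gbl_dsd_avail is 1, the
		 * loop above allocates the remaining 2, growing the global
		 * pool that qla24xx_build_scsi_type_6_iocbs() consumes from.
		 */
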
sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI commands bigger than 16 bytes must be
				 * a multiple of 4 bytes long.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}
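
		/*
		 * Editor's note -- the FCP_CMND IU length computed above is
		 * the 12-byte fixed header, the CDB (padded to at least 16
		 * bytes), and the 4-byte FCP_DL field.  Examples: a 10-byte
		 * CDB gives 12 + 16 + 4 = 32; a 32-byte CDB gives
		 * additional_cdb_len = 16 and 12 + 32 + 4 = 48.
		 */
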
		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU. */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify the response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify the response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
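
	/*
	 * Editor's note -- the two branches above build different IOCB
	 * formats: Command Type 6 (taken when tot_dsds > ql2xshiftctondsd)
	 * carries the FCP_CMND IU in a separately DMA-mapped buffer and
	 * chains its data segments through the externally allocated DSD
	 * lists, while Command Type 7 embeds the (16-byte) CDB directly in
	 * the packet and chains DSDs through ordinary continuation IOCBs.
	 */
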
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}
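
	/*
	 * Editor's note -- a worked doorbell value from the fields or'd in
	 * above: with portnum = 2, req->id = 1 and ring_index = 0x10,
	 *
	 *	dbval = 0x04 | (2 << 5) | (1 << 8) | (0x10 << 16) = 0x00100144
	 *
	 * When ql2xdbwr (a module parameter) is clear, the write is read
	 * back from nxdb_rd_ptr and retried until the chip observes it.
	 */
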
	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
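
/*
 * Editor's note -- the unwind order above matters: the fcp_cmnd DMA
 * buffer is returned to its pool first (that label is only reachable
 * from the Type 6 path), then the scatter/gather map is released if
 * tot_dsds was set, and finally the ct6_dsd context goes back to the
 * mempool before the hardware lock is dropped.
 */
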
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		qlafx00_abort_iocb(sp, pkt);
		break;
	default:
		break;
	}
	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
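
/*
 * Editor's note -- a minimal, hypothetical caller sketch for the
 * dispatcher above, roughly as an async login path might use it; srb
 * setup and error handling are elided, and sp is assumed to come from
 * qla2x00_get_sp():
 *
 *	sp->type = SRB_LOGIN_CMD;
 *	sp->name = "login";
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		qla2x00_rel_sp(sp->fcport->vha, sp);
 *
 * Each case picks the FWI-2 (ISP24xx and later) or legacy IOCB builder;
 * the qlafx00 variants apply only to the ISPFx00 types routed to them.
 */
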
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
	struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate a bidirectional command. */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag; the firmware will take care of
	 * assigning DID=SID for outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
		BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	/* Only one DSD is available in the bidirectional IOCB; the
	 * remaining DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Read-request DSDs always go to a continuation IOCB and follow
	 * the write DSDs.  If there is room on the current IOCB they are
	 * added to it; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should equal the number of IOCBs used for this command */
	cmd_pkt->entry_count = entry_count;
}
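
/*
 * Editor's note -- a worked entry_count for the routine above: the base
 * bidirectional IOCB holds one DSD and each continuation holds five.
 * With request sg_cnt = 3 and reply sg_cnt = 4, the write loop uses the
 * base slot plus one continuation (entry_count = 2, three slots left),
 * and the read loop fills those three slots then takes one more
 * continuation for its last DSD, so entry_count = 3 is written.
 */
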
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
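
/*
 * Editor's note -- the free-slot computation used above (and in the
 * other start routines) treats the request ring as circular.  Example:
 * with req->length = 2048, req->ring_index = 100 and a firmware out
 * pointer cnt = 80, ring_index >= cnt, so
 *
 *	req->cnt = 2048 - (100 - 80) = 2028
 *
 * entries are free; the command needs req_cnt + 2 of them, otherwise
 * the routine fails with EXT_STATUS_BUSY.
 */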