/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
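			/*
			 * Mailbox0 selects the handler: values in the
			 * 0x4000-0x7fff range are mailbox-command completion
			 * statuses, while 0x8000-0xbfff are asynchronous
			 * events (see the two range checks below).
			 */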
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
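/*
 * For context: this handler is not called directly; it is registered
 * against the adapter's IRQ during initialization.  A minimal sketch of
 * that hook-up (the exact flag selection in qla2x00_request_irqs() may
 * differ, e.g. when MSI is enabled):
 *
 *	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
 *	    IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
 */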
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
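	/*
	 * Mailboxes 4 and 5 are read below via
	 * qla2x00_debounce_register(), which (as the name suggests)
	 * re-reads the register until two consecutive reads agree;
	 * the remaining mailboxes take a single read.
	 */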
	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	if (aen == MBA_IDC_COMPLETE && mb[1] >> 15) {
		vha->hw->flags.idc_compl_status = 1;
		if (vha->hw->notify_dcbx_comp)
			complete(&vha->hw->dcbx_comp);
	}

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
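	/*
	 * Bits 8-11 of the AEN descriptor carry the requested ACK
	 * timeout; a zero nibble means the peer is not waiting for an
	 * acknowledgement, so the check below returns early.
	 */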
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "10"
	};

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[6];
	else if (speed < 6)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
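/*
 * Illustrative mapping: a firmware speed code of 4 (PORT_SPEED_8GB)
 * indexes link_speeds[4] == "8", so callers log "... (8 Gbps)"; the
 * CNA-only code 0x13 is remapped to the "10" Gbps string, and anything
 * out of range falls back to the LS_UNKNOWN entry ("?").
 */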
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
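			/*
			 * Worked decode (illustrative): mb[2] = 0x1503 and
			 * mb[6] = 0x4002 yield protocol_engine_id = 0x3,
			 * fw_err_code = 0x215 and err_level = 0x2, i.e. a
			 * Recoverable Fatal error per the table above.
			 */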
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered "
				    "itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additonal_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit  8     = N/W Interface Link-up
			 *	Bit  9     = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *		valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit  15    = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *		Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit  18    = SFP Multirate
			 *	Bit  19    = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 *	Bit  31    = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additonal_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additonal_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
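/*
 * Helper for the RSCN handling in qla2x00_async_event(): returns 1 when
 * the 24-bit port ID carried in an RSCN belongs to one of this adapter's
 * own virtual ports, so the caller can drop the event.
 */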
static int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *	   OR 0xffff for global event
		 * mb[2] = New login state
		 *	   7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *	Event is global, vp_idx is NOT all vps,
		 *	vp_idx does not match
		 *	Event is not global, vp_idx does not match
		 */
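		/*
		 * Example: a global event (mb[1] == 0xffff) whose mb[3]
		 * low byte is 0x05 is handled only by the vport with
		 * vp_idx == 5; every other host breaks out below.
		 */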
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			ql_log(ql_log_warn, vha, 0x505e,
			    "Link is offline.\n");

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		ql_log(ql_log_warn, vha, 0x505f,
		    "Link is operational (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
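		/*
		 * Rebuild the 24-bit FC port ID: the low byte of mb[1] is
		 * the domain and mb[2] carries the area/al_pa pair,
		 * mirroring the domain<<16 | area<<8 | al_pa packing used
		 * for host_pid just below.
		 */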
		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
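		/*
		 * The low word of io_parameter[1] appears to carry the
		 * loop ID already associated with this port ID; the
		 * MBS_PORT_ID_USED consumers treat data[1] as a loop ID
		 * when retrying the login (assumption based on the
		 * matching mailbox path above, where data[1] = mb1).
		 */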
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
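		/*
		 * Worked example of the 32-bit wrap: with the LBA truncated
		 * to 0xfffffffe and e_ref_tag == 0x00000000, the unsigned
		 * arithmetic yields 0 - 0xfffffffe + 1 == 3 blocks done,
		 * which is why the >2TB case needs no special handling.
		 */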
		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct fc_bsg_job *bsg_job = NULL;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;
		bsg_job = sp->u.bsg_job;
	} else {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_job->reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(vha, sp, (DID_OK << 16));
}
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;
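	/*
	 * A clean CS_COMPLETE that still reports SS_RESIDUAL_OVER is
	 * reclassified as CS_DATA_OVERRUN above, steering it into the
	 * switch's default arm (DID_ERROR) instead of the success path.
	 */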
	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;
2272 /* Move sense data. */
2273 if (IS_FWI2_CAPABLE(ha))
2274 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2275 memcpy(sense_ptr, pkt->data, sense_sz);
2276 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2277 sense_ptr, sense_sz);
2279 sense_len -= sense_sz;
2280 sense_ptr += sense_sz;
2282 SET_CMD_SENSE_PTR(sp, sense_ptr);
2283 SET_CMD_SENSE_LEN(sp, sense_len);
2285 /* Place command on done queue. */
2286 if (sense_len == 0) {
2287 rsp->status_srb = NULL;
2288 sp->done(ha, sp, cp->result);
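/*
 * Note: on FWI2-capable (ISP24xx and later) adapters the firmware delivers
 * the sense bytes in FCP byte order, hence the host_to_fcp_swap() above
 * before the copy into the midlayer sense buffer.
 */
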
/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

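/*
 * Note: the most-significant word of an IOCB handle encodes the request
 * queue that issued it, which is why the handle is validated against
 * ha->req_q_map above before the srb lookup is attempted.
 */
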
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		mboxes >>= 1;
		wptr++;
	}
}
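
/*
 * Note: mboxes is a bitmask (ha->mcp->in_mb) with one bit per mailbox
 * register the caller expects back; it is shifted right as the loop walks
 * mailbox1..N, so only the requested registers are read from the chip.
 */
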
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
			(void)qlt_24xx_process_response_error(vha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			/* ensure that the ATIO queue is empty */
			qlt_24xx_process_atio_queue(vha);
			/* fall through */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into the default case.
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index. */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

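/*
 * Note: qla2xxx_check_risc_status() is diagnostic only -- it polls the RISC
 * I/O window registers after a pause and logs the 0x55AA marker when set;
 * it does not change the recovery path taken by the caller.
 */
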
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

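/*
 * Note: the iter = 50 budget above bounds how many events one hard-IRQ
 * invocation will service; anything left over is picked up on the next
 * interrupt.
 */
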
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

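/*
 * Note: when ha->flags.disable_msix_handshake is set, the HCCR
 * "clear RISC interrupt" write is skipped in the MSI-X handlers; the
 * handshake appears to be needed only on parts that require an explicit
 * acknowledgment even for message-signaled interrupts.
 */
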
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue. */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}

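/*
 * Note: unlike qla24xx_msix_rsp_q(), this handler does not drain the queue
 * in interrupt context; it defers to ha->wq via queue_work_on(), pinning
 * the work item to CPU (rsp->id - 1) so each response queue is always
 * processed on the same CPU.
 */
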
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

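/*
 * Note: in contrast to qla24xx_intr_handler()'s iter loop, the default
 * MSI-X handler runs its body exactly once (do { ... } while (0)),
 * presumably because each MSI-X message corresponds to a single event
 * notification rather than a shared, level-triggered line.
 */
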
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d. Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue. */
	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
			ret = request_irq(qentry->vector,
			    qla83xx_msix_entries[i].handler,
			    0, qla83xx_msix_entries[i].name, rsp);
		} else if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/* Enable MSI-X vector for response queue update for queue 0. */
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

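/*
 * Note: with the legacy pci_enable_msix() interface used here, a positive
 * return value means "only this many vectors are available", so the retry
 * above shrinks ha->msix_count to that value; one vector is then reserved
 * for the default queue, hence max_rsp_queues = msix_count - 1.
 */
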
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	    ha->pdev->subsystem_device == 0x7041 ||
	    ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);

skip_msix:
	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

skip_msi:
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	spin_lock_irq(&ha->hardware_lock);
	if (!IS_FWI2_CAPABLE(ha))
		WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

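/*
 * Note: qla2x00_request_irqs() walks a fallback ladder -- MSI-X, then MSI,
 * then legacy INTx -- and only the INTx path registers the handler with
 * IRQF_SHARED, since message-signaled vectors are never shared with other
 * devices.
 */
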
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}

int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",