1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
40 #include "lpfc_sli4.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_crtn.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_version.h"
51 unsigned long _dump_buf_data_order;
53 unsigned long _dump_buf_dif_order;
54 spinlock_t _dump_buf_lock;
56 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
57 static int lpfc_post_rcv_buf(struct lpfc_hba *);
58 static int lpfc_sli4_queue_create(struct lpfc_hba *);
59 static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61 static int lpfc_setup_endian_order(struct lpfc_hba *);
62 static int lpfc_sli4_read_config(struct lpfc_hba *);
63 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64 static void lpfc_free_sgl_list(struct lpfc_hba *);
65 static int lpfc_init_sgl_list(struct lpfc_hba *);
66 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67 static void lpfc_free_active_sgl(struct lpfc_hba *);
68 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
74 static struct scsi_transport_template *lpfc_transport_template = NULL;
75 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
76 static DEFINE_IDR(lpfc_hba_index);
79 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
80 * @phba: pointer to lpfc hba data structure.
82 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
83 * mailbox command. It retrieves the revision information from the HBA and
84 * collects the Vital Product Data (VPD) about the HBA for preparing the
85 * configuration of the HBA.
89 * -ERESTART - requests the SLI layer to reset the HBA and try again.
90 * Any other value - indicates an error.
93 lpfc_config_port_prep(struct lpfc_hba *phba)
95 lpfc_vpd_t *vp = &phba->vpd;
99 char *lpfc_vpd_data = NULL;
/* Fixed license key handed back to LightPulse (LC) HBAs via READ_NVPARM
 * (see the lpfc_is_LC_HBA() check below).
 */
101 static char licensed[56] =
102 "key unlock for use with gnu public licensed code only\0";
103 static int init_key = 1;
105 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
107 phba->link_state = LPFC_HBA_ERROR;
112 phba->link_state = LPFC_INIT_MBX_CMDS;
114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
116 uint32_t *ptext = (uint32_t *) licensed;
/* Swap each 32-bit word of the key to big-endian before the HBA sees it. */
118 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
119 *ptext = cpu_to_be32(*ptext);
/* Place the key into the READ_NVPARM reserved area and issue it polled. */
123 lpfc_read_nv(phba, pmb);
124 memset((char*)mb->un.varRDnvp.rsvd3, 0,
125 sizeof (mb->un.varRDnvp.rsvd3));
126 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
129 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
131 if (rc != MBX_SUCCESS) {
132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
133 "0324 Config Port initialization "
134 "error, mbxCmd x%x READ_NVPARM, "
136 mb->mbxCommand, mb->mbxStatus);
137 mempool_free(pmb, phba->mbox_mem_pool);
/* Save the WWNN/WWPN reported by READ_NVPARM. */
140 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
142 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
146 phba->sli3_options = 0x0;
148 /* Setup and issue mailbox READ REV command */
149 lpfc_read_rev(phba, pmb);
150 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
151 if (rc != MBX_SUCCESS) {
152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
153 "0439 Adapter failed to init, mbxCmd x%x "
154 "READ_REV, mbxStatus x%x\n",
155 mb->mbxCommand, mb->mbxStatus);
156 mempool_free( pmb, phba->mbox_mem_pool);
162 * The value of rr must be 1 since the driver set the cv field to 1.
163 * This setting requires the FW to set all revision fields.
165 if (mb->un.varRdRev.rr == 0) {
167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
168 "0440 Adapter failed to init, READ_REV has "
169 "missing revision information.\n");
170 mempool_free(pmb, phba->mbox_mem_pool);
174 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
175 mempool_free(pmb, phba->mbox_mem_pool);
179 /* Save information as VPD data */
181 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
182 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
183 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
184 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
185 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
186 vp->rev.biuRev = mb->un.varRdRev.biuRev;
187 vp->rev.smRev = mb->un.varRdRev.smRev;
188 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
189 vp->rev.endecRev = mb->un.varRdRev.endecRev;
190 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
191 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
192 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
193 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
194 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
195 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
197 /* If the sli feature level is less than 9, we must
198 * tear down all RPIs and VPIs on link down if NPIV
201 if (vp->rev.feaLevelHigh < 9)
202 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
204 if (lpfc_is_LC_HBA(phba->pcidev->device))
205 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
206 sizeof (phba->RandomData));
208 /* Get adapter VPD information */
209 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
/* Pull the VPD out of the adapter in chunks with polled DUMP_MEM mailbox
 * commands, accumulating into lpfc_vpd_data at 'offset'.
 */
214 lpfc_dump_mem(phba, pmb, offset);
215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
217 if (rc != MBX_SUCCESS) {
218 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
219 "0441 VPD not present on adapter, "
220 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
221 mb->mbxCommand, mb->mbxStatus);
222 mb->un.varDmp.word_cnt = 0;
224 /* dump mem may return a zero when finished or we got a
225 * mailbox error, either way we are done.
227 if (mb->un.varDmp.word_cnt == 0)
/* Clamp the copy so it never runs past the end of lpfc_vpd_data. */
229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
232 lpfc_vpd_data + offset,
233 mb->un.varDmp.word_cnt);
234 offset += mb->un.varDmp.word_cnt;
235 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
236 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
238 kfree(lpfc_vpd_data);
240 mempool_free(pmb, phba->mbox_mem_pool);
245 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
246 * @phba: pointer to lpfc hba data structure.
247 * @pmboxq: pointer to the driver internal queue element for mailbox command.
249 * This is the completion handler for driver's configuring asynchronous event
250 * mailbox command to the device. If the mailbox command returns successfully,
251 * it will set internal async event support flag to 1; otherwise, it will
252 * set internal async event support flag to 0.
255 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/* Temperature-sensor support is advertised only when the async-event
 * config mailbox completed successfully; otherwise it is disabled.
 */
257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
258 phba->temp_sensor_support = 1;
260 phba->temp_sensor_support = 0;
/* Mailbox element is always returned to the pool, success or failure. */
261 mempool_free(pmboxq, phba->mbox_mem_pool);
266 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
267 * @phba: pointer to lpfc hba data structure.
268 * @pmboxq: pointer to the driver internal queue element for mailbox command.
270 * This is the completion handler for dump mailbox command for getting
271 * wake up parameters. When this command complete, the response contain
272 * Option rom version of the HBA. This function translate the version number
273 * into a human readable string and store it in OptionROMVersion.
276 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
279 uint32_t prog_id_word;
281 /* character array used for decoding dist type. */
282 char dist_char[] = "nabx";
/* On mailbox failure there is nothing to decode; just free and bail. */
284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
285 mempool_free(pmboxq, phba->mbox_mem_pool);
/* Overlay the prog_id bit-field view onto the raw response word. */
289 prg = (struct prog_id *) &prog_id_word;
291 /* word 7 contains the option rom version */
292 prog_id_word = pmboxq->u.mb.un.varWords[7];
294 /* Decode the Option rom version word to a readable string */
296 dist = dist_char[prg->dist];
/* dist==3 with num==0 gets the short "ver.rev lev" form; presumably this
 * marks a released image — TODO confirm against the prog_id layout.
 */
298 if ((prg->dist == 3) && (prg->num == 0))
299 sprintf(phba->OptionROMVersion, "%d.%d%d",
300 prg->ver, prg->rev, prg->lev);
302 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
303 prg->ver, prg->rev, prg->lev,
305 mempool_free(pmboxq, phba->mbox_mem_pool);
310 * lpfc_config_port_post - Perform lpfc initialization after config port
311 * @phba: pointer to lpfc hba data structure.
313 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
314 * command call. It performs all internal resource and state setups on the
315 * port: post IOCB buffers, enable appropriate host interrupt attentions,
316 * ELS ring timers, etc.
320 * Any other value - error.
323 lpfc_config_port_post(struct lpfc_hba *phba)
325 struct lpfc_vport *vport = phba->pport;
326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
329 struct lpfc_dmabuf *mp;
330 struct lpfc_sli *psli = &phba->sli;
331 uint32_t status, timeout;
335 spin_lock_irq(&phba->hbalock);
337 * If the Config port completed correctly the HBA is not
338 * over heated any more.
340 if (phba->over_temp_state == HBA_OVER_TEMP)
341 phba->over_temp_state = HBA_NORMAL_TEMP;
342 spin_unlock_irq(&phba->hbalock);
344 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
346 phba->link_state = LPFC_HBA_ERROR;
351 /* Get login parameters for NID. */
352 lpfc_read_sparam(phba, pmb, 0);
354 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
356 "0448 Adapter failed init, mbxCmd x%x "
357 "READ_SPARM mbxStatus x%x\n",
358 mb->mbxCommand, mb->mbxStatus);
359 phba->link_state = LPFC_HBA_ERROR;
360 mp = (struct lpfc_dmabuf *) pmb->context1;
361 mempool_free( pmb, phba->mbox_mem_pool);
362 lpfc_mbuf_free(phba, mp->virt, mp->phys);
/* Copy the service parameters out of the READ_SPARM DMA buffer, then
 * release the buffer.
 */
367 mp = (struct lpfc_dmabuf *) pmb->context1;
369 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
370 lpfc_mbuf_free(phba, mp->virt, mp->phys);
372 pmb->context1 = NULL;
/* Administrator-configured soft WWNN/WWPN override the adapter values. */
374 if (phba->cfg_soft_wwnn)
375 u64_to_wwn(phba->cfg_soft_wwnn,
376 vport->fc_sparam.nodeName.u.wwn);
377 if (phba->cfg_soft_wwpn)
378 u64_to_wwn(phba->cfg_soft_wwpn,
379 vport->fc_sparam.portName.u.wwn);
380 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
381 sizeof (struct lpfc_name));
382 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
383 sizeof (struct lpfc_name));
385 /* Update the fc_host data structures with new wwn. */
386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
391 /* This should be consolidated into parse_vpd ? - mr */
392 if (phba->SerialNumber[0] == 0) {
395 outptr = &vport->fc_nodename.u.s.IEEE[0];
396 for (i = 0; i < 12; i++) {
/* Translate WWNN nibbles into characters: 0x30+'j' yields '0'..'9',
 * 0x61+(j-10) yields 'a'.. — presumably high nibble then low nibble per
 * byte (intermediate lines elided in this listing; confirm in full file).
 */
398 j = ((status & 0xf0) >> 4);
400 phba->SerialNumber[i] =
401 (char)((uint8_t) 0x30 + (uint8_t) j);
403 phba->SerialNumber[i] =
404 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
408 phba->SerialNumber[i] =
409 (char)((uint8_t) 0x30 + (uint8_t) j);
411 phba->SerialNumber[i] =
412 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
416 lpfc_read_config(phba, pmb);
418 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
420 "0453 Adapter failed to init, mbxCmd x%x "
421 "READ_CONFIG, mbxStatus x%x\n",
422 mb->mbxCommand, mb->mbxStatus);
423 phba->link_state = LPFC_HBA_ERROR;
424 mempool_free( pmb, phba->mbox_mem_pool);
428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
430 phba->cfg_hba_queue_depth =
431 mb->un.varRdConfig.max_xri + 1;
433 phba->lmt = mb->un.varRdConfig.lmt;
435 /* Get the default values for Model Name and Description */
436 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
/* Reject any configured link speed the adapter's link-mode table (lmt)
 * does not support; fall back to auto-negotiation.
 */
438 if ((phba->cfg_link_speed > LINK_SPEED_10G)
439 || ((phba->cfg_link_speed == LINK_SPEED_1G)
440 && !(phba->lmt & LMT_1Gb))
441 || ((phba->cfg_link_speed == LINK_SPEED_2G)
442 && !(phba->lmt & LMT_2Gb))
443 || ((phba->cfg_link_speed == LINK_SPEED_4G)
444 && !(phba->lmt & LMT_4Gb))
445 || ((phba->cfg_link_speed == LINK_SPEED_8G)
446 && !(phba->lmt & LMT_8Gb))
447 || ((phba->cfg_link_speed == LINK_SPEED_10G)
448 && !(phba->lmt & LMT_10Gb))) {
449 /* Reset link speed to auto */
450 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
451 "1302 Invalid speed for this board: "
452 "Reset link speed to auto: x%x\n",
453 phba->cfg_link_speed);
454 phba->cfg_link_speed = LINK_SPEED_AUTO;
457 phba->link_state = LPFC_LINK_DOWN;
459 /* Only process IOCBs on ELS ring till hba_state is READY */
460 if (psli->ring[psli->extra_ring].cmdringaddr)
461 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
462 if (psli->ring[psli->fcp_ring].cmdringaddr)
463 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
464 if (psli->ring[psli->next_ring].cmdringaddr)
465 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
467 /* Post receive buffers for desired rings */
468 if (phba->sli_rev != 3)
469 lpfc_post_rcv_buf(phba);
472 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
474 if (phba->intr_type == MSIX) {
475 rc = lpfc_config_msi(phba, pmb);
477 mempool_free(pmb, phba->mbox_mem_pool);
480 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
481 if (rc != MBX_SUCCESS) {
482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
483 "0352 Config MSI mailbox command "
484 "failed, mbxCmd x%x, mbxStatus x%x\n",
485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
487 mempool_free(pmb, phba->mbox_mem_pool);
492 spin_lock_irq(&phba->hbalock);
493 /* Initialize ERATT handling flag */
494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
496 /* Enable appropriate host interrupts */
497 status = readl(phba->HCregaddr);
498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
499 if (psli->num_rings > 0)
500 status |= HC_R0INT_ENA;
501 if (psli->num_rings > 1)
502 status |= HC_R1INT_ENA;
503 if (psli->num_rings > 2)
504 status |= HC_R2INT_ENA;
505 if (psli->num_rings > 3)
506 status |= HC_R3INT_ENA;
/* When FCP-ring polling is enabled with ring interrupts disabled, do not
 * enable the ring-0 attention interrupt.
 */
508 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
509 (phba->cfg_poll & DISABLE_FCP_RING_INT))
510 status &= ~(HC_R0INT_ENA);
512 writel(status, phba->HCregaddr);
513 readl(phba->HCregaddr); /* flush */
514 spin_unlock_irq(&phba->hbalock);
516 /* Set up ring-0 (ELS) timer */
517 timeout = phba->fc_ratov * 2;
518 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
519 /* Set up heart beat (HB) timer */
520 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
521 phba->hb_outstanding = 0;
522 phba->last_completion_time = jiffies;
523 /* Set up error attention (ERATT) polling timer */
524 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
526 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
527 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
528 lpfc_set_loopback_flag(phba);
529 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
530 if (rc != MBX_SUCCESS) {
531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
532 "0454 Adapter failed to init, mbxCmd x%x "
533 "INIT_LINK, mbxStatus x%x\n",
534 mb->mbxCommand, mb->mbxStatus);
536 /* Clear all interrupt enable conditions */
537 writel(0, phba->HCregaddr);
538 readl(phba->HCregaddr); /* flush */
539 /* Clear all pending interrupts */
540 writel(0xffffffff, phba->HAregaddr);
541 readl(phba->HAregaddr); /* flush */
543 phba->link_state = LPFC_HBA_ERROR;
545 mempool_free(pmb, phba->mbox_mem_pool);
548 /* MBOX buffer will be freed in mbox compl */
549 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
550 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
551 pmb->mbox_cmpl = lpfc_config_async_cmpl;
552 pmb->vport = phba->pport;
553 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
/* NOTE(review): the log string below ends with "\n." — looks like a
 * transposed ".\n"; fixing it is a code change, not a comment change.
 */
555 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
556 lpfc_printf_log(phba,
559 "0456 Adapter failed to issue "
560 "ASYNCEVT_ENABLE mbox status x%x \n.",
562 mempool_free(pmb, phba->mbox_mem_pool);
565 /* Get Option rom version */
566 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
567 lpfc_dump_wakeup_param(phba, pmb);
568 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
569 pmb->vport = phba->pport;
570 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
/* NOTE(review): same transposed "\n." in the log string below. */
572 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
574 "to get Option ROM version status x%x\n.", rc);
575 mempool_free(pmb, phba->mbox_mem_pool);
582 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
583 * @phba: pointer to lpfc HBA data structure.
585 * This routine will do LPFC uninitialization before the HBA is reset when
586 * bringing down the SLI Layer.
590 * Any other value - error.
593 lpfc_hba_down_prep(struct lpfc_hba *phba)
595 struct lpfc_vport **vports;
/* SLI4 has no HC register; only SLI3-and-earlier masks interrupts here. */
598 if (phba->sli_rev <= LPFC_SLI_REV3) {
599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
604 if (phba->pport->load_flag & FC_UNLOADING)
605 lpfc_cleanup_discovery_resources(phba->pport);
/* Not unloading: clean discovery state on every active vport instead. */
607 vports = lpfc_create_vport_work_array(phba);
609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
611 lpfc_cleanup_discovery_resources(vports[i]);
612 lpfc_destroy_vport_work_array(phba, vports);
618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
619 * @phba: pointer to lpfc HBA data structure.
621 * This routine will do uninitialization after the HBA is reset when bringing
622 * down the SLI Layer.
626 * Any other value - error.
629 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
631 struct lpfc_sli *psli = &phba->sli;
632 struct lpfc_sli_ring *pring;
633 struct lpfc_dmabuf *mp, *next_mp;
634 LIST_HEAD(completions);
637 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
638 lpfc_sli_hbqbuf_free_all(phba);
640 /* Cleanup preposted buffers on the ELS ring */
641 pring = &psli->ring[LPFC_ELS_RING];
642 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
644 pring->postbufq_cnt--;
645 lpfc_mbuf_free(phba, mp->virt, mp->phys);
650 spin_lock_irq(&phba->hbalock);
651 for (i = 0; i < psli->num_rings; i++) {
652 pring = &psli->ring[i];
654 /* At this point in time the HBA is either reset or DOA. Either
655 * way, nothing should be on txcmplq as it will NEVER complete.
657 list_splice_init(&pring->txcmplq, &completions);
658 pring->txcmplq_cnt = 0;
/* Drop hbalock while cancelling/aborting — presumably those paths
 * re-acquire it; re-taken below before the next ring. TODO confirm.
 */
659 spin_unlock_irq(&phba->hbalock);
661 /* Cancel all the IOCBs from the completions list */
662 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
665 lpfc_sli_abort_iocb_ring(phba, pring);
666 spin_lock_irq(&phba->hbalock);
668 spin_unlock_irq(&phba->hbalock);
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
676 * This routine will do uninitialization after the HBA is reset when bringing
677 * down the SLI Layer.
681 * Any other value - error.
684 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
686 struct lpfc_scsi_buf *psb, *psb_next;
689 unsigned long iflag = 0;
/* SLI4 teardown builds on the common SLI3 post-reset cleanup. */
690 ret = lpfc_hba_down_post_s3(phba);
693 /* At this point in time the HBA is either reset or DOA. Either
694 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
701 /* abts_sgl_list_lock required because worker thread uses this
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
/* Mark each aborted SCSI buffer successful, then return the whole batch
 * to the free list under scsi_buf_list_lock.
 */
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
719 psb->status = IOSTAT_SUCCESS;
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
732 * uninitialization after the HBA is reset when bring down the SLI Layer.
736 * Any other value - error.
739 lpfc_hba_down_post(struct lpfc_hba *phba)
/* Dispatch to the SLI3 or SLI4 implementation via the per-HBA hook. */
741 return (*phba->lpfc_hba_down_post)(phba);
745 * lpfc_hb_timeout - The HBA-timer timeout handler
746 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
748 * This is the HBA-timer timeout handler registered to the lpfc driver. When
749 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
750 * work-port-events bitmap and the worker thread is notified. This timeout
751 * event will be used by the worker thread to invoke the actual timeout
752 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
753 * be performed in the timeout handler and the HBA timeout event bit shall
754 * be cleared by the worker thread after it has taken the event bitmap out.
757 lpfc_hb_timeout(unsigned long ptr)
759 struct lpfc_hba *phba;
763 phba = (struct lpfc_hba *)ptr;
765 /* Check for heart beat timeout conditions */
766 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
/* Remember whether the HB timeout bit was already posted so the worker
 * is only woken for a newly posted event.
 */
767 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
769 phba->pport->work_port_events |= WORKER_HB_TMO;
770 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
772 /* Tell the worker thread there is work to do */
774 lpfc_worker_wake_up(phba);
779 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
780 * @phba: pointer to lpfc hba data structure.
781 * @pmboxq: pointer to the driver internal queue element for mailbox command.
783 * This is the callback function to the lpfc heart-beat mailbox command.
784 * If configured, the lpfc driver issues the heart-beat mailbox command to
785 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
786 * heart-beat mailbox command is issued, the driver shall set up heart-beat
787 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
788 * heart-beat outstanding state. Once the mailbox command comes back and
789 * no error conditions detected, the heart-beat mailbox command timer is
790 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
791 * state is cleared for the next heart-beat. If the timer expired with the
792 * heart-beat outstanding state set, the driver will put the HBA offline.
795 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
797 unsigned long drvr_flag;
/* Heartbeat answered: clear the outstanding marker under hbalock. */
799 spin_lock_irqsave(&phba->hbalock, drvr_flag);
800 phba->hb_outstanding = 0;
801 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
803 /* Check and reset heart-beat timer if necessary */
804 mempool_free(pmboxq, phba->mbox_mem_pool);
/* Re-arm the heartbeat only while the port is online, not errored, and
 * not in the middle of unloading.
 */
805 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
806 !(phba->link_state == LPFC_HBA_ERROR) &&
807 !(phba->pport->load_flag & FC_UNLOADING))
808 mod_timer(&phba->hb_tmofunc,
809 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
814 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
815 * @phba: pointer to lpfc hba data structure.
817 * This is the actual HBA-timer timeout handler to be invoked by the worker
818 * thread whenever the HBA timer fired and HBA-timeout event posted. This
819 * handler performs any periodic operations needed for the device. If such
820 * periodic event has already been attended to either in the interrupt handler
821 * or by processing slow-ring or fast-ring events within the HBA-timer
822 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
823 * the timer for the next timeout period. If lpfc heart-beat mailbox command
824 * is configured and there is no heart-beat mailbox command outstanding, a
825 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
826 * has been a heart-beat mailbox command outstanding, the HBA shall be put
830 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
832 LPFC_MBOXQ_t *pmboxq;
833 struct lpfc_dmabuf *buf_ptr;
835 struct lpfc_sli *psli = &phba->sli;
836 LIST_HEAD(completions);
/* Nothing to do if the HBA is errored, unloading, or offline. */
838 if ((phba->link_state == LPFC_HBA_ERROR) ||
839 (phba->pport->load_flag & FC_UNLOADING) ||
840 (phba->pport->fc_flag & FC_OFFLINE_MODE))
843 spin_lock_irq(&phba->pport->work_port_lock);
/* A completion landed within the last heartbeat interval, so the HBA is
 * demonstrably alive: skip the mailbox and just re-arm the timer.
 */
845 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
847 spin_unlock_irq(&phba->pport->work_port_lock);
848 if (!phba->hb_outstanding)
849 mod_timer(&phba->hb_tmofunc,
850 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
852 mod_timer(&phba->hb_tmofunc,
853 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
856 spin_unlock_irq(&phba->pport->work_port_lock);
/* If the deferred-free ELS buffer count has not moved since the last
 * tick, free the whole backlog now.
 */
858 if (phba->elsbuf_cnt &&
859 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
860 spin_lock_irq(&phba->hbalock);
861 list_splice_init(&phba->elsbuf, &completions);
862 phba->elsbuf_cnt = 0;
863 phba->elsbuf_prev_cnt = 0;
864 spin_unlock_irq(&phba->hbalock);
866 while (!list_empty(&completions)) {
867 list_remove_head(&completions, buf_ptr,
868 struct lpfc_dmabuf, list);
869 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
873 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
875 /* If there is no heart beat outstanding, issue a heartbeat command */
876 if (phba->cfg_enable_hba_heartbeat) {
877 if (!phba->hb_outstanding) {
878 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
/* Allocation failed: retry on the next interval tick. */
880 mod_timer(&phba->hb_tmofunc,
881 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
885 lpfc_heart_beat(phba, pmboxq);
886 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
887 pmboxq->vport = phba->pport;
888 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
890 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
891 mempool_free(pmboxq, phba->mbox_mem_pool);
892 mod_timer(&phba->hb_tmofunc,
893 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
/* Heartbeat issued: give it the longer TIMEOUT window and mark it
 * outstanding so a second expiry is treated as a failure.
 */
896 mod_timer(&phba->hb_tmofunc,
897 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
898 phba->hb_outstanding = 1;
902 * If heart beat timeout called with hb_outstanding set
903 * we need to take the HBA offline.
905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
906 "0459 Adapter heartbeat failure, "
907 "taking this port offline.\n");
909 spin_lock_irq(&phba->hbalock);
910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
911 spin_unlock_irq(&phba->hbalock);
913 lpfc_offline_prep(phba);
915 lpfc_unblock_mgmt_io(phba);
916 phba->link_state = LPFC_HBA_ERROR;
917 lpfc_hba_down_post(phba);
923 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
924 * @phba: pointer to lpfc hba data structure.
926 * This routine is called to bring the HBA offline when HBA hardware error
927 * other than Port Error 6 has been detected.
930 lpfc_offline_eratt(struct lpfc_hba *phba)
932 struct lpfc_sli *psli = &phba->sli;
/* Stop SLI processing before taking the port offline. */
934 spin_lock_irq(&phba->hbalock);
935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
936 spin_unlock_irq(&phba->hbalock);
937 lpfc_offline_prep(phba);
/* Barrier, then reset the board under hbalock; afterwards run the
 * post-reset cleanup and wait for mailbox readiness.
 */
940 lpfc_reset_barrier(phba);
941 spin_lock_irq(&phba->hbalock);
942 lpfc_sli_brdreset(phba);
943 spin_unlock_irq(&phba->hbalock);
944 lpfc_hba_down_post(phba);
945 lpfc_sli_brdready(phba, HS_MBRDY);
946 lpfc_unblock_mgmt_io(phba);
947 phba->link_state = LPFC_HBA_ERROR;
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
959 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
/* SLI4 variant: prep, reset, post-reset cleanup, then status check. */
961 lpfc_offline_prep(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
972 * @phba: pointer to lpfc hba data structure.
974 * This routine is invoked to handle the deferred HBA hardware error
975 * conditions. This type of error is indicated by HBA by setting ER1
976 * and another ER bit in the host status register. The driver will
977 * wait until the ER1 bit clears before handling the error condition.
980 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
982 uint32_t old_host_status = phba->work_hs;
983 struct lpfc_sli_ring *pring;
984 struct lpfc_sli *psli = &phba->sli;
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
997 "0479 Deferred Adapter Hardware Error "
998 "Data: x%x x%x x%x\n",
1000 phba->work_status[0], phba->work_status[1]);
1002 spin_lock_irq(&phba->hbalock);
1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1004 spin_unlock_irq(&phba->hbalock);
1008 * Firmware stops when it triggered erratt. That could cause the I/Os
1009 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1010 * SCSI layer retry it after re-establishing link.
1012 pring = &psli->ring[psli->fcp_ring];
1013 lpfc_sli_abort_iocb_ring(phba, pring);
1016 * There was a firmware error. Take the hba offline and then
1017 * attempt to restart it.
1019 lpfc_offline_prep(phba);
1022 /* Wait for the ER1 bit to clear.*/
1023 while (phba->work_hs & HS_FFER1) {
1025 phba->work_hs = readl(phba->HSregaddr);
1026 /* If driver is unloading let the worker thread continue */
1027 if (phba->pport->load_flag & FC_UNLOADING) {
1034 * This is to protect against a race condition in which the
1035 * first write to the host attention register clears the
1036 * host status register.
1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1039 phba->work_hs = old_host_status & ~HS_FFER1;
1041 spin_lock_irq(&phba->hbalock);
1042 phba->hba_flag &= ~DEFER_ERATT;
1043 spin_unlock_irq(&phba->hbalock);
/* Refresh the latched work status from SLIM; 0xa8/0xac are the status
 * word offsets used by this error path — confirm against SLIM layout.
 */
1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1049 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
/* Build a board-level PORTINTERR event and post it to the FC transport
 * as a vendor event so management applications can observe it.
 */
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1065 * @phba: pointer to lpfc hba data structure.
1067 * This routine is invoked to handle the following HBA hardware error
1069 * 1 - HBA error attention interrupt
1070 * 2 - DMA ring index out of range
1071 * 3 - Mailbox command came back as unknown
1074 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1076 struct lpfc_vport *vport = phba->pport;
1077 struct lpfc_sli *psli = &phba->sli;
1078 struct lpfc_sli_ring *pring;
1079 uint32_t event_data;
1080 unsigned long temperature;
1081 struct temp_event temp_event_data;
1082 struct Scsi_Host *shost;
1084 /* If the pci channel is offline, ignore possible errors,
1085 * since we cannot communicate with the pci card anyway.
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
1094 /* If resets are disabled then leave the HBA alone and return */
1095 if (!phba->cfg_enable_hba_reset)
1098 /* Send an internal error event to mgmt application */
1099 lpfc_board_errevt_to_mgmt(phba);
1101 if (phba->hba_flag & DEFER_ERATT)
1102 lpfc_handle_deferred_eratt(phba);
1104 if (phba->work_hs & HS_FFER6) {
1105 /* Re-establishing Link */
1106 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1107 "1301 Re-establishing Link "
1108 "Data: x%x x%x x%x\n",
1110 phba->work_status[0], phba->work_status[1]);
1112 spin_lock_irq(&phba->hbalock);
1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1114 spin_unlock_irq(&phba->hbalock);
1117 * Firmware stops when it triggled erratt with HS_FFER6.
1118 * That could cause the I/Os dropped by the firmware.
1119 * Error iocb (I/O) on txcmplq and let the SCSI layer
1120 * retry it after re-establishing link.
1122 pring = &psli->ring[psli->fcp_ring];
1123 lpfc_sli_abort_iocb_ring(phba, pring);
1126 * There was a firmware error. Take the hba offline and then
1127 * attempt to restart it.
1129 lpfc_offline_prep(phba);
1131 lpfc_sli_brdrestart(phba);
1132 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1133 lpfc_unblock_mgmt_io(phba);
1136 lpfc_unblock_mgmt_io(phba);
1137 } else if (phba->work_hs & HS_CRIT_TEMP) {
1138 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1139 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1140 temp_event_data.event_code = LPFC_CRIT_TEMP;
1141 temp_event_data.data = (uint32_t)temperature;
1143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1144 "0406 Adapter maximum temperature exceeded "
1145 "(%ld), taking this port offline "
1146 "Data: x%x x%x x%x\n",
1147 temperature, phba->work_hs,
1148 phba->work_status[0], phba->work_status[1]);
1150 shost = lpfc_shost_from_vport(phba->pport);
1151 fc_host_post_vendor_event(shost, fc_get_event_number(),
1152 sizeof(temp_event_data),
1153 (char *) &temp_event_data,
1154 SCSI_NL_VID_TYPE_PCI
1155 | PCI_VENDOR_ID_EMULEX);
1157 spin_lock_irq(&phba->hbalock);
1158 phba->over_temp_state = HBA_OVER_TEMP;
1159 spin_unlock_irq(&phba->hbalock);
1160 lpfc_offline_eratt(phba);
1163 /* The if clause above forces this code path when the status
1164 * failure is a value other than FFER6. Do not call the offline
1165 * twice. This is the adapter hardware error path.
1167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1168 "0457 Adapter Hardware Error "
1169 "Data: x%x x%x x%x\n",
1171 phba->work_status[0], phba->work_status[1]);
1173 event_data = FC_REG_DUMP_EVENT;
1174 shost = lpfc_shost_from_vport(vport);
1175 fc_host_post_vendor_event(shost, fc_get_event_number(),
1176 sizeof(event_data), (char *) &event_data,
1177 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1179 lpfc_offline_eratt(phba);
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1192 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1201 if (pci_channel_offline(phba->pcidev))
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1210 /* For now, the actual action for SLI4 device handling is not
1211 * specified yet, just treated it as adaptor hardware failure
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1223 lpfc_sli4_offline_eratt(phba);
1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1231 * routine from the API jump table function pointer from the lpfc_hba struct.
1235 * Any other value - error.
1238 lpfc_handle_eratt(struct lpfc_hba *phba)
1240 (*phba->lpfc_handle_eratt)(phba);
1244 * lpfc_handle_latt - The HBA link event handler
1245 * @phba: pointer to lpfc hba data structure.
1247 * This routine is invoked from the worker thread to handle a HBA host
1248 * attention link event.
1251 lpfc_handle_latt(struct lpfc_hba *phba)
1253 struct lpfc_vport *vport = phba->pport;
1254 struct lpfc_sli *psli = &phba->sli;
1256 volatile uint32_t control;
1257 struct lpfc_dmabuf *mp;
1260 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1263 goto lpfc_handle_latt_err_exit;
1266 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1269 goto lpfc_handle_latt_free_pmb;
1272 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1275 goto lpfc_handle_latt_free_mp;
1278 /* Cleanup any outstanding ELS commands */
1279 lpfc_els_flush_all_cmd(phba);
1281 psli->slistat.link_event++;
1282 lpfc_read_la(phba, pmb, mp);
1283 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1285 /* Block ELS IOCBs until we have processed this mbox command */
1286 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1287 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1288 if (rc == MBX_NOT_FINISHED) {
1290 goto lpfc_handle_latt_free_mbuf;
1293 /* Clear Link Attention in HA REG */
1294 spin_lock_irq(&phba->hbalock);
1295 writel(HA_LATT, phba->HAregaddr);
1296 readl(phba->HAregaddr); /* flush */
1297 spin_unlock_irq(&phba->hbalock);
1301 lpfc_handle_latt_free_mbuf:
1302 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1303 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1304 lpfc_handle_latt_free_mp:
1306 lpfc_handle_latt_free_pmb:
1307 mempool_free(pmb, phba->mbox_mem_pool);
1308 lpfc_handle_latt_err_exit:
1309 /* Enable Link attention interrupts */
1310 spin_lock_irq(&phba->hbalock);
1311 psli->sli_flag |= LPFC_PROCESS_LA;
1312 control = readl(phba->HCregaddr);
1313 control |= HC_LAINT_ENA;
1314 writel(control, phba->HCregaddr);
1315 readl(phba->HCregaddr); /* flush */
1317 /* Clear Link Attention in HA REG */
1318 writel(HA_LATT, phba->HAregaddr);
1319 readl(phba->HAregaddr); /* flush */
1320 spin_unlock_irq(&phba->hbalock);
1321 lpfc_linkdown(phba);
1322 phba->link_state = LPFC_HBA_ERROR;
1324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1325 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1331 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1332 * @phba: pointer to lpfc hba data structure.
1333 * @vpd: pointer to the vital product data.
1334 * @len: length of the vital product data in bytes.
1336 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1337 * an array of characters. In this routine, the ModelName, ProgramType, and
1338 * ModelDesc, etc. fields of the phba data structure will be populated.
1341 * 0 - pointer to the VPD passed in is NULL
1345 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1347 uint8_t lenlo, lenhi;
1357 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1358 "0455 Vital Product Data: x%x x%x x%x x%x\n",
1359 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1361 while (!finished && (index < (len - 4))) {
1362 switch (vpd[index]) {
1370 i = ((((unsigned short)lenhi) << 8) + lenlo);
1379 Length = ((((unsigned short)lenhi) << 8) + lenlo);
1380 if (Length > len - index)
1381 Length = len - index;
1382 while (Length > 0) {
1383 /* Look for Serial Number */
1384 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1391 phba->SerialNumber[j++] = vpd[index++];
1395 phba->SerialNumber[j] = 0;
1398 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1399 phba->vpd_flag |= VPD_MODEL_DESC;
1406 phba->ModelDesc[j++] = vpd[index++];
1410 phba->ModelDesc[j] = 0;
1413 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1414 phba->vpd_flag |= VPD_MODEL_NAME;
1421 phba->ModelName[j++] = vpd[index++];
1425 phba->ModelName[j] = 0;
1428 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1429 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1436 phba->ProgramType[j++] = vpd[index++];
1440 phba->ProgramType[j] = 0;
1443 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1444 phba->vpd_flag |= VPD_PORT;
1451 phba->Port[j++] = vpd[index++];
1481 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1482 * @phba: pointer to lpfc hba data structure.
1483 * @mdp: pointer to the data structure to hold the derived model name.
1484 * @descp: pointer to the data structure to hold the derived description.
1486 * This routine retrieves HBA's description based on its registered PCI device
1487 * ID. The @descp passed into this function points to an array of 256 chars. It
1488 * shall be returned with the model name, maximum speed, and the host bus type.
1489 * The @mdp passed into this function points to an array of 80 chars. When the
1490 * function returns, the @mdp will be filled with the model name.
1493 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1496 uint16_t dev_id = phba->pcidev->device;
1499 int oneConnect = 0; /* default is not a oneConnect */
1504 } m = {"<Unknown>", 0, ""};
1506 if (mdp && mdp[0] != '\0'
1507 && descp && descp[0] != '\0')
1510 if (phba->lmt & LMT_10Gb)
1512 else if (phba->lmt & LMT_8Gb)
1514 else if (phba->lmt & LMT_4Gb)
1516 else if (phba->lmt & LMT_2Gb)
1524 case PCI_DEVICE_ID_FIREFLY:
1525 m = (typeof(m)){"LP6000", max_speed, "PCI"};
1527 case PCI_DEVICE_ID_SUPERFLY:
1528 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1529 m = (typeof(m)){"LP7000", max_speed, "PCI"};
1531 m = (typeof(m)){"LP7000E", max_speed, "PCI"};
1533 case PCI_DEVICE_ID_DRAGONFLY:
1534 m = (typeof(m)){"LP8000", max_speed, "PCI"};
1536 case PCI_DEVICE_ID_CENTAUR:
1537 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1538 m = (typeof(m)){"LP9002", max_speed, "PCI"};
1540 m = (typeof(m)){"LP9000", max_speed, "PCI"};
1542 case PCI_DEVICE_ID_RFLY:
1543 m = (typeof(m)){"LP952", max_speed, "PCI"};
1545 case PCI_DEVICE_ID_PEGASUS:
1546 m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
1548 case PCI_DEVICE_ID_THOR:
1549 m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
1551 case PCI_DEVICE_ID_VIPER:
1552 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
1554 case PCI_DEVICE_ID_PFLY:
1555 m = (typeof(m)){"LP982", max_speed, "PCI-X"};
1557 case PCI_DEVICE_ID_TFLY:
1558 m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
1560 case PCI_DEVICE_ID_HELIOS:
1561 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
1563 case PCI_DEVICE_ID_HELIOS_SCSP:
1564 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
1566 case PCI_DEVICE_ID_HELIOS_DCSP:
1567 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
1569 case PCI_DEVICE_ID_NEPTUNE:
1570 m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
1572 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1573 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
1575 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1576 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
1578 case PCI_DEVICE_ID_BMID:
1579 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1581 case PCI_DEVICE_ID_BSMB:
1582 m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1584 case PCI_DEVICE_ID_ZEPHYR:
1585 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1587 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1588 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1590 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1591 m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1594 case PCI_DEVICE_ID_ZMID:
1595 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1597 case PCI_DEVICE_ID_ZSMB:
1598 m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1600 case PCI_DEVICE_ID_LP101:
1601 m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1603 case PCI_DEVICE_ID_LP10000S:
1604 m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1606 case PCI_DEVICE_ID_LP11000S:
1607 m = (typeof(m)){"LP11000-S", max_speed,
1610 case PCI_DEVICE_ID_LPE11000S:
1611 m = (typeof(m)){"LPe11000-S", max_speed,
1614 case PCI_DEVICE_ID_SAT:
1615 m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1617 case PCI_DEVICE_ID_SAT_MID:
1618 m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1620 case PCI_DEVICE_ID_SAT_SMB:
1621 m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1623 case PCI_DEVICE_ID_SAT_DCSP:
1624 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1626 case PCI_DEVICE_ID_SAT_SCSP:
1627 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1629 case PCI_DEVICE_ID_SAT_S:
1630 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1632 case PCI_DEVICE_ID_HORNET:
1633 m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1636 case PCI_DEVICE_ID_PROTEUS_VF:
1637 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1639 case PCI_DEVICE_ID_PROTEUS_PF:
1640 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1642 case PCI_DEVICE_ID_PROTEUS_S:
1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1645 case PCI_DEVICE_ID_TIGERSHARK:
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1654 m = (typeof(m)){ NULL };
1658 if (mdp && mdp[0] == '\0')
1659 snprintf(mdp, 79,"%s", m.name);
1660 /* oneConnect hba requires special processing, they are all initiators
1661 * and we put the port number on the end
1663 if (descp && descp[0] == '\0') {
1665 snprintf(descp, 255,
1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1681 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1682 * @phba: pointer to lpfc hba data structure.
1683 * @pring: pointer to a IOCB ring.
1684 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1686 * This routine posts a given number of IOCBs with the associated DMA buffer
1687 * descriptors specified by the cnt argument to the given IOCB ring.
1690 * The number of IOCBs NOT able to be posted to the IOCB ring.
1693 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1696 struct lpfc_iocbq *iocb;
1697 struct lpfc_dmabuf *mp1, *mp2;
1699 cnt += pring->missbufcnt;
1701 /* While there are buffers to post */
1703 /* Allocate buffer for command iocb */
1704 iocb = lpfc_sli_get_iocbq(phba);
1706 pring->missbufcnt = cnt;
1711 /* 2 buffers can be posted per command */
1712 /* Allocate buffer to post */
1713 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1715 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1716 if (!mp1 || !mp1->virt) {
1718 lpfc_sli_release_iocbq(phba, iocb);
1719 pring->missbufcnt = cnt;
1723 INIT_LIST_HEAD(&mp1->list);
1724 /* Allocate buffer to post */
1726 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1728 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1730 if (!mp2 || !mp2->virt) {
1732 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1734 lpfc_sli_release_iocbq(phba, iocb);
1735 pring->missbufcnt = cnt;
1739 INIT_LIST_HEAD(&mp2->list);
1744 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1745 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1746 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1747 icmd->ulpBdeCount = 1;
1750 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1751 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1752 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1754 icmd->ulpBdeCount = 2;
1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1766 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1770 lpfc_sli_release_iocbq(phba, iocb);
1771 pring->missbufcnt = cnt;
1774 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1776 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1778 pring->missbufcnt = 0;
1783 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1784 * @phba: pointer to lpfc hba data structure.
1786 * This routine posts initial receive IOCB buffers to the ELS ring. The
1787 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1791 * 0 - success (currently always success)
1794 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1796 struct lpfc_sli *psli = &phba->sli;
1798 /* Ring 0, ELS / CT buffers */
1799 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1800 /* Ring 2 - FCP no buffers needed */
1805 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1808 * lpfc_sha_init - Set up initial array of hash table entries
1809 * @HashResultPointer: pointer to an array as hash table.
1811 * This routine sets up the initial values to the array of hash table entries
1815 lpfc_sha_init(uint32_t * HashResultPointer)
1817 HashResultPointer[0] = 0x67452301;
1818 HashResultPointer[1] = 0xEFCDAB89;
1819 HashResultPointer[2] = 0x98BADCFE;
1820 HashResultPointer[3] = 0x10325476;
1821 HashResultPointer[4] = 0xC3D2E1F0;
1825 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1826 * @HashResultPointer: pointer to an initial/result hash table.
1827 * @HashWorkingPointer: pointer to an working hash table.
1829 * This routine iterates an initial hash table pointed by @HashResultPointer
1830 * with the values from the working hash table pointeed by @HashWorkingPointer.
1831 * The results are putting back to the initial hash table, returned through
1832 * the @HashResultPointer as the result hash table.
1835 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1839 uint32_t A, B, C, D, E;
1842 HashWorkingPointer[t] =
1844 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1846 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1847 } while (++t <= 79);
1849 A = HashResultPointer[0];
1850 B = HashResultPointer[1];
1851 C = HashResultPointer[2];
1852 D = HashResultPointer[3];
1853 E = HashResultPointer[4];
1857 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1858 } else if (t < 40) {
1859 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1860 } else if (t < 60) {
1861 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1863 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1865 TEMP += S(5, A) + E + HashWorkingPointer[t];
1871 } while (++t <= 79);
1873 HashResultPointer[0] += A;
1874 HashResultPointer[1] += B;
1875 HashResultPointer[2] += C;
1876 HashResultPointer[3] += D;
1877 HashResultPointer[4] += E;
1882 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1883 * @RandomChallenge: pointer to the entry of host challenge random number array.
1884 * @HashWorking: pointer to the entry of the working hash array.
1886 * This routine calculates the working hash array referred by @HashWorking
1887 * from the challenge random numbers associated with the host, referred by
1888 * @RandomChallenge. The result is put into the entry of the working hash
1889 * array and returned by reference through @HashWorking.
1892 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1894 *HashWorking = (*RandomChallenge ^ *HashWorking);
1898 * lpfc_hba_init - Perform special handling for LC HBA initialization
1899 * @phba: pointer to lpfc hba data structure.
1900 * @hbainit: pointer to an array of unsigned 32-bit integers.
1902 * This routine performs the special handling for LC HBA initialization.
1905 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1908 uint32_t *HashWorking;
1909 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1911 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1915 HashWorking[0] = HashWorking[78] = *pwwnn++;
1916 HashWorking[1] = HashWorking[79] = *pwwnn;
1918 for (t = 0; t < 7; t++)
1919 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1921 lpfc_sha_init(hbainit);
1922 lpfc_sha_iterate(hbainit, HashWorking);
1927 * lpfc_cleanup - Performs vport cleanups before deleting a vport
1928 * @vport: pointer to a virtual N_Port data structure.
1930 * This routine performs the necessary cleanups before deleting the @vport.
1931 * It invokes the discovery state machine to perform necessary state
1932 * transitions and to release the ndlps associated with the @vport. Note,
1933 * the physical port is treated as @vport 0.
1936 lpfc_cleanup(struct lpfc_vport *vport)
1938 struct lpfc_hba *phba = vport->phba;
1939 struct lpfc_nodelist *ndlp, *next_ndlp;
1942 if (phba->link_state > LPFC_LINK_DOWN)
1943 lpfc_port_link_failure(vport);
1945 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1946 if (!NLP_CHK_NODE_ACT(ndlp)) {
1947 ndlp = lpfc_enable_node(vport, ndlp,
1948 NLP_STE_UNUSED_NODE);
1951 spin_lock_irq(&phba->ndlp_lock);
1952 NLP_SET_FREE_REQ(ndlp);
1953 spin_unlock_irq(&phba->ndlp_lock);
1954 /* Trigger the release of the ndlp memory */
1958 spin_lock_irq(&phba->ndlp_lock);
1959 if (NLP_CHK_FREE_REQ(ndlp)) {
1960 /* The ndlp should not be in memory free mode already */
1961 spin_unlock_irq(&phba->ndlp_lock);
1964 /* Indicate request for freeing ndlp memory */
1965 NLP_SET_FREE_REQ(ndlp);
1966 spin_unlock_irq(&phba->ndlp_lock);
1968 if (vport->port_type != LPFC_PHYSICAL_PORT &&
1969 ndlp->nlp_DID == Fabric_DID) {
1970 /* Just free up ndlp with Fabric_DID for vports */
1975 if (ndlp->nlp_type & NLP_FABRIC)
1976 lpfc_disc_state_machine(vport, ndlp, NULL,
1977 NLP_EVT_DEVICE_RECOVERY);
1979 lpfc_disc_state_machine(vport, ndlp, NULL,
1984 /* At this point, ALL ndlp's should be gone
1985 * because of the previous NLP_EVT_DEVICE_RM.
1986 * Lets wait for this to happen, if needed.
1988 while (!list_empty(&vport->fc_nodes)) {
1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1991 "0233 Nodelist not empty\n");
1992 list_for_each_entry_safe(ndlp, next_ndlp,
1993 &vport->fc_nodes, nlp_listp) {
1994 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1996 "0282 did:x%x ndlp:x%p "
1997 "usgmap:x%x refcnt:%d\n",
1998 ndlp->nlp_DID, (void *)ndlp,
2001 &ndlp->kref.refcount));
2006 /* Wait for any activity on ndlps to settle */
2012 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2013 * @vport: pointer to a virtual N_Port data structure.
2015 * This routine stops all the timers associated with a @vport. This function
2016 * is invoked before disabling or deleting a @vport. Note that the physical
2017 * port is treated as @vport 0.
2020 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2022 del_timer_sync(&vport->els_tmofunc);
2023 del_timer_sync(&vport->fc_fdmitmo);
2024 lpfc_can_disctmo(vport);
2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2030 * @phba: pointer to lpfc hba data structure.
2032 * This routine stops all the timers associated with a HBA. This function is
2033 * invoked before either putting a HBA offline or unloading the driver.
2036 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2038 lpfc_stop_vport_timers(phba->pport);
2039 del_timer_sync(&phba->sli.mbox_tmo);
2040 del_timer_sync(&phba->fabric_block_timer);
2041 del_timer_sync(&phba->eratt_poll);
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2050 case LPFC_PCI_DEV_OC:
2051 /* Stop any OneConnect device sepcific driver timers */
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2063 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2064 * @phba: pointer to lpfc hba data structure.
2066 * This routine marks a HBA's management interface as blocked. Once the HBA's
2067 * management interface is marked as blocked, all the user space access to
2068 * the HBA, whether they are from sysfs interface or libdfc interface will
2069 * all be blocked. The HBA is set to block the management interface when the
2070 * driver prepares the HBA interface for online or offline.
2073 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2075 unsigned long iflag;
2077 spin_lock_irqsave(&phba->hbalock, iflag);
2078 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2079 spin_unlock_irqrestore(&phba->hbalock, iflag);
2083 * lpfc_online - Initialize and bring a HBA online
2084 * @phba: pointer to lpfc hba data structure.
2086 * This routine initializes the HBA and brings a HBA online. During this
2087 * process, the management interface is blocked to prevent user space access
2088 * to the HBA interfering with the driver initialization.
2095 lpfc_online(struct lpfc_hba *phba)
2097 struct lpfc_vport *vport;
2098 struct lpfc_vport **vports;
2103 vport = phba->pport;
2105 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2108 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2109 "0458 Bring Adapter online\n");
2111 lpfc_block_mgmt_io(phba);
2113 if (!lpfc_sli_queue_setup(phba)) {
2114 lpfc_unblock_mgmt_io(phba);
2118 if (phba->sli_rev == LPFC_SLI_REV4) {
2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2120 lpfc_unblock_mgmt_io(phba);
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2130 vports = lpfc_create_vport_work_array(phba);
2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2133 struct Scsi_Host *shost;
2134 shost = lpfc_shost_from_vport(vports[i]);
2135 spin_lock_irq(shost->host_lock);
2136 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2137 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2138 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2139 spin_unlock_irq(shost->host_lock);
2141 lpfc_destroy_vport_work_array(phba, vports);
2143 lpfc_unblock_mgmt_io(phba);
2148 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2149 * @phba: pointer to lpfc hba data structure.
2151 * This routine marks a HBA's management interface as not blocked. Once the
2152 * HBA's management interface is marked as not blocked, all the user space
2153 * access to the HBA, whether they are from sysfs interface or libdfc
2154 * interface will be allowed. The HBA is set to block the management interface
2155 * when the driver prepares the HBA interface for online or offline and then
2156 * set to unblock the management interface afterwards.
2159 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2161 unsigned long iflag;
2163 spin_lock_irqsave(&phba->hbalock, iflag);
2164 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2165 spin_unlock_irqrestore(&phba->hbalock, iflag);
2169 * lpfc_offline_prep - Prepare a HBA to be brought offline
2170 * @phba: pointer to lpfc hba data structure.
2172 * This routine is invoked to prepare a HBA to be brought offline. It performs
2173 * unregistration login to all the nodes on all vports and flushes the mailbox
2174 * queue to make it ready to be brought offline.
2177 lpfc_offline_prep(struct lpfc_hba * phba)
2179 struct lpfc_vport *vport = phba->pport;
2180 struct lpfc_nodelist *ndlp, *next_ndlp;
2181 struct lpfc_vport **vports;
2184 if (vport->fc_flag & FC_OFFLINE_MODE)
2187 lpfc_block_mgmt_io(phba);
2189 lpfc_linkdown(phba);
2191 /* Issue an unreg_login to all nodes on all vports */
2192 vports = lpfc_create_vport_work_array(phba);
2193 if (vports != NULL) {
2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2195 struct Scsi_Host *shost;
2197 if (vports[i]->load_flag & FC_UNLOADING)
2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
2200 shost = lpfc_shost_from_vport(vports[i]);
2201 list_for_each_entry_safe(ndlp, next_ndlp,
2202 &vports[i]->fc_nodes,
2204 if (!NLP_CHK_NODE_ACT(ndlp))
2206 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2208 if (ndlp->nlp_type & NLP_FABRIC) {
2209 lpfc_disc_state_machine(vports[i], ndlp,
2210 NULL, NLP_EVT_DEVICE_RECOVERY);
2211 lpfc_disc_state_machine(vports[i], ndlp,
2212 NULL, NLP_EVT_DEVICE_RM);
2214 spin_lock_irq(shost->host_lock);
2215 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2216 spin_unlock_irq(shost->host_lock);
2217 lpfc_unreg_rpi(vports[i], ndlp);
2221 lpfc_destroy_vport_work_array(phba, vports);
2223 lpfc_sli_mbox_sys_shutdown(phba);
2227 * lpfc_offline - Bring a HBA offline
2228 * @phba: pointer to lpfc hba data structure.
2230 * This routine actually brings a HBA offline. It stops all the timers
2231 * associated with the HBA, brings down the SLI layer, and eventually
2232 * marks the HBA as in offline state for the upper layer protocol.
2235 lpfc_offline(struct lpfc_hba *phba)
2237 struct Scsi_Host *shost;
2238 struct lpfc_vport **vports;
2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2244 /* stop port and all timers associated with this hba */
2245 lpfc_stop_port(phba);
2246 vports = lpfc_create_vport_work_array(phba);
2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2249 lpfc_stop_vport_timers(vports[i]);
2250 lpfc_destroy_vport_work_array(phba, vports);
2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2252 "0460 Bring Adapter offline\n");
2253 /* Bring down the SLI Layer and cleanup. The HBA is offline
2255 lpfc_sli_hba_down(phba);
2256 spin_lock_irq(&phba->hbalock);
2258 spin_unlock_irq(&phba->hbalock);
2259 vports = lpfc_create_vport_work_array(phba);
2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2262 shost = lpfc_shost_from_vport(vports[i]);
2263 spin_lock_irq(shost->host_lock);
2264 vports[i]->work_port_events = 0;
2265 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2266 spin_unlock_irq(shost->host_lock);
2268 lpfc_destroy_vport_work_array(phba, vports);
2272 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2273 * @phba: pointer to lpfc hba data structure.
2275 * This routine is to free all the SCSI buffers and IOCBs from the driver
2276 * list back to kernel. It is called from lpfc_pci_remove_one to free
2277 * the internal resources before the device is removed from the system.
2280 * 0 - successful (for now, it always returns 0)
2283 lpfc_scsi_free(struct lpfc_hba *phba)
2285 struct lpfc_scsi_buf *sb, *sb_next;
2286 struct lpfc_iocbq *io, *io_next;
2288 spin_lock_irq(&phba->hbalock);
2289 /* Release all the lpfc_scsi_bufs maintained by this host. */
2290 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2291 list_del(&sb->list);
2292 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2295 phba->total_scsi_bufs--;
2298 /* Release all the lpfc_iocbq entries maintained by this host. */
2299 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2300 list_del(&io->list);
2302 phba->total_iocbq_bufs--;
2305 spin_unlock_irq(&phba->hbalock);
2311 * lpfc_create_port - Create an FC port
2312 * @phba: pointer to lpfc hba data structure.
2313 * @instance: a unique integer ID to this FC port.
2314 * @dev: pointer to the device data structure.
2316 * This routine creates a FC port for the upper layer protocol. The FC port
2317 * can be created on top of either a physical port or a virtual port provided
2318 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2319 * and associates the FC port created before adding the shost into the SCSI
2323 * @vport - pointer to the virtual N_Port data structure.
2324 * NULL - port create failed.
2327 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2329 struct lpfc_vport *vport;
2330 struct Scsi_Host *shost;
/* A dev other than the PCI device's means this is an NPIV vport, which
 * gets the vport host template; the physical port gets lpfc_template. */
2333 if (dev != &phba->pcidev->dev)
2334 shost = scsi_host_alloc(&lpfc_vport_template,
2335 sizeof(struct lpfc_vport))
2337 shost = scsi_host_alloc(&lpfc_template,
2338 sizeof(struct lpfc_vport));
/* vport state lives in the shost's hostdata area allocated above */
2342 vport = (struct lpfc_vport *) shost->hostdata;
2344 vport->load_flag |= FC_LOADING;
2345 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2346 vport->fc_rscn_flush = 0;
2348 lpfc_get_vport_cfgparam(vport);
2349 shost->unique_id = instance;
2350 shost->max_id = LPFC_MAX_TARGET;
2351 shost->max_lun = vport->cfg_max_luns;
2352 shost->this_id = -1;
2353 shost->max_cmd_len = 16;
/* SLI4 parts have their own DMA boundary and s/g limits */
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2360 * Set initial can_queue value since 0 is no longer supported and
2361 * scsi_add_host will fail. This will be adjusted later based on the
2362 * max xri value determined in hba setup.
2364 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2365 if (dev != &phba->pcidev->dev) {
2366 shost->transportt = lpfc_vport_transport_template;
2367 vport->port_type = LPFC_NPIV_PORT;
2369 shost->transportt = lpfc_transport_template;
2370 vport->port_type = LPFC_PHYSICAL_PORT;
2373 /* Initialize all internally managed lists. */
2374 INIT_LIST_HEAD(&vport->fc_nodes);
2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2376 spin_lock_init(&vport->work_port_lock);
/* Per-vport timers: discovery, FDMI, and ELS timeouts all carry the
 * vport pointer as their callback data. */
2378 init_timer(&vport->fc_disctmo);
2379 vport->fc_disctmo.function = lpfc_disc_timeout;
2380 vport->fc_disctmo.data = (unsigned long)vport;
2382 init_timer(&vport->fc_fdmitmo);
2383 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2384 vport->fc_fdmitmo.data = (unsigned long)vport;
2386 init_timer(&vport->els_tmofunc);
2387 vport->els_tmofunc.function = lpfc_els_timeout;
2388 vport->els_tmofunc.data = (unsigned long)vport;
2390 error = scsi_add_host(shost, dev);
/* Track the new port on the HBA's port list under hbalock */
2394 spin_lock_irq(&phba->hbalock);
2395 list_add_tail(&vport->listentry, &phba->port_list);
2396 spin_unlock_irq(&phba->hbalock);
/* error path: drop the host reference taken by scsi_host_alloc */
2400 scsi_host_put(shost);
2406 * destroy_port - destroy an FC port
2407 * @vport: pointer to an lpfc virtual N_Port data structure.
2409 * This routine destroys a FC port from the upper layer protocol. All the
2410 * resources associated with the port are released.
2413 destroy_port(struct lpfc_vport *vport)
2415 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2416 struct lpfc_hba *phba = vport->phba;
/* Tear down in the reverse order of creation: debugfs, FC transport
 * attributes, then the SCSI host itself. */
2418 lpfc_debugfs_terminate(vport);
2419 fc_remove_host(shost);
2420 scsi_remove_host(shost);
/* Unlink from the HBA's port list under hbalock */
2422 spin_lock_irq(&phba->hbalock);
2423 list_del_init(&vport->listentry);
2424 spin_unlock_irq(&phba->hbalock);
2426 lpfc_cleanup(vport);
2431 * lpfc_get_instance - Get a unique integer ID
2433 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2434 * uses the kernel idr facility to perform the task.
2437 * instance - a unique integer ID allocated as the new instance.
2438 * -1 - lpfc get instance failed.
2441 lpfc_get_instance(void)
2445 /* Assign an unused number */
/* Two-phase idr allocation: preload with GFP_KERNEL, then take an id.
 * Either step failing yields the routine's error return. */
2446 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2448 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2454 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2455 * @shost: pointer to SCSI host data structure.
2456 * @time: elapsed time of the scan in jiffies.
2458 * This routine is called by the SCSI layer with a SCSI host to determine
2459 * whether the scan host is finished.
2461 * Note: there is no scan_start function as adapter initialization will have
2462 * asynchronously kicked off the link initialization.
2465 * 0 - SCSI host scan is not over yet.
2466 * 1 - SCSI host scan is over.
2468 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2470 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2471 struct lpfc_hba *phba = vport->phba;
2474 spin_lock_irq(shost->host_lock);
/* Unloading: no point waiting for discovery to settle */
2476 if (vport->load_flag & FC_UNLOADING) {
/* Hard cap: give up waiting after 30 seconds of scanning */
2480 if (time >= 30 * HZ) {
2481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2482 "0461 Scanning longer than 30 "
2483 "seconds. Continuing initialization\n");
/* Link never came up: stop waiting after 15 seconds */
2487 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2489 "0465 Link down longer than 15 "
2490 "seconds. Continuing initialization\n");
/* Below: conditions under which the scan is NOT yet finished —
 * port not ready, discovery/PRLI outstanding, early empty map,
 * or a mailbox command still active. */
2495 if (vport->port_state != LPFC_VPORT_READY)
2497 if (vport->num_disc_nodes || vport->fc_prli_sent)
2499 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2501 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2507 spin_unlock_irq(shost->host_lock);
2512 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2513 * @shost: pointer to SCSI host data structure.
2515 * This routine initializes a given SCSI host attributes on a FC port. The
2516 * SCSI host can be either on top of a physical port or a virtual port.
2518 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2520 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2521 struct lpfc_hba *phba = vport->phba;
2523 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2526 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2527 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2528 fc_host_supported_classes(shost) = FC_COS_CLASS3;
/* Advertise supported FC-4 types; bytes 2 and 7 are set below —
 * presumably the FCP and CT type bits, confirm against FC-GS. */
2530 memset(fc_host_supported_fc4s(shost), 0,
2531 sizeof(fc_host_supported_fc4s(shost)));
2532 fc_host_supported_fc4s(shost)[2] = 1;
2533 fc_host_supported_fc4s(shost)[7] = 1;
2535 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2536 sizeof fc_host_symbolic_name(shost));
/* Translate the HBA's link-speed capability mask (lmt) into the
 * FC transport's port-speed bitmap. */
2538 fc_host_supported_speeds(shost) = 0;
2539 if (phba->lmt & LMT_10Gb)
2540 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2541 if (phba->lmt & LMT_8Gb)
2542 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2543 if (phba->lmt & LMT_4Gb)
2544 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2545 if (phba->lmt & LMT_2Gb)
2546 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2547 if (phba->lmt & LMT_1Gb)
2548 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
/* Max frame size comes from the service parameters' BB receive size
 * (12-bit value split across Msb nibble and Lsb byte). */
2550 fc_host_maxframe_size(shost) =
2551 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2552 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2554 /* This value is also unchanging */
2555 memset(fc_host_active_fc4s(shost), 0,
2556 sizeof(fc_host_active_fc4s(shost)));
2557 fc_host_active_fc4s(shost)[2] = 1;
2558 fc_host_active_fc4s(shost)[7] = 1;
2560 fc_host_max_npiv_vports(shost) = phba->max_vpi;
/* Attributes are in place — clear the LOADING flag under host_lock */
2561 spin_lock_irq(shost->host_lock);
2562 vport->load_flag &= ~FC_LOADING;
2563 spin_unlock_irq(shost->host_lock);
2567 * lpfc_stop_port_s3 - Stop SLI3 device port
2568 * @phba: pointer to lpfc hba data structure.
2570 * This routine is invoked to stop an SLI3 device port, it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2575 lpfc_stop_port_s3(struct lpfc_hba *phba)
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2593 * This routine is invoked to stop an SLI4 device port, it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2598 lpfc_stop_port_s4(struct lpfc_hba *phba)
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
/* Unlike SLI3 there is no register write here; interrupts are gated
 * by the intr_enable software flag instead. */
2603 phba->sli4_hba.intr_enable = 0;
2604 /* Hard clear it for now, shall have more graceful way to wait later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2613 * the API jump table function pointer from the lpfc_hba struct.
2616 lpfc_stop_port(struct lpfc_hba *phba)
/* Dispatch to the SLI3 or SLI4 implementation via the jump table */
2618 phba->lpfc_stop_port(phba);
2622 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2630 lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
/* Payload length excludes the common SLI4 config mailbox header */
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
2651 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2652 * supports multiple FCF indices.
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
/* Poll when interrupts are not yet enabled; otherwise issue the
 * mailbox with a timeout and wait for completion. */
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
/* On timeout the mailbox is still owned by the completion path, so
 * it must not be freed here. */
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682 * @phba: pointer to lpfc hba data structure.
2683 * @acqe_link: pointer to the async link completion queue entry.
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2689 * Return: Link-attention status in terms of base driver's coding.
2692 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
2695 uint16_t latt_fault;
/* Known fault codes pass through unchanged; anything else is logged
 * and mapped to MBXERR_ERROR. */
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2715 * @phba: pointer to lpfc hba data structure.
2716 * @acqe_link: pointer to the async link completion queue entry.
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
2721 * Return: Link attention type in terms of base driver's coding.
2724 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
/* Map the ACQE link status onto the base driver's AT_* attention types */
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2753 * @phba: pointer to lpfc hba data structure.
2754 * @acqe_link: pointer to the async link completion queue entry.
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2759 * Return: Link-attention link speed in terms of base driver's coding.
2762 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
/* Translate ACQE speed codes to the base driver's LA_* speed values.
 * Sub-gigabit speeds (0/10M/100M) have no LA_* equivalent and map to
 * LA_UNKNW_LINK, as does any unrecognized code. */
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2798 * This routine is to handle the SLI4 asynchronous link event.
2801 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2804 struct lpfc_dmabuf *mp;
/* Only real link up/down transitions are processed; AT_RESERVED
 * (e.g. physical-up without logical-up) is ignored. */
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2835 /* Block ELS IOCBs until we have done process link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2845 /* Parse and translate status field */
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2855 /* Fake the following irrelevant fields; SLI4 link events carry no
 * topology info, so pretend point-to-point with no AL_PA. */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
/* error path: release the mailbox back to its mempool */
2883 mempool_free(pmb, phba->mbox_mem_pool);
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
2889 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2894 lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2907 * If the current FCF is in discovered state,
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2915 spin_unlock_irq(&phba->hbalock);
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
/* NOTE(review): "fron" in the log text below is a typo for "from";
 * left as-is here since it is a runtime string. */
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 "2549 FCF disconnected fron network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
2938 /* If the event is not for currently used fcf do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2942 * Currently, driver support only one FCF - so treat this as
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2959 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960 * @phba: pointer to lpfc hba data structure.
2961 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2963 * This routine is to handle the SLI4 asynchronous dcbx event.
2966 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 struct lpfc_acqe_dcbx *acqe_dcbx)
/* DCBX async events are not handled yet; just log that one arrived */
2969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 "0290 The SLI4 DCBX asynchronous event is not "
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2981 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2983 struct lpfc_cq_event *cq_event;
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
/* Dispatch on the trailer code: link, FCoE, or DCBX handler */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchrous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3030 * Return: 0 if success, otherwise -ENODEV
3033 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
/* Populate each API jump table in turn; each setup call can veto
 * the device group and cause an error return. */
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3065 * lpfc_log_intr_mode - Log the active interrupt mode
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3069 * This routine it invoked to log the currently used active interrupt mode
3072 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
/* One log line per interrupt mode; an unexpected value is reported
 * as an error rather than silently ignored. */
3074 switch (intr_mode) {
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
3097 * @phba: pointer to lpfc hba data structure.
3099 * This routine is invoked to enable the PCI device that is common to all
3104 * other values - error
3107 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3109 struct pci_dev *pdev;
3112 /* Obtain PCI device reference */
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
/* MWI is best-effort; failure is intentionally ignored */
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
/* error path: undo pci_enable_device_mem() */
3133 pci_disable_device(pdev);
3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
3140 * @phba: pointer to lpfc hba data structure.
3142 * This routine is invoked to disable the PCI device that is common to all
3146 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3148 struct pci_dev *pdev;
3151 /* Obtain PCI device reference */
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
/* Mirror image of lpfc_enable_pci_dev(): regions first, then device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
3168 * lpfc_reset_hba - Reset a hba
3169 * @phba: pointer to lpfc hba data structure.
3171 * This routine is invoked to reset a hba device. It brings the HBA
3172 * offline, performs a board restart, and then brings the board back
3173 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3174 * on outstanding mailbox commands.
3177 lpfc_reset_hba(struct lpfc_hba *phba)
3179 /* If resets are disabled then set error state and return. */
3180 if (!phba->cfg_enable_hba_reset) {
3181 phba->link_state = LPFC_HBA_ERROR;
/* offline -> board restart -> unblock management I/O */
3184 lpfc_offline_prep(phba);
3186 lpfc_sli_brdrestart(phba);
3188 lpfc_unblock_mgmt_io(phba);
3192 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193 * @phba: pointer to lpfc hba data structure.
3195 * This routine is invoked to set up the driver internal resources specific to
3196 * support the SLI-3 HBA device it attached to.
3200 * other values - error
3203 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3205 struct lpfc_sli *psli;
3208 * Initialize timers used by driver
3211 /* Heartbeat timer */
3212 init_timer(&phba->hb_tmofunc);
3213 phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 phba->hb_tmofunc.data = (unsigned long)phba;
3217 /* MBOX heartbeat timer */
3218 init_timer(&psli->mbox_tmo);
3219 psli->mbox_tmo.function = lpfc_mbox_timeout;
3220 psli->mbox_tmo.data = (unsigned long) phba;
3221 /* FCP polling mode timer */
3222 init_timer(&phba->fcp_poll_timer);
3223 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3224 phba->fcp_poll_timer.data = (unsigned long) phba;
3225 /* Fabric block timer */
3226 init_timer(&phba->fabric_block_timer);
3227 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 phba->fabric_block_timer.data = (unsigned long) phba;
3229 /* EA polling mode timer */
3230 init_timer(&phba->eratt_poll);
3231 phba->eratt_poll.function = lpfc_poll_eratt;
3232 phba->eratt_poll.data = (unsigned long) phba;
3234 /* Host attention work mask setup */
3235 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3238 /* Get all the module params for configuring this host */
3239 lpfc_get_cfgparam(phba);
3241 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3242 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 * 2 segments are added since the IOCB needs a command and response bde.
3245 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 sizeof(struct fcp_rsp) +
3247 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
/* BlockGuard (T10 DIF) needs max s/g segments plus room for the
 * protection-data BDEs. */
3249 if (phba->cfg_enable_bg) {
3250 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 phba->cfg_sg_dma_buf_size +=
3252 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3255 /* Also reinitialize the host templates with new values. */
3256 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3259 phba->max_vpi = LPFC_MAX_VPI;
3260 /* This will be set to correct value after config_port mbox */
3261 phba->max_vports = 0;
3264 * Initialize the SLI Layer to run with lpfc HBAs.
3266 lpfc_sli_setup(phba);
3267 lpfc_sli_queue_setup(phba);
3269 /* Allocate device driver memory */
3270 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3277 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278 * @phba: pointer to lpfc hba data structure.
3280 * This routine is invoked to unset the driver internal resources set up
3281 * specific for supporting the SLI-3 HBA device it attached to.
3284 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3286 /* Free device driver memory allocated */
3287 lpfc_mem_free_all(phba);
3293 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3294 * @phba: pointer to lpfc hba data structure.
3296 * This routine is invoked to set up the driver internal resources specific to
3297 * support the SLI-4 HBA device it attached to.
3301 * other values - error
3304 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3306 struct lpfc_sli *psli;
3310 /* Before proceed, wait for POST done and device ready */
3311 rc = lpfc_sli4_post_status_check(phba);
3316 * Initialize timers used by driver
3319 /* Heartbeat timer */
3320 init_timer(&phba->hb_tmofunc);
3321 phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 phba->hb_tmofunc.data = (unsigned long)phba;
3325 /* MBOX heartbeat timer */
3326 init_timer(&psli->mbox_tmo);
3327 psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 psli->mbox_tmo.data = (unsigned long) phba;
3329 /* Fabric block timer */
3330 init_timer(&phba->fabric_block_timer);
3331 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 phba->fabric_block_timer.data = (unsigned long) phba;
3333 /* EA polling mode timer */
3334 init_timer(&phba->eratt_poll);
3335 phba->eratt_poll.function = lpfc_poll_eratt;
3336 phba->eratt_poll.data = (unsigned long) phba;
3338 * We need to do a READ_CONFIG mailbox command here before
3339 * calling lpfc_get_cfgparam. For VFs this will report the
3340 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341 * All of the resources allocated
3342 * for this Port are tied to these values.
3344 /* Get all the module params for configuring this host */
3345 lpfc_get_cfgparam(phba);
3346 phba->max_vpi = LPFC_MAX_VPI;
3347 /* This will be set to correct value after the read_config mbox */
3348 phba->max_vports = 0;
3350 /* Program the default value of vlan_id and fc_map */
3351 phba->valid_vlan = 0;
3352 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3357 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3358 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 * 2 segments are added since the IOCB needs a command and response bde.
3360 * To insure that the scsi sgl does not cross a 4k page boundary only
3361 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 * Table of sgl sizes and seg_cnt:
3363 * sgl size, sg_seg_cnt total seg
3368 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
/* Round the configured segment count DOWN to the nearest supported
 * bucket (50/114/242/498) per the table above. */
3373 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 phba->cfg_sg_seg_cnt = 50;
3375 else if (phba->cfg_sg_seg_cnt <= 114)
3376 phba->cfg_sg_seg_cnt = 114;
3377 else if (phba->cfg_sg_seg_cnt <= 242)
3378 phba->cfg_sg_seg_cnt = 242;
3380 phba->cfg_sg_seg_cnt = 498;
3382 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 + sizeof(struct fcp_rsp);
3384 phba->cfg_sg_dma_buf_size +=
3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3387 /* Initialize buffer queue management fields */
3388 hbq_count = lpfc_sli_hbq_count();
3389 for (i = 0; i < hbq_count; ++i)
3390 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 INIT_LIST_HEAD(&phba->rb_pend_list);
3392 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3396 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3398 /* Initialize the Abort scsi buffer list used by driver */
3399 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 /* This abort list used by worker thread */
3402 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3405 * Initialize driver internal slow-path work queues
3408 /* Driver internal slow-path CQ Event pool */
3409 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 /* Response IOCB work queue list */
3411 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 /* Asynchronous event CQ Event work queue list */
3413 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 /* Fast-path XRI aborted CQ Event work queue list */
3415 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 /* Slow-path XRI aborted CQ Event work queue list */
3417 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 /* Receive queue CQ Event work queue list */
3419 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3421 /* Initialize the driver internal SLI layer lists. */
3422 lpfc_sli_setup(phba);
3423 lpfc_sli_queue_setup(phba);
3425 /* Allocate device driver memory */
3426 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3430 /* Create the bootstrap mailbox command */
3431 rc = lpfc_create_bootstrap_mbox(phba);
3435 /* Set up the host's endian order with the device. */
3436 rc = lpfc_setup_endian_order(phba);
3438 goto out_free_bsmbx;
3440 /* Set up the hba's configuration parameters. */
3441 rc = lpfc_sli4_read_config(phba);
3443 goto out_free_bsmbx;
3445 /* Perform a function reset */
3446 rc = lpfc_pci_function_reset(phba);
3448 goto out_free_bsmbx;
3450 /* Create all the SLI4 queues */
3451 rc = lpfc_sli4_queue_create(phba);
3453 goto out_free_bsmbx;
3455 /* Create driver internal CQE event pool */
3456 rc = lpfc_sli4_cq_event_pool_create(phba);
3458 goto out_destroy_queue;
3460 /* Initialize and populate the iocb list per host */
3461 rc = lpfc_init_sgl_list(phba);
3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3464 "1400 Failed to initialize sgl list.\n");
3465 goto out_destroy_cq_event_pool;
3467 rc = lpfc_init_active_sgl_array(phba);
3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3470 "1430 Failed to initialize sgl list.\n");
3471 goto out_free_sgl_list;
3474 rc = lpfc_sli4_init_rpi_hdrs(phba);
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "1432 Failed to initialize rpi headers.\n");
3478 goto out_free_active_sgl;
/* One fast-path EQ handle per configured FCP event queue */
3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3482 phba->cfg_fcp_eq_count), GFP_KERNEL);
3483 if (!phba->sli4_hba.fcp_eq_hdl) {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3485 "2572 Failed allocate memory for fast-path "
3486 "per-EQ handle array\n");
3487 goto out_remove_rpi_hdrs;
3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3492 if (!phba->sli4_hba.msix_entries) {
3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 "2573 Failed allocate memory for msi-x "
3495 "interrupt vector entries\n");
3496 goto out_free_fcp_eq_hdl;
/* Unwind labels below release resources in strict reverse order of
 * the acquisitions above (classic goto-cleanup pattern). */
3501 out_free_fcp_eq_hdl:
3502 kfree(phba->sli4_hba.fcp_eq_hdl);
3503 out_remove_rpi_hdrs:
3504 lpfc_sli4_remove_rpi_hdrs(phba);
3505 out_free_active_sgl:
3506 lpfc_free_active_sgl(phba);
3508 lpfc_free_sgl_list(phba);
3509 out_destroy_cq_event_pool:
3510 lpfc_sli4_cq_event_pool_destroy(phba);
3512 lpfc_sli4_queue_destroy(phba);
3514 lpfc_destroy_bootstrap_mbox(phba);
3516 lpfc_mem_free(phba);
3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3522 * @phba: pointer to lpfc hba data structure.
3524 * This routine is invoked to unset the driver internal resources set up
3525 * specific for supporting the SLI-4 HBA device it attached to.
3528 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
/*
 * Teardown releases resources roughly in the reverse order of their
 * allocation in lpfc_sli4_driver_resource_setup() (msix entries,
 * fcp_eq_hdl, rpi headers, sgl lists, queues, CQE pool, bootstrap
 * mailbox, then the SLI memory pools).
 */
3532 /* unregister default FCFI from the HBA */
3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3535 /* Free the default FCR table */
3536 lpfc_sli_remove_dflt_fcf(phba);
3538 /* Free memory allocated for msi-x interrupt vector entries */
3539 kfree(phba->sli4_hba.msix_entries);
3541 /* Free memory allocated for fast-path work queue handles */
3542 kfree(phba->sli4_hba.fcp_eq_hdl);
3544 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba);
3547 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba);
3549 lpfc_free_sgl_list(phba);
3551 /* Free the SCSI sgl management array */
3552 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3554 /* Free the SLI4 queues */
3555 lpfc_sli4_queue_destroy(phba);
3557 /* Free the completion queue EQ event pool */
3558 lpfc_sli4_cq_event_release_all(phba);
3559 lpfc_sli4_cq_event_pool_destroy(phba);
3561 /* Reset SLI4 HBA FCoE function */
3562 lpfc_pci_function_reset(phba);
3564 /* Free the bsmbx region. */
3565 lpfc_destroy_bootstrap_mbox(phba);
3567 /* Free the SLI Layer memory with SLI4 HBAs */
3568 lpfc_mem_free_all(phba);
3570 /* Free the current connect table */
/* _safe iterator: entries are presumably unlinked/freed in the loop body
 * (continuation not visible in this chunk) — TODO confirm. */
3571 list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 &phba->fcf_conn_rec_list, list)
3579 * lpfc_init_api_table_setup - Set up init api function jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3583 * This routine sets up the device INIT interface API function jump table
3586 * Returns: 0 - success, -ENODEV - failure.
3589 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
/*
 * Bind the INIT-time function pointers per PCI device group:
 * *_s3 handlers for LPFC_PCI_DEV_LP devices, *_s4 handlers for
 * LPFC_PCI_DEV_OC devices (SLI-3 vs SLI-4 interface — per the
 * suffix convention used throughout this file).
 */
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
/* Unknown device group: log the error (kernel-doc says -ENODEV is returned). */
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3613 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3614 * @phba: pointer to lpfc hba data structure.
3616 * This routine is invoked to set up the driver internal resources before the
3617 * device specific resource setup to support the HBA device it attached to.
3621 * other values - error
3624 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/*
 * Pure initialization of locks, list heads, wait queues and counters
 * shared by all SLI revisions — no allocation is visible here, so
 * this path has nothing to unwind on failure.
 */
3627 * Driver resources common to all SLI revisions
3629 atomic_set(&phba->fast_event_count, 0);
3630 spin_lock_init(&phba->hbalock);
3632 /* Initialize ndlp management spinlock */
3633 spin_lock_init(&phba->ndlp_lock);
3635 INIT_LIST_HEAD(&phba->port_list);
3636 INIT_LIST_HEAD(&phba->work_list);
3637 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3639 /* Initialize the wait queue head for the kernel thread */
3640 init_waitqueue_head(&phba->work_waitq);
3642 /* Initialize the scsi buffer list used by driver for scsi IO */
3643 spin_lock_init(&phba->scsi_buf_list_lock);
3644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3646 /* Initialize the fabric iocb list */
3647 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3649 /* Initialize list to save ELS buffers */
3650 INIT_LIST_HEAD(&phba->elsbuf);
3652 /* Initialize FCF connection rec list */
3653 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it attached to.
3667 * other values - error
3670 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3674 /* Startup the kernel thread for this host adapter. */
/* kthread_run() returns an ERR_PTR on failure; propagate it via PTR_ERR. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3686 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3687 * @phba: pointer to lpfc hba data structure.
3689 * This routine is invoked to unset the driver internal resources set up after
3690 * the device specific resource setup for supporting the HBA device it
3694 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
/* Counterpart of lpfc_setup_driver_resource_phase2(): stop the worker
 * thread started there (kthread_stop waits for the thread to exit). */
3696 /* Stop kernel worker thread */
3697 kthread_stop(phba->worker_thread);
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3704 * This routine is invoked to free the driver's IOCB list and memory.
3707 lpfc_free_iocb_list(struct lpfc_hba *phba)
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
/* Whole walk is under hbalock; _safe iterator because each entry is
 * unlinked (and presumably kfree'd — that line is not visible here). */
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3716 phba->total_iocbq_bufs--;
3718 spin_unlock_irq(&phba->hbalock);
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3727 * This routine is invoked to allocate and initialize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3732 * other values - error
3735 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3743 for (i = 0; i < iocb_count; i++) {
3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3745 if (iocbq_entry == NULL) {
/* NOTE(review): message reports LPFC_IOCB_LIST_CNT while the loop bound
 * is the iocb_count parameter — confirm these are always equal. */
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
3748 __func__, i, LPFC_IOCB_LIST_CNT);
3749 goto out_free_iocbq;
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3756 "Unloading driver.\n", __func__);
3757 goto out_free_iocbq;
/* NO_XRI until an XRI is actually assigned (SLI-4 only). */
3759 iocbq_entry->sli4_xritag = NO_XRI;
3761 spin_lock_irq(&phba->hbalock);
3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
3764 spin_unlock_irq(&phba->hbalock);
/* Error path: release everything queued so far. */
3770 lpfc_free_iocb_list(phba);
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3779 * This routine is invoked to free the driver's sgl list and memory.
3782 lpfc_free_sgl_list(struct lpfc_hba *phba)
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
/* Splice the shared list to a private one under hbalock, then free the
 * entries without holding the lock. */
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3797 phba->sli4_hba.total_sglq_bufs--;
/* Deregistration failure is logged but not propagated (void teardown). */
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3815 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
/* One sglq pointer slot per possible XRI; zeroed so unused slots are NULL. */
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3830 * @phba: pointer to lpfc hba data structure.
3832 * This routine is invoked to walk through the array of active sglq entries
3833 * and free all of the resources.
3834 * This is just a place holder for now.
3837 lpfc_free_active_sgl(struct lpfc_hba *phba)
/* kfree(NULL) is a no-op, so this is safe even if allocation never ran. */
3839 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3843 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3844 * @phba: pointer to lpfc hba data structure.
3846 * This routine is invoked to allocate and initialize the driver's sgl
3847 * list and set up the sgl xritag tag array accordingly.
3851 * other values - error
3854 lpfc_init_sgl_list(struct lpfc_hba *phba)
3856 struct lpfc_sglq *sglq_entry = NULL;
/* Partition the port's XRI space: the first els_xri_cnt XRIs are reserved
 * for ELS sglqs, the remainder (max_xri - els_xri_cnt) for SCSI buffers. */
3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3862 "2400 lpfc_init_sgl_list els %d.\n",
3864 /* Initialize and populate the sglq list per host/VF. */
3865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3868 /* Sanity check on XRI management */
3869 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871 "2562 No room left for SCSI XRI allocation: "
3872 "max_xri=%d, els_xri=%d\n",
3873 phba->sli4_hba.max_cfg_param.max_xri,
3878 /* Allocate memory for the ELS XRI management array */
3879 phba->sli4_hba.lpfc_els_sgl_array =
3880 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3883 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "2401 Failed to allocate memory for ELS "
3886 "XRI management array of size %d.\n",
3891 /* Keep the SCSI XRI into the XRI management array */
3892 phba->sli4_hba.scsi_xri_max =
3893 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3894 phba->sli4_hba.scsi_xri_cnt = 0;
3896 phba->sli4_hba.lpfc_scsi_psb_array =
3897 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3898 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3900 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3902 "2563 Failed to allocate memory for SCSI "
3903 "XRI management array of size %d.\n",
3904 phba->sli4_hba.scsi_xri_max);
/* Undo the ELS array allocation made above before bailing out. */
3905 kfree(phba->sli4_hba.lpfc_els_sgl_array);
/* Populate one sglq (with XRI tag and DMA buffer) per ELS XRI. */
3909 for (i = 0; i < els_xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3911 if (sglq_entry == NULL) {
3912 printk(KERN_ERR "%s: only allocated %d sgls of "
3913 "expected %d count. Unloading driver.\n",
3914 __func__, i, els_xri_cnt);
3918 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3919 if (sglq_entry->sli4_xritag == NO_XRI) {
3921 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3922 "Unloading driver.\n", __func__);
3925 sglq_entry->buff_type = GEN_BUFF_TYPE;
3926 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3927 if (sglq_entry->virt == NULL) {
3929 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3930 "Unloading driver.\n", __func__);
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3936 /* The list order is used by later block SGL registration */
3937 spin_lock_irq(&phba->hbalock);
3938 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3939 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3940 phba->sli4_hba.total_sglq_bufs++;
3941 spin_unlock_irq(&phba->hbalock);
/* Error path: lpfc_free_sgl_list() also frees lpfc_els_sgl_array. */
3946 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3947 lpfc_free_sgl_list(phba);
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE modulo 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3965 * ENOMEM - No available memory
3966 * EIO - The mailbox failed to complete successfully.
3969 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3974 struct lpfc_rpi_hdr *rpi_hdr;
3976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3979 * Provision an rpi bitmask range for discovery. The total count
3980 * is the difference between max and base + 1.
/* NOTE(review): the expression below computes rpi_base + max_rpi - 1,
 * not (max - base + 1) as the comment above states — confirm intent. */
3982 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3983 phba->sli4_hba.max_cfg_param.max_rpi - 1;
/* Round the bit count up to whole longs for the bitmask allocation. */
3985 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3986 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3988 if (!phba->sli4_hba.rpi_bmask)
/* Post the first rpi header region; clean up on failure. */
3991 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3994 "0391 Error during rpi post operation\n");
3995 lpfc_sli4_remove_rpis(phba);
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and stores them in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
struct lpfc_rpi_hdr *
4016 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4018 uint16_t rpi_limit, curr_rpi_range;
4019 struct lpfc_dmabuf *dmabuf;
4020 struct lpfc_rpi_hdr *rpi_hdr;
4022 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4023 phba->sli4_hba.max_cfg_param.max_rpi - 1;
/* Snapshot next_rpi under hbalock; it is advanced under the same lock
 * below once the header is queued. */
4025 spin_lock_irq(&phba->hbalock);
4026 curr_rpi_range = phba->sli4_hba.next_rpi;
4027 spin_unlock_irq(&phba->hbalock);
4030 * The port has a limited number of rpis. The increment here
4031 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4032 * and to allow the full max_rpi range per port.
4034 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4038 * First allocate the protocol header region for the port. The
4039 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4041 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4045 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4046 LPFC_HDR_TEMPLATE_SIZE,
4049 if (!dmabuf->virt) {
4051 goto err_free_dmabuf;
4054 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
/* The port requires the region aligned to its own size; reject otherwise. */
4055 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4057 goto err_free_coherent;
4060 /* Save the rpi header data for cleanup later. */
4061 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4063 goto err_free_coherent;
4065 rpi_hdr->dmabuf = dmabuf;
4066 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4067 rpi_hdr->page_count = 1;
4068 spin_lock_irq(&phba->hbalock);
4069 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4070 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4073 * The next_rpi stores the next module-64 rpi value to post
4074 * in any subsequent rpi memory region postings.
4076 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4077 spin_unlock_irq(&phba->hbalock);
/* goto-cleanup ladder: free coherent region, then the dmabuf wrapper. */
4081 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4082 dmabuf->virt, dmabuf->phys);
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4090 * @phba: pointer to lpfc hba data structure.
4092 * This routine is invoked to remove all memory resources allocated
4093 * to support rpis. This routine presumes the caller has released all
4094 * rpis consumed by fabric or port logins and is prepared to have
4095 * the header pages removed.
4098 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
/* Free every posted rpi header region and its bookkeeping struct. */
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
/* Reset rpi accounting back to the configured base. */
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
/* NOTE(review): sizeof(*rpi_bmask) is the size of ONE unsigned long, so
 * this memset clears only the first word of the bitmask allocated in
 * lpfc_sli4_init_rpi_hdrs() (longs * sizeof(unsigned long)) — confirm. */
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4124 * pointer to @phba - successful
static struct lpfc_hba *
4128 lpfc_hba_alloc(struct pci_dev *pdev)
4130 struct lpfc_hba *phba;
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
/* NOTE(review): this log call follows the allocation failure check, so
 * phba is presumably NULL here — verify lpfc_printf_log tolerates that. */
4135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4136 "1417 Failed to allocate hba struct.\n")
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4157 * This routine is invoked to free the driver hba data structure with an
4161 lpfc_hba_free(struct lpfc_hba *phba)
/* Return the board number obtained via lpfc_get_instance() in
 * lpfc_hba_alloc() back to the idr. */
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4179 * other values - error
4182 lpfc_create_shost(struct lpfc_hba *phba)
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4187 /* Initialize HBA FC structure */
/* Default FC timeout values (E_D_TOV, R_A_TOV, AL_TOV, ARB_TOV). */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
/* The physical port is modeled as a vport created against this hba. */
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4210 * This routine is invoked to destroy HBA physical port and the associated
4214 lpfc_destroy_shost(struct lpfc_hba *phba)
4216 struct lpfc_vport *vport = phba->pport;
/* Counterpart of lpfc_create_shost(): tear down the physical port. */
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4233 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
/* Register BlockGuard (T10 DIF) capabilities with the SCSI midlayer when
 * both module parameters are set, then lazily allocate the two global
 * hexdump buffers (_dump_buf_data / _dump_buf_dif) used for debugging. */
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4243 if (!_dump_buf_data) {
4245 spin_lock_init(&_dump_buf_lock);
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
/* NOTE(review): "%d pages" prints (1 << pagecnt) but pagecnt is an
 * allocation order, so pages = 1 << pagecnt — message is consistent,
 * though KERN_ERR for a success message looks like the wrong level. */
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
/* Same lazy-allocation pattern for the DIF (protection data) buffer. */
4265 if (!_dump_buf_dif) {
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4296 lpfc_post_init_setup(struct lpfc_hba *phba)
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4308 shost = pci_get_drvdata(phba->pcidev);
/* "- 10" presumably reserves command slots for driver-internal use —
 * TODO confirm against the queue-depth configuration code. */
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4313 lpfc_host_attrib_init(shost);
/* Polled-mode FCP: start the poll timer under the host lock. */
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4342 * other values - error
4345 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4347 struct pci_dev *pdev;
4348 unsigned long bar0map_len, bar2map_len;
4351 int error = -ENODEV;
4353 /* Obtain PCI device reference */
4357 pdev = phba->pcidev;
4359 /* Set the device DMA mask size */
/* Try 64-bit DMA first, fall back to 32-bit. */
4360 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4361 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4364 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4365 * required by each mapping.
4367 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4368 bar0map_len = pci_resource_len(pdev, 0);
4370 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4371 bar2map_len = pci_resource_len(pdev, 2);
4373 /* Map HBA SLIM to a kernel virtual address. */
4374 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4375 if (!phba->slim_memmap_p) {
4376 dev_printk(KERN_ERR, &pdev->dev,
4377 "ioremap failed for SLIM memory.\n");
4381 /* Map HBA Control Registers to a kernel virtual address. */
4382 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4383 if (!phba->ctrl_regs_memmap_p) {
4384 dev_printk(KERN_ERR, &pdev->dev,
4385 "ioremap failed for HBA control registers.\n");
4386 goto out_iounmap_slim;
4389 /* Allocate memory for SLI-2 structures */
4390 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4394 if (!phba->slim2p.virt)
4397 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
/* Carve the mailbox, PCB and IOCB areas out of the SLI-2 SLIM region. */
4398 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4400 phba->IOCBs = (phba->slim2p.virt +
4401 offsetof(struct lpfc_sli2_slim, IOCBs));
4403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4404 lpfc_sli_hbq_size(),
4405 &phba->hbqslimp.phys,
4407 if (!phba->hbqslimp.virt)
/* Partition the HBQ SLIM region among the configured HBQs. */
4410 hbq_count = lpfc_sli_hbq_count();
4411 ptr = phba->hbqslimp.virt;
4412 for (i = 0; i < hbq_count; ++i) {
4413 phba->hbqs[i].hbq_virt = ptr;
4414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4415 ptr += (lpfc_hbq_defs[i]->entry_count *
4416 sizeof(struct lpfc_hbq_entry));
4418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4423 INIT_LIST_HEAD(&phba->rb_pend_list);
/* Derive the register addresses from the two mapped BARs. */
4425 phba->MBslimaddr = phba->slim_memmap_p;
4426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
/* Unwind labels: free SLIM, unmap control regs, unmap SLIM, in order. */
4434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4435 phba->slim2p.virt, phba->slim2p.phys);
4437 iounmap(phba->ctrl_regs_memmap_p);
4439 iounmap(phba->slim_memmap_p);
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4452 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4454 struct pci_dev *pdev;
4456 /* Obtain PCI device reference */
4460 pdev = phba->pcidev;
/* Reverse of lpfc_sli_pci_mem_setup(): free the DMA regions first,
 * then drop the two BAR mappings. */
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4477 * @phba: pointer to lpfc hba data structure.
4479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4480 * done and check status.
4482 * Return 0 if successful, otherwise -ENODEV.
4485 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4487 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4488 uint32_t onlnreg0, onlnreg1;
4489 int i, port_error = -ENODEV;
4491 if (!phba->sli4_hba.STAregaddr)
4494 /* With unrecoverable error, log the error message and return error */
4495 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4496 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4497 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4498 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4499 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4500 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4502 "1422 HBA Unrecoverable error: "
4503 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4504 "online0_reg=0x%x, online1_reg=0x%x\n",
4505 uerrlo_reg.word0, uerrhi_reg.word0,
4506 onlnreg0, onlnreg1);
4511 /* Wait up to 30 seconds for the SLI Port POST done and ready */
/* 3000 iterations; the per-iteration delay (~10ms, presumably a msleep
 * in the dropped lines) yields the 30s budget — TODO confirm. */
4512 for (i = 0; i < 3000; i++) {
4513 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4514 /* Encounter fatal POST error, break out */
4515 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4516 port_error = -ENODEV;
4519 if (LPFC_POST_STAGE_ARMFW_READY ==
4520 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
/* POST never reached ARMFW_READY: dump the decoded status fields. */
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4530 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4531 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4532 bf_get(lpfc_hst_state_perr, &sta_reg),
4533 bf_get(lpfc_hst_state_sfi, &sta_reg),
4534 bf_get(lpfc_hst_state_nip, &sta_reg),
4535 bf_get(lpfc_hst_state_ipc, &sta_reg),
4536 bf_get(lpfc_hst_state_xrom, &sta_reg),
4537 bf_get(lpfc_hst_state_dl, &sta_reg),
4538 bf_get(lpfc_hst_state_port_status, &sta_reg));
4540 /* Log device information */
4541 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4544 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4545 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4546 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4547 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4548 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4561 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
/* All BAR0 register addresses are fixed offsets from the mapped
 * PCI config space base (conf_regs_memmap_p). */
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4583 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
/* BAR1 CSR registers are fixed offsets from ctrl_regs_memmap_p. */
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 * @vf: virtual function number
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4603 * based on the given virtual function number, @vf.
4605 * Return 0 if successful, otherwise -ENODEV.
4608 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
/* Reject out-of-range virtual function numbers up front. */
4610 if (vf > LPFC_VIR_FUNC_MAX)
/* Each VF owns one doorbell page; every doorbell register is the VF's
 * page base (vf * LPFC_VFR_PAGE_SIZE) plus a fixed register offset. */
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4628 * @phba: pointer to lpfc hba data structure.
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4639 * ENOMEM - could not allocate memory.
4642 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4645 struct lpfc_dmabuf *dmabuf;
4646 struct dma_address *dma_address;
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4655 * The bootstrap mailbox region is comprised of 2 parts
4656 * plus an alignment restriction of 16 bytes.
/* Over-allocate by (alignment - 1) so an aligned sub-region always fits. */
4658 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4663 if (!dmabuf->virt) {
4667 memset(dmabuf->virt, 0, bmbx_size);
4670 * Initialize the bootstrap mailbox pointers now so that the register
4671 * operations are simple later. The mailbox dma address is required
4672 * to be 16-byte aligned. Also align the virtual memory as each
4673 * mailbox is copied into the bmbx mailbox region before issuing the
4674 * command to the port.
4676 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4677 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
/* avirt/aphys are the 16-byte-aligned views of the raw allocation. */
4679 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4680 LPFC_ALIGN_16_BYTE);
4681 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4682 LPFC_ALIGN_16_BYTE);
4685 * Set the high and low physical addresses now. The SLI4 alignment
4686 * requirement is 16 bytes and the mailbox is posted to the port
4687 * as two 30-bit addresses. The other data is a bit marking whether
4688 * the 30-bit address is the high or low address.
4689 * Upcast bmbx aphys to 64bits so shift instruction compiles
4690 * clean on 32 bit machines.
4692 dma_address = &phba->sli4_hba.bmbx.dma_address;
4693 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
/* High word: bits 63..34 of the physical address, shifted into place
 * with the "high" marker bit in the low-order bits. */
4694 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4695 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4696 LPFC_BMBX_BIT1_ADDR_HI);
/* Low word: bits 33..4 (the address is 16-byte aligned, so bits 3..0
 * are zero) with the "low" marker bit. */
4698 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4699 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4700 LPFC_BMBX_BIT1_ADDR_LO);
4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4706 * @phba: pointer to lpfc hba data structure.
4708 * This routine is invoked to teardown the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands recovered, no
4711 * additional mailbox commands are sent, and interrupts are disabled
4712 * before calling this routine.
4716 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
/* Release the coherent DMA region allocated in
 * lpfc_create_bootstrap_mbox(), then its tracking structure. */
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
/* Zero the whole bmbx bookkeeping so stale pointers and addresses
 * cannot be reused after teardown. */
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4728 * lpfc_sli4_read_config - Get the config parameters.
4729 * @phba: pointer to lpfc hba data structure.
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
4734 * allocation for the port.
4738 * ENOMEM - No available memory
4739 * EIO - The mailbox failed to complete successfully.
4742 lpfc_sli4_read_config(struct lpfc_hba *phba)
4745 struct lpfc_mbx_read_config *rd_config;
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
/* Build the READ_CONFIG mailbox and issue it synchronously (polled). */
4756 lpfc_read_config(phba, pmb);
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed , mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
/* Extract the base/count pairs for each resource class (XRI, VPI,
 * RPI, VFI, FCFI) plus the per-type queue limits from the reply. */
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
/* Seed the allocators: next free XRI/RPI start at their bases, and
 * the vport limit follows directly from the VPI count. */
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
4820 mempool_free(pmb, phba->mbox_mem_pool);
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
/* The HBA queue depth can never exceed the number of exchange
 * resources (XRIs) the port granted; clamp it down if needed. */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4830 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4831 * @phba: pointer to lpfc hba data structure.
4833 * This routine is invoked to setup the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4838 * ENOMEM - No available memory
4839 * EIO - The mailbox failed to complete successfully.
4842 lpfc_setup_endian_order(struct lpfc_hba *phba)
4844 LPFC_MBOXQ_t *mboxq;
/* Well-known byte pattern the port uses to detect host endianness. */
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4877 * lpfc_sli4_queue_create - Create all the SLI4 queues
4878 * @phba: pointer to lpfc hba data structure.
4880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4881 * operation. For each SLI4 queue type, the parameters such as queue entry
4882 * count (queue depth) shall be taken from the module parameter. For now,
4883 * we just use some constant number as place holder.
4887 * ENOMEM - No available memory
4888 * EIO - The mailbox failed to complete successfully.
4891 lpfc_sli4_queue_create(struct lpfc_hba *phba)
4893 struct lpfc_queue *qdesc;
4894 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4895 int cfg_fcp_wq_count;
4896 int cfg_fcp_eq_count;
4899 * Sanity check for configured queue parameters against the run-time
4903 /* Sanity check on FCP fast-path WQ parameters */
/* Cap the requested fast-path WQ count by what the PCI function
 * provides, after reserving LPFC_SP_WQN_DEF slow-path WQs. */
4904 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4905 if (cfg_fcp_wq_count >
4906 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4907 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4909 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4911 "2581 Not enough WQs (%d) from "
4912 "the pci function for supporting "
4914 phba->sli4_hba.max_cfg_param.max_wq,
4915 phba->cfg_fcp_wq_count);
4918 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4919 "2582 Not enough WQs (%d) from the pci "
4920 "function for supporting the requested "
4921 "FCP WQs (%d), the actual FCP WQs can "
4922 "be supported: %d\n",
4923 phba->sli4_hba.max_cfg_param.max_wq,
4924 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4926 /* The actual number of FCP work queues adopted */
4927 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4929 /* Sanity check on FCP fast-path EQ parameters */
/* Same clamp for the fast-path EQ count, reserving LPFC_SP_EQN_DEF
 * slow-path EQs. */
4930 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4931 if (cfg_fcp_eq_count >
4932 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4933 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4935 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4937 "2574 Not enough EQs (%d) from the "
4938 "pci function for supporting FCP "
4940 phba->sli4_hba.max_cfg_param.max_eq,
4941 phba->cfg_fcp_eq_count);
4944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4945 "2575 Not enough EQs (%d) from the pci "
4946 "function for supporting the requested "
4947 "FCP EQs (%d), the actual FCP EQs can "
4948 "be supported: %d\n",
4949 phba->sli4_hba.max_cfg_param.max_eq,
4950 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4952 /* It does not make sense to have more EQs than WQs */
4953 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4955 "2593 The number of FCP EQs (%d) is more "
4956 "than the number of FCP WQs (%d), take "
4957 "the number of FCP EQs same as than of "
4958 "WQs (%d)\n", cfg_fcp_eq_count,
4959 phba->cfg_fcp_wq_count,
4960 phba->cfg_fcp_wq_count);
4961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4963 /* The actual number of FCP event queues adopted */
4964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4965 /* The overall number of event queues used */
4966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4969 * Create Event Queues (EQs)
4972 /* Get EQ depth from module parameter, fake the default for now */
4973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4976 /* Create slow path event queue */
4977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978 phba->sli4_hba.eq_ecount);
4980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981 "0496 Failed allocate slow-path EQ\n");
4984 phba->sli4_hba.sp_eq = qdesc;
4986 /* Create fast-path FCP Event Queue(s) */
/* Pointer array sized by the (clamped) fast-path EQ count. */
4987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4988 phba->cfg_fcp_eq_count), GFP_KERNEL);
4989 if (!phba->sli4_hba.fp_eq) {
4990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4991 "2576 Failed allocate memory for fast-path "
4992 "EQ record array\n");
4993 goto out_free_sp_eq;
4995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4997 phba->sli4_hba.eq_ecount);
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "0497 Failed allocate fast-path EQ\n");
5001 goto out_free_fp_eq;
5003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5007 * Create Complete Queues (CQs)
5010 /* Get CQ depth from module parameter, fake the default for now */
5011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5014 /* Create slow-path Mailbox Command Complete Queue */
5015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5016 phba->sli4_hba.cq_ecount);
5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019 "0500 Failed allocate slow-path mailbox CQ\n");
5020 goto out_free_fp_eq;
5022 phba->sli4_hba.mbx_cq = qdesc;
5024 /* Create slow-path ELS Complete Queue */
5025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5026 phba->sli4_hba.cq_ecount);
5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5029 "0501 Failed allocate slow-path ELS CQ\n");
5030 goto out_free_mbx_cq;
5032 phba->sli4_hba.els_cq = qdesc;
5034 /* Create slow-path Unsolicited Receive Complete Queue */
5035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036 phba->sli4_hba.cq_ecount);
5038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039 "0502 Failed allocate slow-path USOL RX CQ\n");
5040 goto out_free_els_cq;
5042 phba->sli4_hba.rxq_cq = qdesc;
5044 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5045 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5046 phba->cfg_fcp_eq_count), GFP_KERNEL);
5047 if (!phba->sli4_hba.fcp_cq) {
5048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5049 "2577 Failed allocate memory for fast-path "
5050 "CQ record array\n");
5051 goto out_free_rxq_cq;
5053 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5054 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5055 phba->sli4_hba.cq_ecount);
5057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5058 "0499 Failed allocate fast-path FCP "
5059 "CQ (%d)\n", fcp_cqidx);
5060 goto out_free_fcp_cq;
5062 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5065 /* Create Mailbox Command Queue */
5066 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5067 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5070 phba->sli4_hba.mq_ecount);
5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5073 "0505 Failed allocate slow-path MQ\n");
5074 goto out_free_fcp_cq;
5076 phba->sli4_hba.mbx_wq = qdesc;
5079 * Create all the Work Queues (WQs)
5081 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5082 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5084 /* Create slow-path ELS Work Queue */
5085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086 phba->sli4_hba.wq_ecount);
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089 "0504 Failed allocate slow-path ELS WQ\n");
5090 goto out_free_mbx_wq;
5092 phba->sli4_hba.els_wq = qdesc;
5094 /* Create fast-path FCP Work Queue(s) */
5095 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5096 phba->cfg_fcp_wq_count), GFP_KERNEL);
5097 if (!phba->sli4_hba.fcp_wq) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5099 "2578 Failed allocate memory for fast-path "
5100 "WQ record array\n");
5101 goto out_free_els_wq;
5103 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5105 phba->sli4_hba.wq_ecount);
5107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5108 "0503 Failed allocate fast-path FCP "
5109 "WQ (%d)\n", fcp_wqidx);
5110 goto out_free_fcp_wq;
5112 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5116 * Create Receive Queue (RQ)
5118 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5119 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5121 /* Create Receive Queue for header */
5122 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5123 phba->sli4_hba.rq_ecount);
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "0506 Failed allocate receive HRQ\n");
5127 goto out_free_fcp_wq;
5129 phba->sli4_hba.hdr_rq = qdesc;
5131 /* Create Receive Queue for data */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5133 phba->sli4_hba.rq_ecount);
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0507 Failed allocate receive DRQ\n");
5137 goto out_free_hdr_rq;
5139 phba->sli4_hba.dat_rq = qdesc;
/*
 * Error unwind: free everything allocated so far, newest first.
 * NOTE(review): the goto labels (out_free_hdr_rq, out_free_fcp_wq,
 * ...) live on lines elided from this listing - verify against the
 * full file.
 */
5144 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5145 phba->sli4_hba.hdr_rq = NULL;
5147 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5148 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5149 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5151 kfree(phba->sli4_hba.fcp_wq);
5153 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5154 phba->sli4_hba.els_wq = NULL;
5156 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5157 phba->sli4_hba.mbx_wq = NULL;
5159 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5160 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5161 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5163 kfree(phba->sli4_hba.fcp_cq);
5165 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5166 phba->sli4_hba.rxq_cq = NULL;
5168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5169 phba->sli4_hba.els_cq = NULL;
5171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5172 phba->sli4_hba.mbx_cq = NULL;
5174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5178 kfree(phba->sli4_hba.fp_eq);
5180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5181 phba->sli4_hba.sp_eq = NULL;
5187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5188 * @phba: pointer to lpfc hba data structure.
5190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5195 * ENOMEM - No available memory
5196 * EIO - The mailbox failed to complete successfully.
5199 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Free every queue allocated by lpfc_sli4_queue_create(), WQs first,
 * then RQs, then CQs, then EQs, NULLing each pointer as it goes. */
5203 /* Release mailbox command work queue */
5204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5205 phba->sli4_hba.mbx_wq = NULL;
5207 /* Release ELS work queue */
5208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5209 phba->sli4_hba.els_wq = NULL;
5211 /* Release FCP work queue */
5212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5214 kfree(phba->sli4_hba.fcp_wq);
5215 phba->sli4_hba.fcp_wq = NULL;
5217 /* Release unsolicited receive queue */
5218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5219 phba->sli4_hba.hdr_rq = NULL;
5220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5221 phba->sli4_hba.dat_rq = NULL;
5223 /* Release unsolicited receive complete queue */
5224 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5225 phba->sli4_hba.rxq_cq = NULL;
5227 /* Release ELS complete queue */
5228 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5229 phba->sli4_hba.els_cq = NULL;
5231 /* Release mailbox command complete queue */
5232 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5233 phba->sli4_hba.mbx_cq = NULL;
5235 /* Release FCP response complete queue */
5236 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5237 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5238 kfree(phba->sli4_hba.fcp_cq);
5239 phba->sli4_hba.fcp_cq = NULL;
5241 /* Release fast-path event queue */
5242 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5243 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5244 kfree(phba->sli4_hba.fp_eq);
5245 phba->sli4_hba.fp_eq = NULL;
5247 /* Release slow-path event queue */
5248 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5249 phba->sli4_hba.sp_eq = NULL;
5255 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5256 * @phba: pointer to lpfc hba data structure.
5258 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5263 * ENOMEM - No available memory
5264 * EIO - The mailbox failed to complete successfully.
5267 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5270 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5271 int fcp_cq_index = 0;
5274 * Set up Event Queues (EQs)
5277 /* Set up slow-path event queue */
5278 if (!phba->sli4_hba.sp_eq) {
5279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280 "0520 Slow-path EQ not allocated\n");
5283 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5287 "0521 Failed setup of slow-path EQ: "
5291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5292 "2583 Slow-path EQ setup: queue-id=%d\n",
5293 phba->sli4_hba.sp_eq->queue_id);
5295 /* Set up fast-path event queue */
5296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5297 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5299 "0522 Fast-path EQ (%d) not "
5300 "allocated\n", fcp_eqidx);
5301 goto out_destroy_fp_eq;
5303 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5304 phba->cfg_fcp_imax);
5306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 "0523 Failed setup of fast-path EQ "
5308 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5309 goto out_destroy_fp_eq;
5311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5312 "2584 Fast-path EQ setup: "
5313 "queue[%d]-id=%d\n", fcp_eqidx,
5314 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5318 * Set up Complete Queues (CQs)
/* All slow-path CQs (mailbox, ELS, unsolicited RX) hang off the one
 * slow-path EQ; fast-path FCP CQs pair one-to-one with FCP EQs. */
5321 /* Set up slow-path MBOX Complete Queue as the first CQ */
5322 if (!phba->sli4_hba.mbx_cq) {
5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324 "0528 Mailbox CQ not allocated\n");
5325 goto out_destroy_fp_eq;
5327 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5328 LPFC_MCQ, LPFC_MBOX);
5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331 "0529 Failed setup of slow-path mailbox CQ: "
5333 goto out_destroy_fp_eq;
5335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5337 phba->sli4_hba.mbx_cq->queue_id,
5338 phba->sli4_hba.sp_eq->queue_id);
5340 /* Set up slow-path ELS Complete Queue */
5341 if (!phba->sli4_hba.els_cq) {
5342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343 "0530 ELS CQ not allocated\n");
5344 goto out_destroy_mbx_cq;
5346 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5347 LPFC_WCQ, LPFC_ELS);
5349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350 "0531 Failed setup of slow-path ELS CQ: "
5352 goto out_destroy_mbx_cq;
5354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5356 phba->sli4_hba.els_cq->queue_id,
5357 phba->sli4_hba.sp_eq->queue_id);
5359 /* Set up slow-path Unsolicited Receive Complete Queue */
5360 if (!phba->sli4_hba.rxq_cq) {
5361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5362 "0532 USOL RX CQ not allocated\n");
5363 goto out_destroy_els_cq;
5365 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5366 LPFC_RCQ, LPFC_USOL);
5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5369 "0533 Failed setup of slow-path USOL RX CQ: "
5371 goto out_destroy_els_cq;
5373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5374 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5375 phba->sli4_hba.rxq_cq->queue_id,
5376 phba->sli4_hba.sp_eq->queue_id);
5378 /* Set up fast-path FCP Response Complete Queue */
5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5382 "0526 Fast-path FCP CQ (%d) not "
5383 "allocated\n", fcp_cqidx);
5384 goto out_destroy_fcp_cq;
5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5387 phba->sli4_hba.fp_eq[fcp_cqidx],
5388 LPFC_WCQ, LPFC_FCP);
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "0527 Failed setup of fast-path FCP "
5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5393 goto out_destroy_fcp_cq;
5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396 "2588 FCP CQ setup: cq[%d]-id=%d, "
5397 "parent eq[%d]-id=%d\n",
5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5405 * Set up all the Work Queues (WQs)
5408 /* Set up Mailbox Command Queue */
5409 if (!phba->sli4_hba.mbx_wq) {
5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411 "0538 Slow-path MQ not allocated\n");
5412 goto out_destroy_fcp_cq;
5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5415 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "0539 Failed setup of slow-path MQ: "
5420 goto out_destroy_fcp_cq;
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5424 phba->sli4_hba.mbx_wq->queue_id,
5425 phba->sli4_hba.mbx_cq->queue_id);
5427 /* Set up slow-path ELS Work Queue */
5428 if (!phba->sli4_hba.els_wq) {
5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5430 "0536 Slow-path ELS WQ not allocated\n");
5431 goto out_destroy_mbx_wq;
5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5434 phba->sli4_hba.els_cq, LPFC_ELS);
5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5437 "0537 Failed setup of slow-path ELS WQ: "
5439 goto out_destroy_mbx_wq;
5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5443 phba->sli4_hba.els_wq->queue_id,
5444 phba->sli4_hba.els_cq->queue_id);
5446 /* Set up fast-path FCP Work Queue */
5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "0534 Fast-path FCP WQ (%d) not "
5451 "allocated\n", fcp_wqidx);
5452 goto out_destroy_fcp_wq;
/* There may be more WQs than CQs, so bind each WQ to a CQ chosen
 * round-robin via fcp_cq_index (advanced at the loop bottom). */
5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5455 phba->sli4_hba.fcp_cq[fcp_cq_index],
5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459 "0535 Failed setup of fast-path FCP "
5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5461 goto out_destroy_fcp_wq;
5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464 "2591 FCP WQ setup: wq[%d]-id=%d, "
5465 "parent cq[%d]-id=%d\n",
5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5470 /* Round robin FCP Work Queue's Completion Queue assignment */
5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5475 * Create Receive Queue (RQ)
5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5488 goto out_destroy_fcp_wq;
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
/* Error unwind: tear down on the port in reverse creation order
 * (WQs, MQ, CQs, EQs). */
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5524 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5529 * ENOMEM - No available memory
5530 * EIO - The mailbox failed to complete successfully.
5533 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Tear down on the port everything lpfc_sli4_queue_setup() created:
 * work/receive queues first, then their parent CQs, then the EQs.
 * Host memory is released separately by lpfc_sli4_queue_destroy(). */
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568 * CQE. For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5576 * -ENOMEM - No available memory
5579 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5581 struct lpfc_cq_event *cq_event;
/* Preallocate 4x the CQ entry count so the (atomic-context) ISR never
 * has to allocate; any kmalloc failure unwinds the partial pool. */
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5593 out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that, it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5609 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
/* _safe iteration: each entry is unlinked (and then freed) while
 * walking, so the next pointer must be cached. */
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5624 * This routine is the lock free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5627 * Return: Pointer to the newly allocated completion-queue event if successful
/* Caller is expected to hold phba->hbalock; the locked wrapper
 * lpfc_sli4_cq_event_alloc() below takes it around this call. */
5630 struct lpfc_cq_event *
5631 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5633 struct lpfc_cq_event *cq_event = NULL;
/* cq_event stays NULL when the pool is empty. */
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5644 * This routine is the lock version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5650 struct lpfc_cq_event *
5651 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
/* Serialize pool access with hbalock; irqsave because the pool is
 * also touched from interrupt context. */
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5667 * This routine is the lock free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
/* Caller is expected to hold phba->hbalock (see the locked wrapper
 * lpfc_sli4_cq_event_release() below). */
5671 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5682 * This routine is the lock version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5686 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5689 unsigned long iflags;
/* Take hbalock (irqsave) around the lock-free release. */
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5699 * This routine frees all the pending completion-queue events back
5700 * into the free pool for device reset.
5703 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5717 /* Pending async events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
 /*
  * Return the collected events to the free pool one at a time; each
  * call re-takes hbalock via lpfc_sli4_cq_event_release().
  */
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5732 * This routine is invoked to request a PCI function reset. It will destroy
5733 * all resources assigned to the PCI function which originates this request.
5737 * ENOMEM - No available memory
5738 * EIO - The mailbox failed to complete successfully.
5741 lpfc_pci_function_reset(struct lpfc_hba *phba)
5743 LPFC_MBOXQ_t *mboxq;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 /* Pull the SLI4 config header status out of the completed mailbox. */
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 /* On MBX_TIMEOUT the firmware may still own the mailbox; do not free it. */
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5779 * @phba: pointer to lpfc hba data structure.
5780 * @cnt: number of nop mailbox commands to send.
5782 * This routine is invoked to send a number @cnt of NOP mailbox command and
5783 * wait for each command to complete.
5785 * Return: the number of NOP mailbox command completed.
5788 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5790 LPFC_MBOXQ_t *mboxq;
5791 int length, cmdsent;
5794 uint32_t shdr_status, shdr_add_status;
5795 union lpfc_sli4_cfg_shdr *shdr;
5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5799 "2518 Requested to send 0 NOP mailbox cmd\n");
5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806 "2519 Unable to allocate memory for issuing "
5807 "NOP mailbox command\n");
5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5812 length = (sizeof(struct lpfc_mbx_nop) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
 /* Poll while device interrupts are not yet enabled; otherwise sleep-wait. */
5819 if (!phba->sli4_hba.intr_enable)
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5823 if (rc == MBX_TIMEOUT)
5825 /* Check return status */
5826 shdr = (union lpfc_sli4_cfg_shdr *)
5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5831 if (shdr_status || shdr_add_status || rc) {
5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5833 "2520 NOP mailbox command failed "
5834 "status x%x add_status x%x mbx "
5835 "status x%x\n", shdr_status,
5836 shdr_add_status, rc);
 /* On MBX_TIMEOUT the firmware may still own the mailbox; must not free. */
5841 if (rc != MBX_TIMEOUT)
5842 mempool_free(mboxq, phba->mbox_mem_pool);
5848 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
5849 * @phba: pointer to lpfc hba data structure.
5852 * This routine is invoked to unregister a FCFI from device.
5855 lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5860 unsigned long flags;
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5867 lpfc_unreg_fcfi(mbox, fcfi);
 /* Poll while device interrupts are not yet enabled; otherwise sleep-wait. */
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
 /*
  * NOTE(review): when rc is a non-timeout failure the mailbox was already
  * returned to the pool above, yet the log below still dereferences
  * mbox->u.mqe — looks like a use-after-free; the status should be read
  * (or the log emitted) before the mempool_free(). Confirm and reorder.
  */
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
 /* Clear the FCF state flags regardless of mailbox outcome. */
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 /* Mark the FCFI is no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5893 * @phba: pointer to lpfc hba data structure.
5895 * This routine is invoked to set up the PCI device memory space for device
5896 * with SLI-4 interface spec.
5900 * other values - error
5903 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5909 /* Obtain PCI device reference */
5913 pdev = phba->pcidev;
5915 /* Set the device DMA mask size */
 /* Prefer 64-bit DMA addressing; fall back to 32-bit if not supported. */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 * number of bytes required by each mapping. They are actually
5922 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5969 goto out_iounmap_all;
 /* Error unwind: release the mappings in reverse order of creation. */
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5985 * @phba: pointer to lpfc hba data structure.
5987 * This routine is invoked to unset the PCI device memory space for device
5988 * with SLI-4 interface spec.
5991 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5993 struct pci_dev *pdev;
5995 /* Obtain PCI device reference */
5999 pdev = phba->pcidev;
6001 /* Free coherent DMA memory allocated */
6003 /* Unmap I/O memory space */
 /* Mirror of lpfc_sli4_pci_mem_setup(): unmap in reverse mapping order. */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021 * with an interrupt handler, which is done in this function. Note that
6022 * later when device is unloading, the driver should always call free_irq()
6023 * on all MSI-X vectors it has done request_irq() on before calling
6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
6025 * will be left with MSI-X enabled and leaks its vectors.
6029 * other values - error
6032 lpfc_sli_enable_msix(struct lpfc_hba *phba)
6037 /* Set up MSI-X multi-message vectors */
6038 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6039 phba->msix_entries[i].entry = i;
6041 /* Configure MSI-X capability structure */
6042 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6043 ARRAY_SIZE(phba->msix_entries));
6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6046 "0420 PCI enable MSI-X failed (%d)\n", rc);
 /* Log each vector assignment for debugging. */
6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6051 "0477 MSI-X entry[%d]: vector=x%x "
6053 phba->msix_entries[i].vector,
6054 phba->msix_entries[i].entry);
6056 * Assign MSI-X vectors to interrupt handlers
6059 /* vector-0 is associated to slow-path handler */
6060 rc = request_irq(phba->msix_entries[0].vector,
6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6062 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6065 "0421 MSI-X slow-path request_irq failed "
6070 /* vector-1 is associated to fast-path handler */
6071 rc = request_irq(phba->msix_entries[1].vector,
6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6073 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6077 "0429 MSI-X fast-path request_irq failed "
6083 * Configure HBA MSI-X attention conditions to messages
6085 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6090 "0474 Unable to allocate memory for issuing "
6091 "MBOX_CONFIG_MSI command\n");
6094 rc = lpfc_config_msi(phba, pmb);
6097 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6098 if (rc != MBX_SUCCESS) {
6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6100 "0351 Config MSI mailbox command failed, "
6101 "mbxCmd x%x, mbxStatus x%x\n",
6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6106 /* Free memory allocated for mailbox command */
6107 mempool_free(pmb, phba->mbox_mem_pool);
6111 /* Free memory allocated for mailbox command */
6112 mempool_free(pmb, phba->mbox_mem_pool);
 /* Error unwind: release irqs in reverse order of registration. */
6115 /* free the irq already requested */
6116 free_irq(phba->msix_entries[1].vector, phba);
6119 /* free the irq already requested */
6120 free_irq(phba->msix_entries[0].vector, phba);
6123 /* Unconfigure MSI-X capability structure */
6124 pci_disable_msix(phba->pcidev);
6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6130 * @phba: pointer to lpfc hba data structure.
6132 * This routine is invoked to release the MSI-X vectors and then disable the
6133 * MSI-X interrupt mode to device with SLI-3 interface spec.
6136 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6140 /* Free up MSI-X multi-message vectors */
6141 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6142 free_irq(phba->msix_entries[i].vector, phba);
 /* All vectors released; now turn off MSI-X in PCI config space. */
6144 pci_disable_msix(phba->pcidev);
6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6151 * @phba: pointer to lpfc hba data structure.
6153 * This routine is invoked to enable the MSI interrupt mode to device with
6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6155 * enable the MSI vector. The device driver is responsible for calling the
6156 * request_irq() to register the MSI vector with an interrupt handler, which
6157 * is done in this function.
6161 * other values - error
6164 lpfc_sli_enable_msi(struct lpfc_hba *phba)
6168 rc = pci_enable_msi(phba->pcidev);
6170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6171 "0462 PCI enable MSI mode success.\n");
6173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6174 "0471 PCI enable MSI mode failed (%d)\n", rc);
6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 /* Undo MSI enable when the handler could not be registered. */
6181 pci_disable_msi(phba->pcidev);
6182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6183 "0478 MSI request_irq failed (%d)\n", rc);
6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6190 * @phba: pointer to lpfc hba data structure.
6192 * This routine is invoked to disable the MSI interrupt mode to device with
6193 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6194 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6195 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
 * its vector.
6199 lpfc_sli_disable_msi(struct lpfc_hba *phba)
6201 free_irq(phba->pcidev->irq, phba);
6202 pci_disable_msi(phba->pcidev);
6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6208 * @phba: pointer to lpfc hba data structure.
6210 * This routine is invoked to enable device interrupt and associate driver's
6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6212 * spec. Depends on the interrupt mode configured to the driver, the driver
6213 * will try to fallback from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device in the order
6216 * MSI-X -> MSI -> IRQ.
 * NOTE(review): intr_mode starts as LPFC_INTR_ERROR and is presumably
 * updated on success and returned to the caller — confirm against the
 * full function body.
6220 * other values - error
6223 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6225 uint32_t intr_mode = LPFC_INTR_ERROR;
6228 if (cfg_mode == 2) {
6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6232 /* Now, try to enable MSI-X interrupt mode */
6233 retval = lpfc_sli_enable_msix(phba);
6235 /* Indicate initialization to MSI-X mode */
6236 phba->intr_type = MSIX;
6242 /* Fallback to MSI if MSI-X initialization failed */
6243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6244 retval = lpfc_sli_enable_msi(phba);
6246 /* Indicate initialization to MSI mode */
6247 phba->intr_type = MSI;
6252 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6253 if (phba->intr_type == NONE) {
6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6257 /* Indicate initialization to INTx mode */
6258 phba->intr_type = INTx;
6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6267 * @phba: pointer to lpfc hba data structure.
6269 * This routine is invoked to disable device interrupt and disassociate the
6270 * driver's interrupt handler(s) from interrupt vector(s) to device with
6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6272 * release the interrupt vector(s) for the message signaled interrupt.
6275 lpfc_sli_disable_intr(struct lpfc_hba *phba)
6277 /* Disable the currently initialized interrupt mode */
6278 if (phba->intr_type == MSIX)
6279 lpfc_sli_disable_msix(phba);
6280 else if (phba->intr_type == MSI)
6281 lpfc_sli_disable_msi(phba);
6282 else if (phba->intr_type == INTx)
6283 free_irq(phba->pcidev->irq, phba);
6285 /* Reset interrupt management states */
6286 phba->intr_type = NONE;
6287 phba->sli.slistat.sli_intr = 0;
6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6294 * @phba: pointer to lpfc hba data structure.
6296 * This routine is invoked to enable the MSI-X interrupt vectors to device
6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6299 * enables either all or nothing, depending on the current availability of
6300 * PCI vector resources. The device driver is responsible for calling the
6301 * individual request_irq() to register each MSI-X vector with a interrupt
6302 * handler, which is done in this function. Note that later when device is
6303 * unloading, the driver should always call free_irq() on all MSI-X vectors
6304 * it has done request_irq() on before calling pci_disable_msix(). Failure
6305 * to do so results in a BUG_ON() and a device will be left with MSI-X
6306 * enabled and leaks its vectors.
6310 * other values - error
6313 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6317 /* Set up MSI-X multi-message vectors */
6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6319 phba->sli4_hba.msix_entries[index].entry = index;
6321 /* Configure MSI-X capability structure */
6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6323 phba->sli4_hba.cfg_eqn);
6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6326 "0484 PCI enable MSI-X failed (%d)\n", rc);
6329 /* Log MSI-X vector assignment */
6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6332 "0489 MSI-X entry[%d]: vector=x%x "
6333 "message=%d\n", index,
6334 phba->sli4_hba.msix_entries[index].vector,
6335 phba->sli4_hba.msix_entries[index].entry);
6337 * Assign MSI-X vectors to interrupt handlers
6340 /* The first vector must associated to slow-path handler for MQ */
6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6343 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "0485 MSI-X slow-path request_irq failed "
6351 /* The rest of the vector(s) are associated to fast-path handler(s) */
6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6357 LPFC_FP_DRIVER_HANDLER_NAME,
6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6361 "0486 MSI-X fast-path (%d) "
6362 "request_irq failed (%d)\n", index, rc);
6370 /* free the irq already requested */
6371 for (--index; index >= 1; index--)
6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6375 /* free the irq already requested */
6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6379 /* Unconfigure MSI-X capability structure */
6380 pci_disable_msix(phba->pcidev);
6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6386 * @phba: pointer to lpfc hba data structure.
6388 * This routine is invoked to release the MSI-X vectors and then disable the
6389 * MSI-X interrupt mode to device with SLI-4 interface spec.
6392 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6396 /* Free up MSI-X multi-message vectors */
 /* Vector 0 was registered with dev_id == phba (slow-path handler). */
6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
 /* Fast-path vectors were registered with their fcp_eq_hdl cookies. */
6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6400 free_irq(phba->sli4_hba.msix_entries[index].vector,
6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6403 pci_disable_msix(phba->pcidev);
6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6410 * @phba: pointer to lpfc hba data structure.
6412 * This routine is invoked to enable the MSI interrupt mode to device with
6413 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6414 * to enable the MSI vector. The device driver is responsible for calling
6415 * the request_irq() to register the MSI vector with an interrupt handler,
6416 * which is done in this function.
6420 * other values - error
6423 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6427 rc = pci_enable_msi(phba->pcidev);
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 /* Undo MSI enable when the handler could not be registered. */
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
 /* Single shared vector: initialize every fast-path EQ handle to this phba. */
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6457 * This routine is invoked to disable the MSI interrupt mode to device with
6458 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6459 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6460 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
 * its vector.
6464 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6473 * @phba: pointer to lpfc hba data structure.
6475 * This routine is invoked to enable device interrupt and associate driver's
6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6477 * interface spec. Depends on the interrupt mode configured to the driver,
6478 * the driver will try to fallback from the configured interrupt mode to an
6479 * interrupt mode which is supported by the platform, kernel, and device in
6481 * MSI-X -> MSI -> IRQ.
6485 * other values - error
6488 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6490 uint32_t intr_mode = LPFC_INTR_ERROR;
6493 if (cfg_mode == 2) {
6494 /* Preparation before conf_msi mbox cmd */
6497 /* Now, try to enable MSI-X interrupt mode */
6498 retval = lpfc_sli4_enable_msix(phba);
6500 /* Indicate initialization to MSI-X mode */
6501 phba->intr_type = MSIX;
6507 /* Fallback to MSI if MSI-X initialization failed */
6508 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6509 retval = lpfc_sli4_enable_msi(phba);
6511 /* Indicate initialization to MSI mode */
6512 phba->intr_type = MSI;
6517 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6518 if (phba->intr_type == NONE) {
6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6522 /* Indicate initialization to INTx mode */
6523 phba->intr_type = INTx;
 /* INTx shares one line: point every fast-path EQ handle at this phba. */
6525 for (index = 0; index < phba->cfg_fcp_eq_count;
6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
6537 * @phba: pointer to lpfc hba data structure.
6539 * This routine is invoked to disable device interrupt and disassociate
6540 * the driver's interrupt handler(s) from interrupt vector(s) to device
6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
6542 * will release the interrupt vector(s) for the message signaled interrupt.
6545 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
6547 /* Disable the currently initialized interrupt mode */
6548 if (phba->intr_type == MSIX)
6549 lpfc_sli4_disable_msix(phba);
6550 else if (phba->intr_type == MSI)
6551 lpfc_sli4_disable_msi(phba);
6552 else if (phba->intr_type == INTx)
6553 free_irq(phba->pcidev->irq, phba);
6555 /* Reset interrupt management states */
6556 phba->intr_type = NONE;
6557 phba->sli.slistat.sli_intr = 0;
6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
6564 * @phba: pointer to lpfc hba data structure.
6566 * This routine is invoked to unset the HBA device initialization steps to
6567 * a device with SLI-3 interface spec.
6570 lpfc_unset_hba(struct lpfc_hba *phba)
6572 struct lpfc_vport *vport = phba->pport;
6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 /* Flag the physical port as unloading before tearing anything down. */
6575 spin_lock_irq(shost->host_lock);
6576 vport->load_flag |= FC_UNLOADING;
6577 spin_unlock_irq(shost->host_lock);
6579 lpfc_stop_hba_timers(phba);
6581 phba->pport->work_port_events = 0;
6583 lpfc_sli_hba_down(phba);
6585 lpfc_sli_brdrestart(phba);
6587 lpfc_sli_disable_intr(phba);
6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
6594 * @phba: pointer to lpfc hba data structure.
6596 * This routine is invoked to unset the HBA device initialization steps to
6597 * a device with SLI-4 interface spec.
6600 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6602 struct lpfc_vport *vport = phba->pport;
6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 /* Flag the physical port as unloading before tearing anything down. */
6605 spin_lock_irq(shost->host_lock);
6606 vport->load_flag |= FC_UNLOADING;
6607 spin_unlock_irq(shost->host_lock);
6609 phba->pport->work_port_events = 0;
6611 lpfc_sli4_hba_down(phba);
6613 lpfc_sli4_disable_intr(phba);
6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
6620 * @phba: Pointer to HBA context object.
6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
6623 * function. The caller is not required to hold any lock. This routine
6624 * issues PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls lpfc_hba_down_post function to
6626 * free any pending commands.
6629 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6632 LPFC_MBOXQ_t *mboxq;
6634 lpfc_stop_hba_timers(phba);
6635 phba->sli4_hba.intr_enable = 0;
6638 * Gracefully wait out the potential current outstanding asynchronous
6642 /* First, block any pending async mailbox command from posted */
6643 spin_lock_irq(&phba->hbalock);
6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6645 spin_unlock_irq(&phba->hbalock);
6646 /* Now, trying to wait it out if we can */
6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
 /* Bounded wait: give up after LPFC_ACTIVE_MBOX_WAIT_CNT iterations. */
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6652 /* Forcefully release the outstanding mailbox command if timed out */
6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6654 spin_lock_irq(&phba->hbalock);
6655 mboxq = phba->sli.mbox_active;
 /* Fail the stuck command and run its completion so resources unwind. */
6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6657 __lpfc_mbox_cmpl_put(phba, mboxq);
6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6659 phba->sli.mbox_active = NULL;
6660 spin_unlock_irq(&phba->hbalock);
6663 /* Tear down the queues in the HBA */
6664 lpfc_sli4_queue_unset(phba);
6666 /* Disable PCI subsystem interrupt */
6667 lpfc_sli4_disable_intr(phba);
6669 /* Stop kthread signal shall trigger work_done one more time */
6670 kthread_stop(phba->worker_thread);
6672 /* Stop the SLI4 device port */
6673 phba->pport->work_port_events = 0;
6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6684 * information of the device and driver to see if the driver can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6693 static int __devinit
6694 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6699 uint32_t cfg_mode, intr_mode;
6701 /* Allocate memory for HBA structure */
6702 phba = lpfc_hba_alloc(pdev);
6706 /* Perform generic PCI device enabling operation */
6707 error = lpfc_enable_pci_dev(phba);
6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6710 "1401 Failed to enable pci device.\n");
6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
6717 goto out_disable_pci_dev;
6719 /* Set up SLI-3 specific device PCI memory space */
6720 error = lpfc_sli_pci_mem_setup(phba);
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6727 /* Set up phase-1 common device driver resources */
6728 error = lpfc_setup_driver_resource_phase1(phba);
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6735 /* Set up SLI-3 specific device driver resources */
6736 error = lpfc_sli_driver_resource_setup(phba);
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6743 /* Initialize and populate the iocb list per host */
6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6759 /* Create SCSI host to the physical port */
6760 error = lpfc_create_shost(phba);
6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6767 /* Configure sysfs attributes */
6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6772 "1476 Failed to allocate sysfs attr\n");
6773 goto out_destroy_shost;
6776 /* Now, trying to enable interrupt and bring up the device */
6777 cfg_mode = phba->cfg_use_msi;
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
6781 /* Configure and enable interrupt */
6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
6783 if (intr_mode == LPFC_INTR_ERROR) {
6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6785 "0431 Failed to enable interrupt.\n");
6787 goto out_free_sysfs_attr;
6789 /* SLI-3 HBA setup */
6790 if (lpfc_sli_hba_setup(phba)) {
6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6792 "1477 Failed to set up hba\n");
6794 goto out_remove_device;
6797 /* Wait 50ms for the interrupts of previous mailbox commands */
6799 /* Check active interrupts on message signaled interrupts */
 /* NOTE(review): intr_mode 0 appears to denote INTx (no MSI test needed);
  * otherwise require that MSI/MSI-X interrupts actually fired — confirm. */
6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6802 /* Log the current active interrupt mode */
6803 phba->intr_mode = intr_mode;
6804 lpfc_log_intr_mode(phba, intr_mode);
6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6808 "0447 Configure interrupt mode (%d) "
6809 "failed active interrupt test.\n",
6811 /* Disable the current interrupt mode */
6812 lpfc_sli_disable_intr(phba);
6813 /* Try next level of interrupt mode */
6814 cfg_mode = --intr_mode;
6818 /* Perform post initialization setup */
6819 lpfc_post_init_setup(phba);
6821 /* Check if there are static vports to be created. */
6822 lpfc_create_static_vport(phba);
 /* Error unwind: labels undo the setup steps in reverse order. */
6827 lpfc_unset_hba(phba);
6828 out_free_sysfs_attr:
6829 lpfc_free_sysfs_attr(vport);
6831 lpfc_destroy_shost(phba);
6832 out_unset_driver_resource:
6833 lpfc_unset_driver_resource_phase2(phba);
6835 lpfc_free_iocb_list(phba);
6836 out_unset_driver_resource_s3:
6837 lpfc_sli_driver_resource_unset(phba);
6838 out_unset_pci_mem_s3:
6839 lpfc_sli_pci_mem_unset(phba);
6840 out_disable_pci_dev:
6841 lpfc_disable_pci_dev(phba);
6843 lpfc_hba_free(phba);
6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6849 * @pdev: pointer to PCI device
6851 * This routine is to be called to disattach a device with SLI-3 interface
6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
6856 static void __devexit
6857 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6861 struct lpfc_vport **vports;
6862 struct lpfc_hba *phba = vport->phba;
6864 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6866 spin_lock_irq(&phba->hbalock);
6867 vport->load_flag |= FC_UNLOADING;
6868 spin_unlock_irq(&phba->hbalock);
6870 lpfc_free_sysfs_attr(vport);
6872 /* Release all the vports against this physical port */
6873 vports = lpfc_create_vport_work_array(phba);
6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6876 fc_vport_terminate(vports[i]->fc_vport);
6877 lpfc_destroy_vport_work_array(phba, vports);
6879 /* Remove FC host and then SCSI host with the physical port */
6880 fc_remove_host(shost);
6881 scsi_remove_host(shost);
6882 lpfc_cleanup(vport);
6885 * Bring down the SLI Layer. This step disable all interrupts,
6886 * clears the rings, discards all mailbox commands, and resets
6890 /* HBA interrupt will be diabled after this call */
6891 lpfc_sli_hba_down(phba);
6892 /* Stop kthread signal shall trigger work_done one more time */
6893 kthread_stop(phba->worker_thread);
6894 /* Final cleanup of txcmplq and reset the HBA */
6895 lpfc_sli_brdrestart(phba);
6897 lpfc_stop_hba_timers(phba);
6898 spin_lock_irq(&phba->hbalock);
6899 list_del_init(&vport->listentry);
6900 spin_unlock_irq(&phba->hbalock);
6902 lpfc_debugfs_terminate(vport);
6904 /* Disable interrupt */
6905 lpfc_sli_disable_intr(phba);
6907 pci_set_drvdata(pdev, NULL);
6908 scsi_host_put(shost);
6911 * Call scsi_free before mem_free since scsi bufs are released to their
6912 * corresponding pools here.
6914 lpfc_scsi_free(phba);
6915 lpfc_mem_free_all(phba);
6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
6920 /* Free resources associated with SLI2 interface */
6921 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6922 phba->slim2p.virt, phba->slim2p.phys);
6924 /* unmap adapter SLIM and Control Registers */
6925 iounmap(phba->ctrl_regs_memmap_p);
6926 iounmap(phba->slim_memmap_p);
6928 lpfc_hba_free(phba);
6930 pci_release_selected_regions(pdev, bars);
6931 pci_disable_device(pdev);
6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
6936 * @pdev: pointer to PCI device
6937 * @msg: power management message
6939 * This routine is to be called from the kernel's PCI subsystem to support
6940 * system Power Management (PM) to device with SLI-3 interface spec. When
6941 * PM invokes this method, it quiesces the device by stopping the driver's
6942 * worker thread for the device, turning off device's interrupt and DMA,
6943 * and bring the device offline. Note that as the driver implements the
6944 * minimum PM requirements to a power-aware driver's PM support for the
6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
6946 * to the suspend() method call will be treated as SUSPEND and the driver will
6947 * fully reinitialize its device during resume() method call, the driver will
6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
6952 * 0 - driver suspended the device
6956 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
6961 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6962 "0473 PCI device Power Management suspend.\n");
6964 /* Bring down the device */
6965 lpfc_offline_prep(phba);
6967 kthread_stop(phba->worker_thread);
6969 /* Disable interrupt from device */
6970 lpfc_sli_disable_intr(phba);
6972 /* Save device state to PCI config space */
6973 pci_save_state(pdev);
6974 pci_set_power_state(pdev, PCI_D3hot);
6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
6981 * @pdev: pointer to PCI device
6983 * This routine is to be called from the kernel's PCI subsystem to support
6984 * system Power Management (PM) to device with SLI-3 interface spec. When PM
6985 * invokes this method, it restores the device's PCI config space state and
6986 * fully reinitializes the device and brings it online. Note that as the
6987 * driver implements the minimum PM requirements to a power-aware driver's
6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
6990 * driver will fully reinitialize its device during resume() method call,
6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
6995 * 0 - driver suspended the device
6999 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7007 "0452 PCI device Power Management resume.\n");
7009 /* Restore device state from PCI config space */
7010 pci_set_power_state(pdev, PCI_D0);
7011 pci_restore_state(pdev);
7012 if (pdev->is_busmaster)
7013 pci_set_master(pdev);
7015 /* Startup the kernel thread for this host adapter. */
7016 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7017 "lpfc_worker_%d", phba->brd_no);
7018 if (IS_ERR(phba->worker_thread)) {
7019 error = PTR_ERR(phba->worker_thread);
7020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7021 "0434 PM resume failed to start worker "
7022 "thread: error=x%x.\n", error);
7026 /* Configure and enable interrupt */
7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7028 if (intr_mode == LPFC_INTR_ERROR) {
7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7030 "0430 PM resume Failed to enable interrupt\n");
7033 phba->intr_mode = intr_mode;
7035 /* Restart HBA and bring it online */
7036 lpfc_sli_brdrestart(phba);
7039 /* Log the current active interrupt mode */
7040 lpfc_log_intr_mode(phba, phba->intr_mode);
7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7047 * @pdev: pointer to PCI device.
7048 * @state: the current PCI connection state.
7050 * This routine is called from the PCI subsystem for I/O error handling to
7051 * device with SLI-3 interface spec. This function is called by the PCI
7052 * subsystem after a PCI bus error affecting this device has been detected.
7053 * When this function is invoked, it will need to stop all the I/Os and
7054 * interrupt(s) to the device. Once that is done, it will return
7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7062 static pci_ers_result_t
7063 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7067 struct lpfc_sli *psli = &phba->sli;
7068 struct lpfc_sli_ring *pring;
7070 if (state == pci_channel_io_perm_failure) {
7071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7072 "0472 PCI channel I/O permanent failure\n");
7073 /* Block all SCSI devices' I/Os on the host */
7074 lpfc_scsi_dev_block(phba);
7075 /* Clean up all driver's outstanding SCSI I/Os */
7076 lpfc_sli_flush_fcp_rings(phba);
7077 return PCI_ERS_RESULT_DISCONNECT;
7080 pci_disable_device(pdev);
7082 * There may be I/Os dropped by the firmware.
7083 * Error iocb (I/O) on txcmplq and let the SCSI layer
7084 * retry it after re-establishing link.
7086 pring = &psli->ring[psli->fcp_ring];
7087 lpfc_sli_abort_iocb_ring(phba, pring);
7089 /* Disable interrupt */
7090 lpfc_sli_disable_intr(phba);
7092 /* Request a slot reset. */
7093 return PCI_ERS_RESULT_NEED_RESET;
7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7098 * @pdev: pointer to PCI device.
7100 * This routine is called from the PCI subsystem for error handling to
7101 * device with SLI-3 interface spec. This is called after PCI bus has been
7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
7103 * During the PCI subsystem error recovery, after driver returns
7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7105 * recovery and then call this routine before calling the .resume method
7106 * to recover the device. This function will initialize the HBA device,
7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7114 static pci_ers_result_t
7115 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7119 struct lpfc_sli *psli = &phba->sli;
7122 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7123 if (pci_enable_device_mem(pdev)) {
7124 printk(KERN_ERR "lpfc: Cannot re-enable "
7125 "PCI device after reset.\n");
7126 return PCI_ERS_RESULT_DISCONNECT;
7129 pci_restore_state(pdev);
7130 if (pdev->is_busmaster)
7131 pci_set_master(pdev);
7133 spin_lock_irq(&phba->hbalock);
7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7135 spin_unlock_irq(&phba->hbalock);
7137 /* Configure and enable interrupt */
7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7139 if (intr_mode == LPFC_INTR_ERROR) {
7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7141 "0427 Cannot re-enable interrupt after "
7143 return PCI_ERS_RESULT_DISCONNECT;
7145 phba->intr_mode = intr_mode;
7147 /* Take device offline; this will perform cleanup */
7149 lpfc_sli_brdrestart(phba);
7151 /* Log the current active interrupt mode */
7152 lpfc_log_intr_mode(phba, phba->intr_mode);
7154 return PCI_ERS_RESULT_RECOVERED;
7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7159 * @pdev: pointer to PCI device
7161 * This routine is called from the PCI subsystem for error handling to device
7162 * with SLI-3 interface spec. It is called when kernel error recovery tells
7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7164 * error recovery. After this call, traffic can start to flow from this device
7168 lpfc_io_resume_s3(struct pci_dev *pdev)
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7180 * returns the number of ELS/CT IOCBs to reserve
7183 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
/* The reservation is scaled by the SLI-4 port's configured XRI count. */
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
/* NOTE(review): the branch bodies (the actual reserve counts returned
 * for each threshold) were lost in extraction; only the threshold
 * comparisons at 256/512/1024 survive. Recover the return values from
 * the original source tree before relying on this function. */
7189 else if (max_xri <= 256)
7191 else if (max_xri <= 512)
7193 else if (max_xri <= 1024)
7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7204 * This routine is called from the kernel's PCI subsystem to device with
7205 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7207 * information of the device and driver to see if the driver state that it
7208 * can support this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7217 static int __devinit
7218 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7223 uint32_t cfg_mode, intr_mode;
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7235 "1409 Failed to enable pci device.\n");
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7242 goto out_disable_pci_dev;
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7248 "1410 Failed to set up pci memory space.\n");
7249 goto out_disable_pci_dev;
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
7268 /* Initialize and populate the iocb list per host */
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281 "1414 Failed to set up driver resource.\n");
7282 goto out_free_iocb_list;
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289 "1415 Failed to create scsi host.\n");
7290 goto out_unset_driver_resource;
7293 /* Configure sysfs attributes */
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298 "1416 Failed to allocate sysfs attr\n");
7299 goto out_destroy_shost;
7302 /* Now, trying to enable interrupt and bring up the device */
7303 cfg_mode = phba->cfg_use_msi;
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
7307 /* Configure and enable interrupt */
7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 "0426 Failed to enable interrupt.\n");
7313 goto out_free_sysfs_attr;
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "1421 Failed to set up hba\n");
7320 goto out_disable_intr;
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7328 /* Check active interrupts received only for MSI/MSI-X */
7329 if (intr_mode == 0 ||
7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7340 /* Unset the preivous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
7352 lpfc_sli4_disable_intr(phba);
7353 out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
7356 lpfc_destroy_shost(phba);
7357 out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7360 lpfc_free_iocb_list(phba);
7361 out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363 out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
7365 out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
7368 lpfc_hba_free(phba);
7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7374 * @pdev: pointer to PCI device
7376 * This routine is called from the kernel's PCI subsystem to device with
7377 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7378 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7379 * device to be removed from the PCI subsystem properly.
7381 static void __devexit
7382 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7386 struct lpfc_vport **vports;
7387 struct lpfc_hba *phba = vport->phba;
7390 /* Mark the device unloading flag */
7391 spin_lock_irq(&phba->hbalock);
7392 vport->load_flag |= FC_UNLOADING;
7393 spin_unlock_irq(&phba->hbalock);
7395 /* Free the HBA sysfs attributes */
7396 lpfc_free_sysfs_attr(vport);
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7405 /* Remove FC host and then SCSI host with the physical port */
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
7409 /* Perform cleanup on the physical port */
7410 lpfc_cleanup(vport);
7413 * Bring down the SLI Layer. This step disables all interrupts,
7414 * clears the rings, discards all mailbox commands, and resets
7415 * the HBA FCoE function.
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
7427 lpfc_scsi_free(phba);
7428 lpfc_sli4_driver_resource_unset(phba);
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
7437 /* Finally, free the driver's device data structure */
7438 lpfc_hba_free(phba);
7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7448 * This routine is called from the kernel's PCI subsystem to support system
7449 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7450 * this method, it quiesces the device by stopping the driver's worker
7451 * thread for the device, turning off device's interrupt and DMA, and bring
7452 * the device offline. Note that as the driver implements the minimum PM
7453 * requirements to a power-aware driver's PM support for suspend/resume -- all
7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7455 * method call will be treated as SUSPEND and the driver will fully
7456 * reinitialize its device during resume() method call, the driver will set
7457 * device to PCI_D3hot state in PCI config space instead of setting it
7458 * according to the @msg provided by the PM.
7461 * 0 - driver suspended the device
7465 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0298 PCI device Power Management suspend.\n");
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7476 kthread_stop(phba->worker_thread);
7478 /* Disable interrupt from device */
7479 lpfc_sli4_disable_intr(phba);
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7490 * @pdev: pointer to PCI device
7492 * This routine is called from the kernel's PCI subsystem to support system
7493 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes
7494 * this method, it restores the device's PCI config space state and fully
7495 * reinitializes the device and brings it online. Note that as the driver
7496 * implements the minimum PM requirements to a power-aware driver's PM for
7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7498 * to the suspend() method call will be treated as SUSPEND and the driver
7499 * will fully reinitialize its device during resume() method call, the device
7500 * will be set to PCI_D0 directly in PCI config space before restoring the
7504 * 0 - driver suspended the device
7508 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0292 PCI device Power Management resume.\n");
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7524 /* Startup the kernel thread for this host adapter. */
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7530 "0293 PM resume failed to start worker "
7531 "thread: error=x%x.\n", error);
7535 /* Configure and enable interrupt */
7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7537 if (intr_mode == LPFC_INTR_ERROR) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7539 "0294 PM resume Failed to enable interrupt\n");
7542 phba->intr_mode = intr_mode;
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
7559 * This routine is called from the PCI subsystem for error handling to device
7560 * with SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7570 static pci_ers_result_t
7571 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7573 return PCI_ERS_RESULT_NEED_RESET;
7577 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7578 * @pdev: pointer to PCI device.
7580 * This routine is called from the PCI subsystem for error handling to device
7581 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7594 static pci_ers_result_t
7595 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7597 return PCI_ERS_RESULT_RECOVERED;
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	/* SLI-4 io-resume recovery is not yet implemented */
	return;
}
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7623 * at PCI device-specific information of the device and driver to see if the
7624 * driver state that it can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7634 static int __devinit
7635 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7665 static void __devexit
7666 lpfc_pci_remove_one(struct pci_dev *pdev)
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7698 * 0 - driver suspended the device
7702 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7734 * 0 - driver suspended the device
7738 lpfc_pci_resume_one(struct pci_dev *pdev)
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7775 static pci_ers_result_t
7776 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7799 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7812 static pci_ers_result_t
7813 lpfc_io_slot_reset(struct pci_dev *pdev)
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7846 lpfc_io_resume(struct pci_dev *pdev)
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7867 static struct pci_device_id lpfc_id_table[] = {
7868 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
7869 PCI_ANY_ID, PCI_ANY_ID, },
7870 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
7871 PCI_ANY_ID, PCI_ANY_ID, },
7872 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
7873 PCI_ANY_ID, PCI_ANY_ID, },
7874 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
7875 PCI_ANY_ID, PCI_ANY_ID, },
7876 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
7877 PCI_ANY_ID, PCI_ANY_ID, },
7878 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
7879 PCI_ANY_ID, PCI_ANY_ID, },
7880 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
7881 PCI_ANY_ID, PCI_ANY_ID, },
7882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
7883 PCI_ANY_ID, PCI_ANY_ID, },
7884 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
7885 PCI_ANY_ID, PCI_ANY_ID, },
7886 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
7887 PCI_ANY_ID, PCI_ANY_ID, },
7888 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
7889 PCI_ANY_ID, PCI_ANY_ID, },
7890 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
7891 PCI_ANY_ID, PCI_ANY_ID, },
7892 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
7893 PCI_ANY_ID, PCI_ANY_ID, },
7894 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
7895 PCI_ANY_ID, PCI_ANY_ID, },
7896 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
7897 PCI_ANY_ID, PCI_ANY_ID, },
7898 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
7899 PCI_ANY_ID, PCI_ANY_ID, },
7900 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
7901 PCI_ANY_ID, PCI_ANY_ID, },
7902 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
7903 PCI_ANY_ID, PCI_ANY_ID, },
7904 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
7905 PCI_ANY_ID, PCI_ANY_ID, },
7906 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
7907 PCI_ANY_ID, PCI_ANY_ID, },
7908 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
7909 PCI_ANY_ID, PCI_ANY_ID, },
7910 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
7911 PCI_ANY_ID, PCI_ANY_ID, },
7912 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
7913 PCI_ANY_ID, PCI_ANY_ID, },
7914 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
7915 PCI_ANY_ID, PCI_ANY_ID, },
7916 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
7917 PCI_ANY_ID, PCI_ANY_ID, },
7918 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
7919 PCI_ANY_ID, PCI_ANY_ID, },
7920 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
7921 PCI_ANY_ID, PCI_ANY_ID, },
7922 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
7923 PCI_ANY_ID, PCI_ANY_ID, },
7924 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
7925 PCI_ANY_ID, PCI_ANY_ID, },
7926 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
7927 PCI_ANY_ID, PCI_ANY_ID, },
7928 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
7929 PCI_ANY_ID, PCI_ANY_ID, },
7930 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
7931 PCI_ANY_ID, PCI_ANY_ID, },
7932 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
7933 PCI_ANY_ID, PCI_ANY_ID, },
7934 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
7935 PCI_ANY_ID, PCI_ANY_ID, },
7936 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
7937 PCI_ANY_ID, PCI_ANY_ID, },
7938 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
7939 PCI_ANY_ID, PCI_ANY_ID, },
7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
7949 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
7951 static struct pci_error_handlers lpfc_err_handler = {
7952 .error_detected = lpfc_io_error_detected,
7953 .slot_reset = lpfc_io_slot_reset,
7954 .resume = lpfc_io_resume,
7957 static struct pci_driver lpfc_driver = {
7958 .name = LPFC_DRIVER_NAME,
7959 .id_table = lpfc_id_table,
7960 .probe = lpfc_pci_probe_one,
7961 .remove = __devexit_p(lpfc_pci_remove_one),
7962 .suspend = lpfc_pci_suspend_one,
7963 .resume = lpfc_pci_resume_one,
7964 .err_handler = &lpfc_err_handler,
7968 * lpfc_init - lpfc module initialization routine
7970 * This routine is to be invoked when the lpfc module is loaded into the
7971 * kernel. The special kernel macro module_init() is used to indicate the
7972 * role of this routine to the kernel as lpfc module entry point.
7976 * -ENOMEM - FC attach transport failed
7977 * all others - failed
7984 printk(LPFC_MODULE_DESC "\n");
7985 printk(LPFC_COPYRIGHT "\n");
7987 if (lpfc_enable_npiv) {
7988 lpfc_transport_functions.vport_create = lpfc_vport_create;
7989 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
7991 lpfc_transport_template =
7992 fc_attach_transport(&lpfc_transport_functions);
7993 if (lpfc_transport_template == NULL)
7995 if (lpfc_enable_npiv) {
7996 lpfc_vport_transport_template =
7997 fc_attach_transport(&lpfc_vport_transport_functions);
7998 if (lpfc_vport_transport_template == NULL) {
7999 fc_release_transport(lpfc_transport_template);
8003 error = pci_register_driver(&lpfc_driver);
8005 fc_release_transport(lpfc_transport_template);
8006 if (lpfc_enable_npiv)
8007 fc_release_transport(lpfc_vport_transport_template);
8014 * lpfc_exit - lpfc module removal routine
8016 * This routine is invoked when the lpfc module is removed from the kernel.
8017 * The special kernel macro module_exit() is used to indicate the role of
8018 * this routine to the kernel as lpfc module exit point.
8023 pci_unregister_driver(&lpfc_driver);
8024 fc_release_transport(lpfc_transport_template);
8025 if (lpfc_enable_npiv)
8026 fc_release_transport(lpfc_vport_transport_template);
8027 if (_dump_buf_data) {
8028 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
8030 (1L << _dump_buf_data_order), _dump_buf_data);
8031 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8034 if (_dump_buf_dif) {
8035 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
8037 (1L << _dump_buf_dif_order), _dump_buf_dif);
8038 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8042 module_init(lpfc_init);
8043 module_exit(lpfc_exit);
8044 MODULE_LICENSE("GPL");
8045 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8046 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8047 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);