2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
24 * IOC local definitions
28 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
31 #define bfa_ioc_firmware_lock(__ioc) \
32 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
33 #define bfa_ioc_firmware_unlock(__ioc) \
34 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
35 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
36 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
37 #define bfa_ioc_notify_fail(__ioc) \
38 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
39 #define bfa_ioc_sync_start(__ioc) \
40 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
41 #define bfa_ioc_sync_join(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
43 #define bfa_ioc_sync_leave(__ioc) \
44 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
45 #define bfa_ioc_sync_ack(__ioc) \
46 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
47 #define bfa_ioc_sync_complete(__ioc) \
48 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
50 #define bfa_ioc_mbox_cmd_pending(__ioc) \
51 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
52 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
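/*
 * Illustrative sketch (not driver code): each wrapper above just
 * dispatches through the per-ASIC ops table selected at attach time,
 * i.e. bfa_ioc_reg_init(__ioc) effectively expands to
 *
 *	(__ioc)->ioc_hwif->ioc_reg_init(__ioc);
 *
 * so the common IOC code stays ASIC-agnostic while
 * bfa_nw_ioc_set_ct_hwif()/bfa_nw_ioc_set_ct2_hwif() (called from
 * bfa_nw_ioc_pci_init() below) fill in the CT/CT2 specific handlers.
 */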
54 static bool bfa_nw_auto_recover = true;
57 * forward declarations
59 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
60 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
61 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
62 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
63 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
64 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
65 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
66 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
67 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
68 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
69 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
70 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
71 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
72 static void bfa_ioc_recover(struct bfa_ioc *ioc);
73 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
74 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
75 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
76 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
77 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
79 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
80 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
81 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
82 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
83 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
85 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
86 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
88 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
90 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
92 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
94 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
96 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
97 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
100 * IOC state machine definitions/declarations
103 IOC_E_RESET = 1, /*!< IOC reset request */
104 IOC_E_ENABLE = 2, /*!< IOC enable request */
105 IOC_E_DISABLE = 3, /*!< IOC disable request */
106 IOC_E_DETACH = 4, /*!< driver detach cleanup */
107 IOC_E_ENABLED = 5, /*!< f/w enabled */
108 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
109 IOC_E_DISABLED = 7, /*!< f/w disabled */
110 IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
111 IOC_E_HBFAIL = 9, /*!< heartbeat failure */
112 IOC_E_HWERROR = 10, /*!< hardware error interrupt */
113 IOC_E_TIMEOUT = 11, /*!< timeout */
114 IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
117 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
126 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
128 static struct bfa_sm_table ioc_sm_table[] = {
129 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
130 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
131 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
132 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
133 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
134 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
135 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
136 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
137 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
138 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
142 * Forward declarations for iocpf state machine
144 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
145 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
146 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
147 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
152 * IOCPF state machine events
155 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
156 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
157 IOCPF_E_STOP = 3, /*!< stop on driver detach */
158 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
159 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
160 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
161 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
162 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
163 IOCPF_E_GETATTRFAIL = 9, /*!< getattr fail notice by ioc sm */
164 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
165 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
166 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
172 enum bfa_iocpf_state {
173 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
174 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
175 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
176 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
177 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
178 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
179 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
180 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
181 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from driver's */
184 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
185 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
186 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
188 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
189 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
190 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
191 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
193 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
194 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
195 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
196 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
197 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
199 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
201 static struct bfa_sm_table iocpf_sm_table[] = {
202 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
203 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
204 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
205 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
206 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
207 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
208 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
209 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
210 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
211 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
212 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
213 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
214 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
215 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
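/*
 * Illustrative sketch (not driver code): the two tables above let the
 * driver map the FSM's current handler function back to an externally
 * visible state value. bfa_sm_to_state(), used by bfa_ioc_get_state()
 * and bfa_nw_iocpf_timeout() below, is assumed to be a simple linear
 * scan along these lines:
 *
 *	i = 0;
 *	while (tbl[i].sm && tbl[i].sm != current_sm)
 *		i++;
 *	return tbl[i].state;
 */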
223 * Beginning state. IOC uninit state.
226 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
231 * IOC is in uninit state.
234 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
238 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
247 * Reset entry actions -- initialize state machine
250 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
252 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
256 * IOC is in reset state.
259 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
263 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
267 bfa_ioc_disable_comp(ioc);
271 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
280 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
282 bfa_iocpf_enable(ioc);
286 * Host IOC function is being enabled, awaiting response from firmware.
287 * Semaphore is acquired.
290 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
294 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
298 /* !!! fall through !!! */
300 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
301 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
302 if (event != IOC_E_PFFAILED)
303 bfa_iocpf_initfail(ioc);
307 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
308 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
312 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
316 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
329 * Send get-attribute request to firmware and start the IOC timer.
332 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
334 mod_timer(&ioc->ioc_timer, jiffies +
335 msecs_to_jiffies(BFA_IOC_TOV));
336 bfa_ioc_send_getattr(ioc);
340 * IOC configuration in progress. Timer is active.
343 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
346 case IOC_E_FWRSP_GETATTR:
347 del_timer(&ioc->ioc_timer);
348 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
353 del_timer(&ioc->ioc_timer);
356 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
357 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
358 if (event != IOC_E_PFFAILED)
359 bfa_iocpf_getattrfail(ioc);
363 del_timer(&ioc->ioc_timer);
364 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
376 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
378 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
379 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
380 bfa_ioc_hb_monitor(ioc);
384 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
391 bfa_ioc_hb_stop(ioc);
392 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
397 bfa_ioc_hb_stop(ioc);
398 /* !!! fall through !!! */
400 if (ioc->iocpf.auto_recover)
401 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
403 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
405 bfa_ioc_fail_notify(ioc);
407 if (event != IOC_E_PFFAILED)
417 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
419 bfa_iocpf_disable(ioc);
423 * IOC is being disabled
426 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
430 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
435 * No state change. The IOC will move to the disabled state
436 * once the iocpf sm completes failure processing and itself
437 * reaches its disabled state.
443 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
444 bfa_ioc_disable_comp(ioc);
453 * IOC disable completion entry.
456 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
458 bfa_ioc_disable_comp(ioc);
462 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
466 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
470 ioc->cbfn->disable_cbfn(ioc->bfa);
474 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
484 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
489 * Hardware initialization retry.
492 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
496 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
502 * Initialization retry failed.
504 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
505 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
506 if (event != IOC_E_PFFAILED)
507 bfa_iocpf_initfail(ioc);
511 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
512 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
519 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
523 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
533 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
541 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
545 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
549 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
553 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
558 /* HB failure notification, ignore. */
567 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
575 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
580 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
584 ioc->cbfn->disable_cbfn(ioc->bfa);
588 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
597 * IOCPF State Machine
601 * Reset entry actions -- initialize state machine
604 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
606 iocpf->fw_mismatch_notified = false;
607 iocpf->auto_recover = bfa_nw_auto_recover;
611 * Beginning state. IOC is in reset state.
614 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
618 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
630 * Semaphore should be acquired for version check.
633 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
635 bfa_ioc_hw_sem_init(iocpf->ioc);
636 bfa_ioc_hw_sem_get(iocpf->ioc);
640 * Awaiting h/w semaphore to continue with version check.
643 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
645 struct bfa_ioc *ioc = iocpf->ioc;
648 case IOCPF_E_SEMLOCKED:
649 if (bfa_ioc_firmware_lock(ioc)) {
650 if (bfa_ioc_sync_start(ioc)) {
651 bfa_ioc_sync_join(ioc);
652 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
654 bfa_ioc_firmware_unlock(ioc);
655 bfa_nw_ioc_hw_sem_release(ioc);
656 mod_timer(&ioc->sem_timer, jiffies +
657 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
660 bfa_nw_ioc_hw_sem_release(ioc);
661 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
665 case IOCPF_E_SEM_ERROR:
666 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
667 bfa_ioc_pf_hwfailed(ioc);
670 case IOCPF_E_DISABLE:
671 bfa_ioc_hw_sem_get_cancel(ioc);
672 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
673 bfa_ioc_pf_disabled(ioc);
677 bfa_ioc_hw_sem_get_cancel(ioc);
678 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
687 * Notify enable completion callback
690 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
692 /* Call only the first time sm enters fwmismatch state. */
693 if (!iocpf->fw_mismatch_notified)
694 bfa_ioc_pf_fwmismatch(iocpf->ioc);
696 iocpf->fw_mismatch_notified = true;
697 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
698 msecs_to_jiffies(BFA_IOC_TOV));
702 * Awaiting firmware version match.
705 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
707 struct bfa_ioc *ioc = iocpf->ioc;
710 case IOCPF_E_TIMEOUT:
711 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
714 case IOCPF_E_DISABLE:
715 del_timer(&ioc->iocpf_timer);
716 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
717 bfa_ioc_pf_disabled(ioc);
721 del_timer(&ioc->iocpf_timer);
722 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
731 * Request for semaphore.
734 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
736 bfa_ioc_hw_sem_get(iocpf->ioc);
740 * Awaiting semaphore for h/w initialization.
743 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
745 struct bfa_ioc *ioc = iocpf->ioc;
748 case IOCPF_E_SEMLOCKED:
749 if (bfa_ioc_sync_complete(ioc)) {
750 bfa_ioc_sync_join(ioc);
751 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
753 bfa_nw_ioc_hw_sem_release(ioc);
754 mod_timer(&ioc->sem_timer, jiffies +
755 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
759 case IOCPF_E_SEM_ERROR:
760 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
761 bfa_ioc_pf_hwfailed(ioc);
764 case IOCPF_E_DISABLE:
765 bfa_ioc_hw_sem_get_cancel(ioc);
766 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
775 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
777 iocpf->poll_time = 0;
778 bfa_ioc_reset(iocpf->ioc, false);
782 * Hardware is being initialized. Interrupts are enabled.
783 * Holding hardware semaphore lock.
786 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
788 struct bfa_ioc *ioc = iocpf->ioc;
791 case IOCPF_E_FWREADY:
792 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
795 case IOCPF_E_TIMEOUT:
796 bfa_nw_ioc_hw_sem_release(ioc);
797 bfa_ioc_pf_failed(ioc);
798 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
801 case IOCPF_E_DISABLE:
802 del_timer(&ioc->iocpf_timer);
803 bfa_ioc_sync_leave(ioc);
804 bfa_nw_ioc_hw_sem_release(ioc);
805 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
814 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
816 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
817 msecs_to_jiffies(BFA_IOC_TOV));
819 * Enable Interrupts before sending fw IOC ENABLE cmd.
821 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
822 bfa_ioc_send_enable(iocpf->ioc);
826 * Host IOC function is being enabled, awaiting response from firmware.
827 * Semaphore is acquired.
830 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
832 struct bfa_ioc *ioc = iocpf->ioc;
835 case IOCPF_E_FWRSP_ENABLE:
836 del_timer(&ioc->iocpf_timer);
837 bfa_nw_ioc_hw_sem_release(ioc);
838 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
841 case IOCPF_E_INITFAIL:
842 del_timer(&ioc->iocpf_timer);
844 * !!! fall through !!!
846 case IOCPF_E_TIMEOUT:
847 bfa_nw_ioc_hw_sem_release(ioc);
848 if (event == IOCPF_E_TIMEOUT)
849 bfa_ioc_pf_failed(ioc);
850 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
853 case IOCPF_E_DISABLE:
854 del_timer(&ioc->iocpf_timer);
855 bfa_nw_ioc_hw_sem_release(ioc);
856 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
865 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
867 bfa_ioc_pf_enabled(iocpf->ioc);
871 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
874 case IOCPF_E_DISABLE:
875 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
878 case IOCPF_E_GETATTRFAIL:
879 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
883 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
892 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
894 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
895 msecs_to_jiffies(BFA_IOC_TOV));
896 bfa_ioc_send_disable(iocpf->ioc);
900 * IOC is being disabled
903 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
905 struct bfa_ioc *ioc = iocpf->ioc;
908 case IOCPF_E_FWRSP_DISABLE:
909 del_timer(&ioc->iocpf_timer);
910 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
914 del_timer(&ioc->iocpf_timer);
916 * !!! fall through !!!
919 case IOCPF_E_TIMEOUT:
920 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
921 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
924 case IOCPF_E_FWRSP_ENABLE:
933 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
935 bfa_ioc_hw_sem_get(iocpf->ioc);
939 * IOC hb ack request is being removed.
942 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
944 struct bfa_ioc *ioc = iocpf->ioc;
947 case IOCPF_E_SEMLOCKED:
948 bfa_ioc_sync_leave(ioc);
949 bfa_nw_ioc_hw_sem_release(ioc);
950 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
953 case IOCPF_E_SEM_ERROR:
954 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
955 bfa_ioc_pf_hwfailed(ioc);
967 * IOC disable completion entry.
970 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
972 bfa_ioc_mbox_flush(iocpf->ioc);
973 bfa_ioc_pf_disabled(iocpf->ioc);
977 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
979 struct bfa_ioc *ioc = iocpf->ioc;
983 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
987 bfa_ioc_firmware_unlock(ioc);
988 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
997 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
999 bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
1000 bfa_ioc_hw_sem_get(iocpf->ioc);
1004 * Hardware initialization failed.
1007 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1009 struct bfa_ioc *ioc = iocpf->ioc;
1012 case IOCPF_E_SEMLOCKED:
1013 bfa_ioc_notify_fail(ioc);
1014 bfa_ioc_sync_leave(ioc);
1015 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1016 bfa_nw_ioc_hw_sem_release(ioc);
1017 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1020 case IOCPF_E_SEM_ERROR:
1021 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1022 bfa_ioc_pf_hwfailed(ioc);
1025 case IOCPF_E_DISABLE:
1026 bfa_ioc_hw_sem_get_cancel(ioc);
1027 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1031 bfa_ioc_hw_sem_get_cancel(ioc);
1032 bfa_ioc_firmware_unlock(ioc);
1033 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1040 bfa_sm_fault(event);
1045 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
1050 * Hardware initialization failed.
1053 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1055 struct bfa_ioc *ioc = iocpf->ioc;
1058 case IOCPF_E_DISABLE:
1059 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1063 bfa_ioc_firmware_unlock(ioc);
1064 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1068 bfa_sm_fault(event);
1073 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1076 * Mark IOC as failed in hardware and stop firmware.
1078 bfa_ioc_lpu_stop(iocpf->ioc);
1081 * Flush any queued up mailbox requests.
1083 bfa_ioc_mbox_flush(iocpf->ioc);
1084 bfa_ioc_hw_sem_get(iocpf->ioc);
1088 * IOC is in failed state.
1091 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1093 struct bfa_ioc *ioc = iocpf->ioc;
1096 case IOCPF_E_SEMLOCKED:
1097 bfa_ioc_sync_ack(ioc);
1098 bfa_ioc_notify_fail(ioc);
1099 if (!iocpf->auto_recover) {
1100 bfa_ioc_sync_leave(ioc);
1101 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1102 bfa_nw_ioc_hw_sem_release(ioc);
1103 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1105 if (bfa_ioc_sync_complete(ioc))
1106 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1108 bfa_nw_ioc_hw_sem_release(ioc);
1109 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1114 case IOCPF_E_SEM_ERROR:
1115 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1116 bfa_ioc_pf_hwfailed(ioc);
1119 case IOCPF_E_DISABLE:
1120 bfa_ioc_hw_sem_get_cancel(ioc);
1121 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1128 bfa_sm_fault(event);
1133 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1139 * IOC is in failed state.
1142 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1145 case IOCPF_E_DISABLE:
1146 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1150 bfa_sm_fault(event);
1155 * BFA IOC private functions
1159 * Notify common modules registered for notification.
1162 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1164 struct bfa_ioc_notify *notify;
1165 struct list_head *qe;
1167 list_for_each(qe, &ioc->notify_q) {
1168 notify = (struct bfa_ioc_notify *)qe;
1169 notify->cbfn(notify->cbarg, event);
1174 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1176 ioc->cbfn->disable_cbfn(ioc->bfa);
1177 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1181 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1185 #define BFA_SEM_SPINCNT 3000
1187 r32 = readl(sem_reg);
1189 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1192 r32 = readl(sem_reg);
1202 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1208 /* Clear fwver hdr */
1210 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1212 u32 pgnum, pgoff, loff = 0;
1215 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1216 pgoff = PSS_SMEM_PGOFF(loff);
1217 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1219 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1220 writel(0, ioc->ioc_regs.smem_page_start + loff);
1221 loff += sizeof(u32);
1227 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1229 struct bfi_ioc_image_hdr fwhdr;
1232 /* Spin on init semaphore to serialize. */
1233 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1236 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1239 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1240 if (fwstate == BFI_IOC_UNINIT) {
1241 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1245 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1247 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1248 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1252 bfa_ioc_fwver_clear(ioc);
1253 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1254 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1257 * Try to lock and then unlock the semaphore.
1259 readl(ioc->ioc_regs.ioc_sem_reg);
1260 writel(1, ioc->ioc_regs.ioc_sem_reg);
1262 /* Unlock init semaphore */
1263 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1267 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1272 * First read to the semaphore register will return 0, subsequent reads
1273 * will return 1. Semaphore is released by writing 1 to the register
1275 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1277 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1281 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1285 mod_timer(&ioc->sem_timer, jiffies +
1286 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
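/*
 * Illustrative sketch (not driver code) of the h/w semaphore protocol
 * used above: a read that returns 0 grants the lock, a read of 1 means
 * it is held elsewhere, and writing 1 releases it:
 *
 *	r32 = readl(sem_reg);	   -- 0: acquired, 1: busy
 *	... critical section ...
 *	writel(1, sem_reg);	   -- release
 *
 * bfa_ioc_hw_sem_get() retries via sem_timer until the lock is granted;
 * an all-ones read is assumed to indicate a bad/unmapped register and
 * is reported as IOCPF_E_SEM_ERROR.
 */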
1290 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1292 writel(1, ioc->ioc_regs.ioc_sem_reg);
1296 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1298 del_timer(&ioc->sem_timer);
1303 * Initialize LPU local memory (aka secondary memory / SRAM)
1306 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1310 #define PSS_LMEM_INIT_TIME 10000
1312 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1313 pss_ctl &= ~__PSS_LMEM_RESET;
1314 pss_ctl |= __PSS_LMEM_INIT_EN;
1317 * i2c workaround: 12.5 kHz clock
1319 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1320 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1323 * wait for memory initialization to be complete
1327 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1329 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1332 * If memory initialization is not successful, the IOC timeout will catch such failures.
1335 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1337 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1338 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1342 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1347 * Take processor out of reset.
1349 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1350 pss_ctl &= ~__PSS_LPU0_RESET;
1352 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1356 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1361 * Put processors in reset.
1363 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1364 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1366 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1370 * Fetch the running firmware's version header from IOC shared memory.
1373 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1378 u32 *fwsig = (u32 *) fwhdr;
1380 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1381 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1383 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1386 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1387 loff += sizeof(u32);
1392 * Returns true if the running firmware's md5sum matches the driver's firmware image.
1395 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1397 struct bfi_ioc_image_hdr *drv_fwhdr;
1400 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1401 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1403 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1404 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1412 * Return true if current running version is valid. Firmware signature and
1413 * execution context (driver/bios) must match.
1416 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1418 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1420 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1421 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1422 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1424 if (fwhdr.signature != drv_fwhdr->signature)
1427 if (swab32(fwhdr.bootenv) != boot_env)
1430 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1434 * Conditionally flush any pending message from firmware at start.
1437 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1441 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1443 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1447 * @img ioc_init_logic.jpg
1450 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1452 enum bfi_ioc_state ioc_fwstate;
1456 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1459 ioc_fwstate = BFI_IOC_UNINIT;
1461 boot_env = BFI_FWBOOT_ENV_OS;
1464 * check if firmware is valid
1466 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1467 false : bfa_ioc_fwver_valid(ioc, boot_env);
1470 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1471 bfa_ioc_poll_fwinit(ioc);
1476 * If hardware initialization is in progress (initialized by other IOC),
1477 * just wait for an initialization completion interrupt.
1479 if (ioc_fwstate == BFI_IOC_INITING) {
1480 bfa_ioc_poll_fwinit(ioc);
1485 * If IOC function is disabled and firmware version is same,
1486 * just re-enable IOC.
1488 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1490 * When using MSI-X any pending firmware ready event should
1491 * be flushed. Otherwise MSI-X interrupts are not delivered.
1493 bfa_ioc_msgflush(ioc);
1494 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1499 * Initialize the h/w for any other states.
1501 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1502 bfa_ioc_poll_fwinit(ioc);
1506 bfa_nw_ioc_timeout(void *ioc_arg)
1508 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1510 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1514 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1516 u32 *msgp = (u32 *) ioc_msg;
1519 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1522 * first write msg to mailbox registers
1524 for (i = 0; i < len / sizeof(u32); i++)
1525 writel(cpu_to_le32(msgp[i]),
1526 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1528 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1529 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1532 * write 1 to mailbox CMD to trigger LPU event
1534 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1535 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1539 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1541 struct bfi_ioc_ctrl_req enable_req;
1544 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1545 bfa_ioc_portid(ioc));
1546 enable_req.clscode = htons(ioc->clscode);
1547 do_gettimeofday(&tv);
1548 enable_req.tv_sec = ntohl(tv.tv_sec);
1549 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1553 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1555 struct bfi_ioc_ctrl_req disable_req;
1557 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1558 bfa_ioc_portid(ioc));
1559 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1563 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1565 struct bfi_ioc_getattr_req attr_req;
1567 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1568 bfa_ioc_portid(ioc));
1569 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1570 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1574 bfa_nw_ioc_hb_check(void *cbarg)
1576 struct bfa_ioc *ioc = cbarg;
1579 hb_count = readl(ioc->ioc_regs.heartbeat);
1580 if (ioc->hb_count == hb_count) {
1581 bfa_ioc_recover(ioc);
1584 ioc->hb_count = hb_count;
1587 bfa_ioc_mbox_poll(ioc);
1588 mod_timer(&ioc->hb_timer, jiffies +
1589 msecs_to_jiffies(BFA_IOC_HB_TOV));
1593 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1595 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1596 mod_timer(&ioc->hb_timer, jiffies +
1597 msecs_to_jiffies(BFA_IOC_HB_TOV));
1601 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1603 del_timer(&ioc->hb_timer);
1608 * Initiate a full firmware download.
1611 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1621 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1623 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1625 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1627 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1628 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1629 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1630 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1631 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1637 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1638 ((ioc->ioc_regs.smem_page_start) + (loff)));
1640 loff += sizeof(u32);
1643 * handle page offset wrap around
1645 loff = PSS_SMEM_PGOFF(loff);
1649 ioc->ioc_regs.host_page_num_fn);
1653 writel(bfa_ioc_smem_pgnum(ioc, 0),
1654 ioc->ioc_regs.host_page_num_fn);
1657 * Set boot type, env and device mode at the end.
1659 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1660 ioc->port0_mode, ioc->port1_mode);
1661 writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1662 + BFI_FWBOOT_DEVMODE_OFF));
1663 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1664 + (BFI_FWBOOT_TYPE_OFF)));
1665 writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1666 + (BFI_FWBOOT_ENV_OFF)));
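/*
 * Illustrative sketch (not driver code): the download loop above walks
 * the firmware image one 32-bit word at a time. The chunk helpers are
 * assumed to split the linear word index into a chunk number and an
 * offset within that chunk, conceptually:
 *
 *	chunkno = i / words_per_chunk;
 *	word    = fwimg[i % words_per_chunk];
 *
 * so bfa_cb_image_get_chunk() is called only when the index crosses a
 * chunk boundary, and the SMEM page window is advanced whenever loff
 * wraps around.
 */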
1670 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1672 bfa_ioc_hwinit(ioc, force);
1676 * BFA ioc enable reply by firmware
1679 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1682 struct bfa_iocpf *iocpf = &ioc->iocpf;
1684 ioc->port_mode = ioc->port_mode_cfg = port_mode;
1685 ioc->ad_cap_bm = cap_bm;
1686 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1691 * Update BFA configuration from firmware configuration.
1694 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1696 struct bfi_ioc_attr *attr = ioc->attr;
1698 attr->adapter_prop = ntohl(attr->adapter_prop);
1699 attr->card_type = ntohl(attr->card_type);
1700 attr->maxfrsize = ntohs(attr->maxfrsize);
1702 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1706 * Attach time initialization of mbox logic.
1709 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1711 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1714 INIT_LIST_HEAD(&mod->cmd_q);
1715 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1716 mod->mbhdlr[mc].cbfn = NULL;
1717 mod->mbhdlr[mc].cbarg = ioc->bfa;
1722 * Mbox poll timer -- restarts any pending mailbox requests.
1725 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1727 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1728 struct bfa_mbox_cmd *cmd;
1729 bfa_mbox_cmd_cbfn_t cbfn;
1734 * If no command pending, do nothing
1736 if (list_empty(&mod->cmd_q))
1740 * If previous command is not yet fetched by firmware, do nothing
1742 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1747 * Enqueue command to firmware.
1749 bfa_q_deq(&mod->cmd_q, &cmd);
1750 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1753 * Give a callback to the client, indicating that the command is sent
1764 * Cleanup any pending requests.
1767 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1769 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1770 struct bfa_mbox_cmd *cmd;
1772 while (!list_empty(&mod->cmd_q))
1773 bfa_q_deq(&mod->cmd_q, &cmd);
1777 * Read data from SMEM to host through PCI memmap
1779 * @param[in] ioc memory for IOC
1780 * @param[in] tbuf app memory to store data from smem
1781 * @param[in] soff smem offset
1782 * @param[in] sz number of bytes to read
1785 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1787 u32 pgnum, loff, r32;
1791 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1792 loff = PSS_SMEM_PGOFF(soff);
1795 * Hold semaphore to serialize pll init and fwtrc.
1797 if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
1800 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1802 len = sz/sizeof(u32);
1803 for (i = 0; i < len; i++) {
1804 r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1805 buf[i] = be32_to_cpu(r32);
1806 loff += sizeof(u32);
1809 * handle page offset wrap around
1811 loff = PSS_SMEM_PGOFF(loff);
1814 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1818 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1819 ioc->ioc_regs.host_page_num_fn);
1824 readl(ioc->ioc_regs.ioc_init_sem_reg);
1825 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
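/*
 * Illustrative sketch (not driver code): SMEM is reached through a page
 * window, so a linear SMEM offset is split into a page number written
 * to host_page_num_fn plus an offset within the mapped page:
 *
 *	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
 *	loff  = PSS_SMEM_PGOFF(soff);
 *	val   = readl(ioc->ioc_regs.smem_page_start + loff);
 *
 * When loff wraps, the loop above bumps pgnum and reprograms
 * host_page_num_fn before continuing.
 */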
1830 * Retrieve saved firmware trace from a prior IOC failure.
1833 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1835 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1836 int tlen, status = 0;
1839 if (tlen > BNA_DBG_FWTRC_LEN)
1840 tlen = BNA_DBG_FWTRC_LEN;
1842 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1848 * Save firmware trace if configured.
1851 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1855 if (ioc->dbg_fwsave_once) {
1856 ioc->dbg_fwsave_once = 0;
1857 if (ioc->dbg_fwsave_len) {
1858 tlen = ioc->dbg_fwsave_len;
1859 bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1865 * Retrieve saved firmware trace from a prior IOC failure.
1868 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1872 if (ioc->dbg_fwsave_len == 0)
1873 return BFA_STATUS_ENOFSAVE;
1876 if (tlen > ioc->dbg_fwsave_len)
1877 tlen = ioc->dbg_fwsave_len;
1879 memcpy(trcdata, ioc->dbg_fwsave, tlen);
1881 return BFA_STATUS_OK;
1885 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1888 * Notify driver and common modules registered for notification.
1890 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1891 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1892 bfa_nw_ioc_debug_save_ftrc(ioc);
1896 * IOCPF to IOC interface
1899 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1901 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1905 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1907 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1911 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1913 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
1917 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
1919 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1923 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1926 * Provide enable completion callback and AEN notification.
1928 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1934 static enum bfa_status
1935 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1938 * Hold semaphore so that nobody can access the chip during init.
1940 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1942 bfa_ioc_pll_init_asic(ioc);
1944 ioc->pllinit = true;
1946 /* Initialize LMEM */
1947 bfa_ioc_lmem_init(ioc);
1950 * release semaphore.
1952 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1954 return BFA_STATUS_OK;
1958 * Interface used by diag module to do firmware boot with memory test
1959 * as the entry vector.
1962 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1965 bfa_ioc_stats(ioc, ioc_boots);
1967 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1971 * Initialize IOC state of all functions on a chip reset.
1973 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1974 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1975 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1977 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1978 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1981 bfa_ioc_msgflush(ioc);
1982 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1983 bfa_ioc_lpu_start(ioc);
1987 * Enable/disable IOC failure auto recovery.
1990 bfa_nw_ioc_auto_recover(bool auto_recover)
1992 bfa_nw_auto_recover = auto_recover;
1996 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
2002 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2009 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2011 r32 = readl(ioc->ioc_regs.lpu_mbox +
2013 msgp[i] = htonl(r32);
2017 * turn off mailbox interrupt by clearing mailbox status
2019 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2020 readl(ioc->ioc_regs.lpu_mbox_cmd);
2026 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2028 union bfi_ioc_i2h_msg_u *msg;
2029 struct bfa_iocpf *iocpf = &ioc->iocpf;
2031 msg = (union bfi_ioc_i2h_msg_u *) m;
2033 bfa_ioc_stats(ioc, ioc_isrs);
2035 switch (msg->mh.msg_id) {
2036 case BFI_IOC_I2H_HBEAT:
2039 case BFI_IOC_I2H_ENABLE_REPLY:
2040 bfa_ioc_enable_reply(ioc,
2041 (enum bfa_mode)msg->fw_event.port_mode,
2042 msg->fw_event.cap_bm);
2045 case BFI_IOC_I2H_DISABLE_REPLY:
2046 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2049 case BFI_IOC_I2H_GETATTR_REPLY:
2050 bfa_ioc_getattr_reply(ioc);
2059 * IOC attach time initialization and setup.
2061 * @param[in] ioc memory for IOC
2062 * @param[in] bfa driver instance structure
2065 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2069 ioc->fcmode = false;
2070 ioc->pllinit = false;
2071 ioc->dbg_fwsave_once = true;
2072 ioc->iocpf.ioc = ioc;
2074 bfa_ioc_mbox_attach(ioc);
2075 INIT_LIST_HEAD(&ioc->notify_q);
2077 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2078 bfa_fsm_send_event(ioc, IOC_E_RESET);
2082 * Driver detach time IOC cleanup.
2085 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2087 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2089 /* Done with detach, empty the notify_q. */
2090 INIT_LIST_HEAD(&ioc->notify_q);
2094 * Setup IOC PCI properties.
2096 * @param[in] pcidev PCI device information for this IOC
2099 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2100 enum bfi_pcifn_class clscode)
2102 ioc->clscode = clscode;
2103 ioc->pcidev = *pcidev;
2106 * Initialize IOC and device personality
2108 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2109 ioc->asic_mode = BFI_ASIC_MODE_FC;
2111 switch (pcidev->device_id) {
2112 case PCI_DEVICE_ID_BROCADE_CT:
2113 ioc->asic_gen = BFI_ASIC_GEN_CT;
2114 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2115 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2116 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2117 ioc->ad_cap_bm = BFA_CM_CNA;
2120 case BFA_PCI_DEVICE_ID_CT2:
2121 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2122 if (clscode == BFI_PCIFN_CLASS_FC &&
2123 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2124 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2126 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2127 ioc->ad_cap_bm = BFA_CM_HBA;
2129 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2130 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2131 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2133 ioc->port_mode_cfg = BFA_MODE_CNA;
2134 ioc->ad_cap_bm = BFA_CM_CNA;
2137 ioc->port_mode_cfg = BFA_MODE_NIC;
2138 ioc->ad_cap_bm = BFA_CM_NIC;
2148 * Set asic specific interfaces.
2150 if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2151 bfa_nw_ioc_set_ct_hwif(ioc);
2153 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2154 bfa_nw_ioc_set_ct2_hwif(ioc);
2155 bfa_nw_ioc_ct2_poweron(ioc);
2158 bfa_ioc_map_port(ioc);
2159 bfa_ioc_reg_init(ioc);
2163 * Initialize IOC dma memory
2165 * @param[in] dm_kva kernel virtual address of IOC dma memory
2166 * @param[in] dm_pa physical address of IOC dma memory
2169 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2172 * dma memory for firmware attribute
2174 ioc->attr_dma.kva = dm_kva;
2175 ioc->attr_dma.pa = dm_pa;
2176 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2180 * Return size of dma memory required.
2183 bfa_nw_ioc_meminfo(void)
2185 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
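/*
 * Illustrative usage sketch (not driver code), assuming the caller owns
 * a coherent DMA buffer:
 *
 *	len = bfa_nw_ioc_meminfo();
 *	kva = dma_alloc_coherent(dev, len, &pa, GFP_KERNEL);
 *	bfa_nw_ioc_mem_claim(ioc, kva, pa);
 *
 * i.e. the buffer is sized with bfa_nw_ioc_meminfo() and handed to the
 * IOC before bfa_nw_ioc_enable() is called.
 */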
2189 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2191 bfa_ioc_stats(ioc, ioc_enables);
2192 ioc->dbg_fwsave_once = true;
2194 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2198 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2200 bfa_ioc_stats(ioc, ioc_disables);
2201 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2205 * Initialize memory for saving firmware trace.
2208 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2210 ioc->dbg_fwsave = dbg_fwsave;
2211 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2215 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2217 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2221 * Register mailbox message handler function, to be called by common modules
2224 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2225 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2227 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2229 mod->mbhdlr[mc].cbfn = cbfn;
2230 mod->mbhdlr[mc].cbarg = cbarg;
2234 * Queue a mailbox command request to firmware. If the mailbox is busy,
2235 * the command is queued and sent later. The caller must serialize requests.
2237 * @param[in] ioc IOC instance
2238 * @param[in] cmd Mailbox command
2241 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2242 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2244 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2251 * If a previous command is pending, queue new command
2253 if (!list_empty(&mod->cmd_q)) {
2254 list_add_tail(&cmd->qe, &mod->cmd_q);
2259 * If mailbox is busy, queue command for poll timer
2261 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2263 list_add_tail(&cmd->qe, &mod->cmd_q);
2268 * mailbox is free -- queue command to firmware
2270 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
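/*
 * Illustrative usage sketch (not driver code): a client builds a BFI
 * message inside its own struct bfa_mbox_cmd and queues it, e.g.
 *
 *	struct bfi_ioc_getattr_req *req = (void *)cmd->msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_nw_ioc_mbox_queue(ioc, cmd, NULL, NULL);
 *
 * If the mailbox is busy the command simply sits on cmd_q and is sent
 * later by bfa_ioc_mbox_poll() from the heartbeat path.
 */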
2276 * Handle mailbox interrupts
2279 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2281 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2285 if (bfa_ioc_msgget(ioc, &m)) {
2287 * Treat IOC message class as special.
2289 mc = m.mh.msg_class;
2290 if (mc == BFI_MC_IOC) {
2291 bfa_ioc_isr(ioc, &m);
2295 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2298 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2301 bfa_ioc_lpu_read_stat(ioc);
2304 * Try to send pending mailbox commands
2306 bfa_ioc_mbox_poll(ioc);
2310 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2312 bfa_ioc_stats(ioc, ioc_hbfails);
2313 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2314 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2318 * return true if IOC is disabled
2321 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2323 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2324 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2328 * return true if IOC is operational
2331 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2333 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2337 * Add to IOC heartbeat failure notification queue. To be used by common
2338 * modules such as cee, port, diag.
2341 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2342 struct bfa_ioc_notify *notify)
2344 list_add_tail(¬ify->qe, &ioc->notify_q);
2347 #define BFA_MFG_NAME "Brocade"
2349 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2350 struct bfa_adapter_attr *ad_attr)
2352 struct bfi_ioc_attr *ioc_attr;
2354 ioc_attr = ioc->attr;
2356 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2357 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2358 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2359 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2360 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2361 sizeof(struct bfa_mfg_vpd));
2363 ad_attr->nports = bfa_ioc_get_nports(ioc);
2364 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2366 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2367 /* For now, model descr uses same model string */
2368 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2370 ad_attr->card_type = ioc_attr->card_type;
2371 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2373 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2374 ad_attr->prototype = 1;
2376 ad_attr->prototype = 0;
2378 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2379 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
2381 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2382 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2383 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2384 ad_attr->asic_rev = ioc_attr->asic_rev;
2386 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2389 static enum bfa_ioc_type
2390 bfa_ioc_get_type(struct bfa_ioc *ioc)
2392 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2393 return BFA_IOC_TYPE_LL;
2395 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2397 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2398 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2402 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2404 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2406 (void *)ioc->attr->brcd_serialnum,
2407 BFA_ADAPTER_SERIAL_NUM_LEN);
2411 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2413 memset(fw_ver, 0, BFA_VERSION_LEN);
2414 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2418 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2420 BUG_ON(!(chip_rev));
2422 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2428 chip_rev[4] = ioc->attr->asic_rev;
2433 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2435 memset(optrom_ver, 0, BFA_VERSION_LEN);
2436 memcpy(optrom_ver, ioc->attr->optrom_version,
2441 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2443 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2444 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2448 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2450 struct bfi_ioc_attr *ioc_attr;
2453 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2455 ioc_attr = ioc->attr;
2457 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2458 BFA_MFG_NAME, ioc_attr->card_type);
2461 static enum bfa_ioc_state
2462 bfa_ioc_get_state(struct bfa_ioc *ioc)
2464 enum bfa_iocpf_state iocpf_st;
2465 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2467 if (ioc_st == BFA_IOC_ENABLING ||
2468 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2470 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2473 case BFA_IOCPF_SEMWAIT:
2474 ioc_st = BFA_IOC_SEMWAIT;
2477 case BFA_IOCPF_HWINIT:
2478 ioc_st = BFA_IOC_HWINIT;
2481 case BFA_IOCPF_FWMISMATCH:
2482 ioc_st = BFA_IOC_FWMISMATCH;
2485 case BFA_IOCPF_FAIL:
2486 ioc_st = BFA_IOC_FAIL;
2489 case BFA_IOCPF_INITFAIL:
2490 ioc_st = BFA_IOC_INITFAIL;
2501 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2503 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2505 ioc_attr->state = bfa_ioc_get_state(ioc);
2506 ioc_attr->port_id = ioc->port_id;
2507 ioc_attr->port_mode = ioc->port_mode;
2509 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2510 ioc_attr->cap_bm = ioc->ad_cap_bm;
2512 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2514 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2516 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2517 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2518 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2525 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2527 return ioc->attr->pwwn;
2531 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2533 return ioc->attr->mac;
2537 * Firmware failure detected. Start recovery actions.
2540 bfa_ioc_recover(struct bfa_ioc *ioc)
2542 pr_crit("Heart Beat of IOC has failed\n");
2543 bfa_ioc_stats(ioc, ioc_hbfails);
2544 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2545 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2549 * @dg hal_iocpf_pvt BFA IOC PF private functions
2554 bfa_iocpf_enable(struct bfa_ioc *ioc)
2556 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2560 bfa_iocpf_disable(struct bfa_ioc *ioc)
2562 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2566 bfa_iocpf_fail(struct bfa_ioc *ioc)
2568 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2572 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2574 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2578 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2580 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2584 bfa_iocpf_stop(struct bfa_ioc *ioc)
2586 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2590 bfa_nw_iocpf_timeout(void *ioc_arg)
2592 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2593 enum bfa_iocpf_state iocpf_st;
2595 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2597 if (iocpf_st == BFA_IOCPF_HWINIT)
2598 bfa_ioc_poll_fwinit(ioc);
2600 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2604 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2606 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2608 bfa_ioc_hw_sem_get(ioc);
2612 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
2614 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2616 if (fwstate == BFI_IOC_DISABLED) {
2617 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2621 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2622 bfa_nw_iocpf_timeout(ioc);
2624 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2625 mod_timer(&ioc->iocpf_timer, jiffies +
2626 msecs_to_jiffies(BFA_IOC_POLL_TOV));
2631 * Flash module specific
2635 * FLASH DMA buffer should be big enough to hold both the MFG block and
2636 * the asic block (64k) at the same time, and should be 2k aligned to
2637 * keep a write segment from crossing a sector boundary.
2639 #define BFA_FLASH_SEG_SZ 2048
2640 #define BFA_FLASH_DMA_BUF_SZ \
2641 roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
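/*
 * Illustrative arithmetic (not driver code): with a hypothetical
 * 0x200-byte MFG block the buffer would be
 *
 *	roundup(0x10000 + 0x200, 2048) == 0x10800
 *
 * The real size depends on sizeof(struct bfa_mfg_block); rounding to
 * BFA_FLASH_SEG_SZ keeps each 2k write segment inside one sector.
 */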
2644 bfa_flash_cb(struct bfa_flash *flash)
2648 flash->cbfn(flash->cbarg, flash->status);
2652 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2654 struct bfa_flash *flash = cbarg;
2657 case BFA_IOC_E_DISABLED:
2658 case BFA_IOC_E_FAILED:
2659 if (flash->op_busy) {
2660 flash->status = BFA_STATUS_IOC_FAILURE;
2661 flash->cbfn(flash->cbarg, flash->status);
2671 * Send flash write request.
2673 * @param[in] flash - flash structure
2676 bfa_flash_write_send(struct bfa_flash *flash)
2678 struct bfi_flash_write_req *msg =
2679 (struct bfi_flash_write_req *) flash->mb.msg;
2682 msg->type = be32_to_cpu(flash->type);
2683 msg->instance = flash->instance;
2684 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2685 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2686 flash->residue : BFA_FLASH_DMA_BUF_SZ;
2687 msg->length = be32_to_cpu(len);
2689 /* indicate if it's the last msg of the whole write operation */
2690 msg->last = (len == flash->residue) ? 1 : 0;
2692 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
2693 bfa_ioc_portid(flash->ioc));
2694 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2695 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
2696 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2698 flash->residue -= len;
2699 flash->offset += len;
2703 * Send flash read request.
2705 * @param[in] cbarg - callback argument
2708 bfa_flash_read_send(void *cbarg)
2710 struct bfa_flash *flash = cbarg;
2711 struct bfi_flash_read_req *msg =
2712 (struct bfi_flash_read_req *) flash->mb.msg;
2715 msg->type = be32_to_cpu(flash->type);
2716 msg->instance = flash->instance;
2717 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2718 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2719 flash->residue : BFA_FLASH_DMA_BUF_SZ;
2720 msg->length = be32_to_cpu(len);
2721 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
2722 bfa_ioc_portid(flash->ioc));
2723 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2724 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2728 * Process flash response messages upon receiving interrupts.
2730 * @param[in] flasharg - flash structure
2731 * @param[in] msg - message structure
2734 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
2736 struct bfa_flash *flash = flasharg;
2740 struct bfi_flash_query_rsp *query;
2741 struct bfi_flash_write_rsp *write;
2742 struct bfi_flash_read_rsp *read;
2743 struct bfi_mbmsg *msg;
2748 /* receiving response after ioc failure */
2749 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
2752 switch (msg->mh.msg_id) {
2753 case BFI_FLASH_I2H_QUERY_RSP:
2754 status = be32_to_cpu(m.query->status);
2755 if (status == BFA_STATUS_OK) {
2757 struct bfa_flash_attr *attr, *f;
2759 attr = (struct bfa_flash_attr *) flash->ubuf;
2760 f = (struct bfa_flash_attr *) flash->dbuf_kva;
2761 attr->status = be32_to_cpu(f->status);
2762 attr->npart = be32_to_cpu(f->npart);
2763 for (i = 0; i < attr->npart; i++) {
2764 attr->part[i].part_type =
2765 be32_to_cpu(f->part[i].part_type);
2766 attr->part[i].part_instance =
2767 be32_to_cpu(f->part[i].part_instance);
2768 attr->part[i].part_off =
2769 be32_to_cpu(f->part[i].part_off);
2770 attr->part[i].part_size =
2771 be32_to_cpu(f->part[i].part_size);
2772 attr->part[i].part_len =
2773 be32_to_cpu(f->part[i].part_len);
2774 attr->part[i].part_status =
2775 be32_to_cpu(f->part[i].part_status);
2778 flash->status = status;
2779 bfa_flash_cb(flash);
2781 case BFI_FLASH_I2H_WRITE_RSP:
2782 status = be32_to_cpu(m.write->status);
2783 if (status != BFA_STATUS_OK || flash->residue == 0) {
2784 flash->status = status;
2785 bfa_flash_cb(flash);
2787 bfa_flash_write_send(flash);
2789 case BFI_FLASH_I2H_READ_RSP:
2790 status = be32_to_cpu(m.read->status);
2791 if (status != BFA_STATUS_OK) {
2792 flash->status = status;
2793 bfa_flash_cb(flash);
2795 u32 len = be32_to_cpu(m.read->length);
2796 memcpy(flash->ubuf + flash->offset,
2797 flash->dbuf_kva, len);
2798 flash->residue -= len;
2799 flash->offset += len;
2800 if (flash->residue == 0) {
2801 flash->status = status;
2802 bfa_flash_cb(flash);
2804 bfa_flash_read_send(flash);
2807 case BFI_FLASH_I2H_BOOT_VER_RSP:
2808 case BFI_FLASH_I2H_EVENT:
2816 * Flash memory info API.
2819 bfa_nw_flash_meminfo(void)
2821 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2827 * @param[in] flash - flash structure
2828 * @param[in] ioc - ioc structure
2829 * @param[in] dev - device structure
2832 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2836 flash->cbarg = NULL;
2839 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
2840 bfa_q_qe_init(&flash->ioc_notify);
2841 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
2842 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2846 * Claim memory for flash
2848 * @param[in] flash - flash structure
2849 * @param[in] dm_kva - pointer to virtual memory address
2850 * @param[in] dm_pa - physical memory address
2853 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2855 flash->dbuf_kva = dm_kva;
2856 flash->dbuf_pa = dm_pa;
2857 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
2858 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2859 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
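/*
 * Illustrative setup sketch (not driver code): the expected call order,
 * with the DMA buffer sized via bfa_nw_flash_meminfo() and allocated by
 * the caller, is
 *
 *	bfa_nw_flash_attach(flash, ioc, dev);
 *	bfa_nw_flash_memclaim(flash, dm_kva, dm_pa);
 *	...
 *	bfa_nw_flash_get_attr(flash, attr, my_cbfn, my_cbarg);
 *
 * my_cbfn/my_cbarg are hypothetical caller-supplied completion hooks;
 * completion is reported asynchronously through bfa_flash_cb().
 */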
2863 * Get flash attribute.
2865 * @param[in] flash - flash structure
2866 * @param[in] attr - flash attribute structure
2867 * @param[in] cbfn - callback function
2868 * @param[in] cbarg - callback argument
2873 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2874 bfa_cb_flash cbfn, void *cbarg)
2876 struct bfi_flash_query_req *msg =
2877 (struct bfi_flash_query_req *) flash->mb.msg;
2879 if (!bfa_nw_ioc_is_operational(flash->ioc))
2880 return BFA_STATUS_IOC_NON_OP;
2883 return BFA_STATUS_DEVBUSY;
2887 flash->cbarg = cbarg;
2888 flash->ubuf = (u8 *) attr;
2890 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
2891 bfa_ioc_portid(flash->ioc));
2892 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
2893 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2895 return BFA_STATUS_OK;
2899 * Update flash partition.
2901 * @param[in] flash - flash structure
2902 * @param[in] type - flash partition type
2903 * @param[in] instance - flash partition instance
2904 * @param[in] buf - update data buffer
2905 * @param[in] len - data buffer length
2906 * @param[in] offset - offset relative to the partition starting address
2907 * @param[in] cbfn - callback function
2908 * @param[in] cbarg - callback argument
2913 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2914 void *buf, u32 len, u32 offset,
2915 bfa_cb_flash cbfn, void *cbarg)
2917 if (!bfa_nw_ioc_is_operational(flash->ioc))
2918 return BFA_STATUS_IOC_NON_OP;
2921 * 'len' must be a multiple of 4 bytes (word aligned)
2923 if (!len || (len & 0x03))
2924 return BFA_STATUS_FLASH_BAD_LEN;
2926 if (type == BFA_FLASH_PART_MFG)
2927 return BFA_STATUS_EINVAL;
2930 return BFA_STATUS_DEVBUSY;
2934 flash->cbarg = cbarg;
2936 flash->instance = instance;
2937 flash->residue = len;
2939 flash->addr_off = offset;
2942 bfa_flash_write_send(flash);
2944 return BFA_STATUS_OK;
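/*
 * Illustrative usage sketch (not driver code), assuming a caller-owned
 * buffer "buf" of "len" bytes where len is a multiple of 4:
 *
 *	status = bfa_nw_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *					  buf, len, 0, my_cbfn, my_cbarg);
 *
 * The transfer is split into BFA_FLASH_DMA_BUF_SZ sized pieces by
 * bfa_flash_write_send(); my_cbfn (hypothetical) runs when the last
 * piece completes or on error. BFA_FLASH_PART_FWIMG is used here only
 * as an example partition type.
 */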
2948 * Read flash partition.
2950 * @param[in] flash - flash structure
2951 * @param[in] type - flash partition type
2952 * @param[in] instance - flash partition instance
2953 * @param[in] buf - read data buffer
2954 * @param[in] len - data buffer length
2955 * @param[in] offset - offset relative to the partition starting address
2956 * @param[in] cbfn - callback function
2957 * @param[in] cbarg - callback argument
2962 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
2963 void *buf, u32 len, u32 offset,
2964 bfa_cb_flash cbfn, void *cbarg)
2966 if (!bfa_nw_ioc_is_operational(flash->ioc))
2967 return BFA_STATUS_IOC_NON_OP;
2970 * 'len' must be a multiple of 4 bytes (word aligned)
2972 if (!len || (len & 0x03))
2973 return BFA_STATUS_FLASH_BAD_LEN;
2976 return BFA_STATUS_DEVBUSY;
2980 flash->cbarg = cbarg;
2982 flash->instance = instance;
2983 flash->residue = len;
2985 flash->addr_off = offset;
2988 bfa_flash_read_send(flash);
2990 return BFA_STATUS_OK;