1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18
19 #include "bfa_ioc.h"
20 #include "bfi_reg.h"
21 #include "bfa_defs.h"
22
23 /**
24  * IOC local definitions
25  */
26
27 /**
28  * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
29  */
30
31 #define bfa_ioc_firmware_lock(__ioc)                    \
32                         ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
33 #define bfa_ioc_firmware_unlock(__ioc)                  \
34                         ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
35 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
36 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
37 #define bfa_ioc_notify_fail(__ioc)                      \
38                         ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
39 #define bfa_ioc_sync_start(__ioc)               \
40                         ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
41 #define bfa_ioc_sync_join(__ioc)                        \
42                         ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
43 #define bfa_ioc_sync_leave(__ioc)                       \
44                         ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
45 #define bfa_ioc_sync_ack(__ioc)                         \
46                         ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
47 #define bfa_ioc_sync_complete(__ioc)                    \
48                         ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
49
50 #define bfa_ioc_mbox_cmd_pending(__ioc)         \
51                         (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
52                         readl((__ioc)->ioc_regs.hfn_mbox_cmd))
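/*
 * The ioc_hwif ops table is what lets one IOC state machine drive either
 * ASIC generation. A sketch of the indirection (the real definition lives
 * in bfa_ioc.h; the member list shown here is abbreviated and partly
 * assumed):
 *
 *     struct bfa_ioc_hwif {
 *             bool (*ioc_firmware_lock)(struct bfa_ioc *ioc);
 *             void (*ioc_firmware_unlock)(struct bfa_ioc *ioc);
 *             void (*ioc_reg_init)(struct bfa_ioc *ioc);
 *             ...
 *     };
 *
 * An ASIC-specific setup call points ioc->ioc_hwif at the right table
 * before any of the macros above are used.
 */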
53
54 static bool bfa_nw_auto_recover = true;
55
56 /*
57  * forward declarations
58  */
59 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
60 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
61 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
62 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
63 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
64 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
65 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
66 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
67 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
68 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
69 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
70 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
71 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
72 static void bfa_ioc_recover(struct bfa_ioc *ioc);
73 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
74 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
75 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
76 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
77 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
79 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
80 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
81 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
82 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
83 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
84                          u32 boot_param);
85 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
86 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
87                                                 char *serial_num);
88 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
89                                                 char *fw_ver);
90 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
91                                                 char *chip_rev);
92 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
93                                                 char *optrom_ver);
94 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
95                                                 char *manufacturer);
96 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
97 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
98
99 /**
100  * IOC state machine definitions/declarations
101  */
102 enum ioc_event {
103         IOC_E_RESET             = 1,    /*!< IOC reset request          */
104         IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
105         IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
106         IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
107         IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
108         IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
109         IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
110         IOC_E_PFFAILED          = 8,    /*!< failure notice by iocpf sm */
111         IOC_E_HBFAIL            = 9,    /*!< heartbeat failure          */
112         IOC_E_HWERROR           = 10,   /*!< hardware error interrupt   */
113         IOC_E_TIMEOUT           = 11,   /*!< timeout                    */
114         IOC_E_HWFAILED          = 12,   /*!< PCI mapping failure notice */
115 };
116
117 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
124 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
125 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
126 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
127
128 static struct bfa_sm_table ioc_sm_table[] = {
129         {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
130         {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
131         {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
132         {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
133         {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
134         {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
135         {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
136         {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
137         {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
138         {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
139 };
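/*
 * A state handler pointer is mapped back to its BFA_IOC_* enum by scanning
 * this table. Sketch of the lookup (the real helper is bfa_sm_to_state()
 * from the shared headers; shown here only to illustrate the table's role):
 *
 *     int i = 0;
 *
 *     while (smt[i].sm && smt[i].sm != (bfa_sm_t)fsm)
 *             i++;
 *     return smt[i].state;
 */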
140
141 /*
142  * Forward declarations for iocpf state machine
143  */
144 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
145 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
146 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
147 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
150
151 /**
152  * IOCPF state machine events
153  */
154 enum iocpf_event {
155         IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
156         IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
157         IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
158         IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
159         IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
160         IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
161         IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
162         IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
163         IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
164         IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
165         IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
166         IOCPF_E_SEM_ERROR       = 12,   /*!< h/w sem mapping error      */
167 };
168
169 /**
170  * IOCPF states
171  */
172 enum bfa_iocpf_state {
173         BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
174         BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
175         BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
176         BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
177         BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
178         BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
179         BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
180         BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
181         BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from driver's */
182 };
183
184 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
185 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
186 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
188 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
189 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
190 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
191 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
192                                                 enum iocpf_event);
193 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
194 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
195 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
196 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
197 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
198                                                 enum iocpf_event);
199 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
200
201 static struct bfa_sm_table iocpf_sm_table[] = {
202         {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
203         {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
204         {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
205         {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
206         {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
207         {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
208         {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
209         {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
210         {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
211         {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
212         {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
213         {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
214         {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
215         {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
216 };
217
218 /**
219  * IOC State Machine
220  */
221
222 /**
223  * Beginning state. IOC uninit state.
224  */
225 static void
226 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
227 {
228 }
229
230 /**
231  * IOC is in uninit state.
232  */
233 static void
234 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
235 {
236         switch (event) {
237         case IOC_E_RESET:
238                 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
239                 break;
240
241         default:
242                 bfa_sm_fault(event);
243         }
244 }
245
246 /**
247  * Reset entry actions -- initialize state machine
248  */
249 static void
250 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
251 {
252         bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
253 }
254
255 /**
256  * IOC is in reset state.
257  */
258 static void
259 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
260 {
261         switch (event) {
262         case IOC_E_ENABLE:
263                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
264                 break;
265
266         case IOC_E_DISABLE:
267                 bfa_ioc_disable_comp(ioc);
268                 break;
269
270         case IOC_E_DETACH:
271                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
272                 break;
273
274         default:
275                 bfa_sm_fault(event);
276         }
277 }
278
279 static void
280 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
281 {
282         bfa_iocpf_enable(ioc);
283 }
284
285 /**
286  * Host IOC function is being enabled, awaiting response from firmware.
287  * Semaphore is acquired.
288  */
289 static void
290 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
291 {
292         switch (event) {
293         case IOC_E_ENABLED:
294                 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
295                 break;
296
297         case IOC_E_PFFAILED:
298                 /* !!! fall through !!! */
299         case IOC_E_HWERROR:
300                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
301                 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
302                 if (event != IOC_E_PFFAILED)
303                         bfa_iocpf_initfail(ioc);
304                 break;
305
306         case IOC_E_HWFAILED:
307                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
308                 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
309                 break;
310
311         case IOC_E_DISABLE:
312                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
313                 break;
314
315         case IOC_E_DETACH:
316                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
317                 bfa_iocpf_stop(ioc);
318                 break;
319
320         case IOC_E_ENABLE:
321                 break;
322
323         default:
324                 bfa_sm_fault(event);
325         }
326 }
327
328 /**
329  * Request IOC attributes from firmware, with the IOC timer armed.
330  */
331 static void
332 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
333 {
334         mod_timer(&ioc->ioc_timer, jiffies +
335                 msecs_to_jiffies(BFA_IOC_TOV));
336         bfa_ioc_send_getattr(ioc);
337 }
338
339 /**
340  * IOC configuration in progress. Timer is active.
341  */
342 static void
343 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
344 {
345         switch (event) {
346         case IOC_E_FWRSP_GETATTR:
347                 del_timer(&ioc->ioc_timer);
348                 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
349                 break;
350
351         case IOC_E_PFFAILED:
352         case IOC_E_HWERROR:
353                 del_timer(&ioc->ioc_timer);
354                 /* fall through */
355         case IOC_E_TIMEOUT:
356                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
357                 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
358                 if (event != IOC_E_PFFAILED)
359                         bfa_iocpf_getattrfail(ioc);
360                 break;
361
362         case IOC_E_DISABLE:
363                 del_timer(&ioc->ioc_timer);
364                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
365                 break;
366
367         case IOC_E_ENABLE:
368                 break;
369
370         default:
371                 bfa_sm_fault(event);
372         }
373 }
374
375 static void
376 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
377 {
378         ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
379         bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
380         bfa_ioc_hb_monitor(ioc);
381 }
382
383 static void
384 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
385 {
386         switch (event) {
387         case IOC_E_ENABLE:
388                 break;
389
390         case IOC_E_DISABLE:
391                 bfa_ioc_hb_stop(ioc);
392                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
393                 break;
394
395         case IOC_E_PFFAILED:
396         case IOC_E_HWERROR:
397                 bfa_ioc_hb_stop(ioc);
398                 /* !!! fall through !!! */
399         case IOC_E_HBFAIL:
400                 if (ioc->iocpf.auto_recover)
401                         bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
402                 else
403                         bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
404
405                 bfa_ioc_fail_notify(ioc);
406
407                 if (event != IOC_E_PFFAILED)
408                         bfa_iocpf_fail(ioc);
409                 break;
410
411         default:
412                 bfa_sm_fault(event);
413         }
414 }
415
416 static void
417 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
418 {
419         bfa_iocpf_disable(ioc);
420 }
421
422 /**
423  * IOC is being disabled
424  */
425 static void
426 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
427 {
428         switch (event) {
429         case IOC_E_DISABLED:
430                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
431                 break;
432
433         case IOC_E_HWERROR:
434                 /*
435                  * No state change.  Will move to disabled state
436                  * after iocpf sm completes failure processing and
437                  * moves to disabled state.
438                  */
439                 bfa_iocpf_fail(ioc);
440                 break;
441
442         case IOC_E_HWFAILED:
443                 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
444                 bfa_ioc_disable_comp(ioc);
445                 break;
446
447         default:
448                 bfa_sm_fault(event);
449         }
450 }
451
452 /**
453  * IOC disable completion entry.
454  */
455 static void
456 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
457 {
458         bfa_ioc_disable_comp(ioc);
459 }
460
461 static void
462 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
463 {
464         switch (event) {
465         case IOC_E_ENABLE:
466                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
467                 break;
468
469         case IOC_E_DISABLE:
470                 ioc->cbfn->disable_cbfn(ioc->bfa);
471                 break;
472
473         case IOC_E_DETACH:
474                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
475                 bfa_iocpf_stop(ioc);
476                 break;
477
478         default:
479                 bfa_sm_fault(event);
480         }
481 }
482
483 static void
484 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
485 {
486 }
487
488 /**
489  * Hardware initialization retry.
490  */
491 static void
492 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
493 {
494         switch (event) {
495         case IOC_E_ENABLED:
496                 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
497                 break;
498
499         case IOC_E_PFFAILED:
500         case IOC_E_HWERROR:
501                 /**
502                  * Initialization retry failed.
503                  */
504                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
505                 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
506                 if (event != IOC_E_PFFAILED)
507                         bfa_iocpf_initfail(ioc);
508                 break;
509
510         case IOC_E_HWFAILED:
511                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
512                 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
513                 break;
514
515         case IOC_E_ENABLE:
516                 break;
517
518         case IOC_E_DISABLE:
519                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
520                 break;
521
522         case IOC_E_DETACH:
523                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
524                 bfa_iocpf_stop(ioc);
525                 break;
526
527         default:
528                 bfa_sm_fault(event);
529         }
530 }
531
532 static void
533 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
534 {
535 }
536
537 /**
538  * IOC failure.
539  */
540 static void
541 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
542 {
543         switch (event) {
544         case IOC_E_ENABLE:
545                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
546                 break;
547
548         case IOC_E_DISABLE:
549                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
550                 break;
551
552         case IOC_E_DETACH:
553                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
554                 bfa_iocpf_stop(ioc);
555                 break;
556
557         case IOC_E_HWERROR:
558                 /* HB failure notification, ignore. */
559                 break;
560
561         default:
562                 bfa_sm_fault(event);
563         }
564 }
565
566 static void
567 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
568 {
569 }
570
571 /**
572  * IOC hardware failure.
573  */
574 static void
575 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
576 {
577         switch (event) {
578
579         case IOC_E_ENABLE:
580                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
581                 break;
582
583         case IOC_E_DISABLE:
584                 ioc->cbfn->disable_cbfn(ioc->bfa);
585                 break;
586
587         case IOC_E_DETACH:
588                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
589                 break;
590
591         default:
592                 bfa_sm_fault(event);
593         }
594 }
595
596 /**
597  * IOCPF State Machine
598  */
599
600 /**
601  * Reset entry actions -- initialize state machine
602  */
603 static void
604 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
605 {
606         iocpf->fw_mismatch_notified = false;
607         iocpf->auto_recover = bfa_nw_auto_recover;
608 }
609
610 /**
611  * Beginning state. IOC is in reset state.
612  */
613 static void
614 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
615 {
616         switch (event) {
617         case IOCPF_E_ENABLE:
618                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
619                 break;
620
621         case IOCPF_E_STOP:
622                 break;
623
624         default:
625                 bfa_sm_fault(event);
626         }
627 }
628
629 /**
630  * Semaphore should be acquired for version check.
631  */
632 static void
633 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
634 {
635         bfa_ioc_hw_sem_init(iocpf->ioc);
636         bfa_ioc_hw_sem_get(iocpf->ioc);
637 }
638
639 /**
640  * Awaiting h/w semaphore to continue with version check.
641  */
642 static void
643 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
644 {
645         struct bfa_ioc *ioc = iocpf->ioc;
646
647         switch (event) {
648         case IOCPF_E_SEMLOCKED:
649                 if (bfa_ioc_firmware_lock(ioc)) {
650                         if (bfa_ioc_sync_start(ioc)) {
651                                 bfa_ioc_sync_join(ioc);
652                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
653                         } else {
654                                 bfa_ioc_firmware_unlock(ioc);
655                                 bfa_nw_ioc_hw_sem_release(ioc);
656                                 mod_timer(&ioc->sem_timer, jiffies +
657                                         msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
658                         }
659                 } else {
660                         bfa_nw_ioc_hw_sem_release(ioc);
661                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
662                 }
663                 break;
664
665         case IOCPF_E_SEM_ERROR:
666                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
667                 bfa_ioc_pf_hwfailed(ioc);
668                 break;
669
670         case IOCPF_E_DISABLE:
671                 bfa_ioc_hw_sem_get_cancel(ioc);
672                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
673                 bfa_ioc_pf_disabled(ioc);
674                 break;
675
676         case IOCPF_E_STOP:
677                 bfa_ioc_hw_sem_get_cancel(ioc);
678                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
679                 break;
680
681         default:
682                 bfa_sm_fault(event);
683         }
684 }
685
686 /**
687  * Notify enable completion callback
688  */
689 static void
690 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
691 {
692         /* Call only the first time sm enters fwmismatch state. */
693         if (!iocpf->fw_mismatch_notified)
694                 bfa_ioc_pf_fwmismatch(iocpf->ioc);
695
696         iocpf->fw_mismatch_notified = true;
697         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
698                 msecs_to_jiffies(BFA_IOC_TOV));
699 }
700
701 /**
702  * Awaiting firmware version match.
703  */
704 static void
705 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
706 {
707         struct bfa_ioc *ioc = iocpf->ioc;
708
709         switch (event) {
710         case IOCPF_E_TIMEOUT:
711                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
712                 break;
713
714         case IOCPF_E_DISABLE:
715                 del_timer(&ioc->iocpf_timer);
716                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
717                 bfa_ioc_pf_disabled(ioc);
718                 break;
719
720         case IOCPF_E_STOP:
721                 del_timer(&ioc->iocpf_timer);
722                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
723                 break;
724
725         default:
726                 bfa_sm_fault(event);
727         }
728 }
729
730 /**
731  * Request for semaphore.
732  */
733 static void
734 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
735 {
736         bfa_ioc_hw_sem_get(iocpf->ioc);
737 }
738
739 /**
740  * Awaiting semaphore for h/w initialization.
741  */
742 static void
743 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
744 {
745         struct bfa_ioc *ioc = iocpf->ioc;
746
747         switch (event) {
748         case IOCPF_E_SEMLOCKED:
749                 if (bfa_ioc_sync_complete(ioc)) {
750                         bfa_ioc_sync_join(ioc);
751                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
752                 } else {
753                         bfa_nw_ioc_hw_sem_release(ioc);
754                         mod_timer(&ioc->sem_timer, jiffies +
755                                 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
756                 }
757                 break;
758
759         case IOCPF_E_SEM_ERROR:
760                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
761                 bfa_ioc_pf_hwfailed(ioc);
762                 break;
763
764         case IOCPF_E_DISABLE:
765                 bfa_ioc_hw_sem_get_cancel(ioc);
766                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
767                 break;
768
769         default:
770                 bfa_sm_fault(event);
771         }
772 }
773
774 static void
775 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
776 {
777         iocpf->poll_time = 0;
778         bfa_ioc_reset(iocpf->ioc, false);
779 }
780
781 /**
782  * Hardware is being initialized. Interrupts are enabled.
783  * Holding hardware semaphore lock.
784  */
785 static void
786 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
787 {
788         struct bfa_ioc *ioc = iocpf->ioc;
789
790         switch (event) {
791         case IOCPF_E_FWREADY:
792                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
793                 break;
794
795         case IOCPF_E_TIMEOUT:
796                 bfa_nw_ioc_hw_sem_release(ioc);
797                 bfa_ioc_pf_failed(ioc);
798                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
799                 break;
800
801         case IOCPF_E_DISABLE:
802                 del_timer(&ioc->iocpf_timer);
803                 bfa_ioc_sync_leave(ioc);
804                 bfa_nw_ioc_hw_sem_release(ioc);
805                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
806                 break;
807
808         default:
809                 bfa_sm_fault(event);
810         }
811 }
812
813 static void
814 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
815 {
816         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
817                 msecs_to_jiffies(BFA_IOC_TOV));
818         /**
819          * Enable Interrupts before sending fw IOC ENABLE cmd.
820          */
821         iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
822         bfa_ioc_send_enable(iocpf->ioc);
823 }
824
825 /**
826  * Host IOC function is being enabled, awaiting response from firmware.
827  * Semaphore is acquired.
828  */
829 static void
830 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
831 {
832         struct bfa_ioc *ioc = iocpf->ioc;
833
834         switch (event) {
835         case IOCPF_E_FWRSP_ENABLE:
836                 del_timer(&ioc->iocpf_timer);
837                 bfa_nw_ioc_hw_sem_release(ioc);
838                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
839                 break;
840
841         case IOCPF_E_INITFAIL:
842                 del_timer(&ioc->iocpf_timer);
843                 /*
844                  * !!! fall through !!!
845                  */
846         case IOCPF_E_TIMEOUT:
847                 bfa_nw_ioc_hw_sem_release(ioc);
848                 if (event == IOCPF_E_TIMEOUT)
849                         bfa_ioc_pf_failed(ioc);
850                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
851                 break;
852
853         case IOCPF_E_DISABLE:
854                 del_timer(&ioc->iocpf_timer);
855                 bfa_nw_ioc_hw_sem_release(ioc);
856                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
857                 break;
858
859         default:
860                 bfa_sm_fault(event);
861         }
862 }
863
864 static void
865 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
866 {
867         bfa_ioc_pf_enabled(iocpf->ioc);
868 }
869
870 static void
871 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
872 {
873         switch (event) {
874         case IOCPF_E_DISABLE:
875                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
876                 break;
877
878         case IOCPF_E_GETATTRFAIL:
879                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
880                 break;
881
882         case IOCPF_E_FAIL:
883                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
884                 break;
885
886         default:
887                 bfa_sm_fault(event);
888         }
889 }
890
891 static void
892 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
893 {
894         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
895                 msecs_to_jiffies(BFA_IOC_TOV));
896         bfa_ioc_send_disable(iocpf->ioc);
897 }
898
899 /**
900  * IOC is being disabled
901  */
902 static void
903 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
904 {
905         struct bfa_ioc *ioc = iocpf->ioc;
906
907         switch (event) {
908         case IOCPF_E_FWRSP_DISABLE:
909                 del_timer(&ioc->iocpf_timer);
910                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
911                 break;
912
913         case IOCPF_E_FAIL:
914                 del_timer(&ioc->iocpf_timer);
915                 /*
916                  * !!! fall through !!!
917                  */
918
919         case IOCPF_E_TIMEOUT:
920                 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
921                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
922                 break;
923
924         case IOCPF_E_FWRSP_ENABLE:
925                 break;
926
927         default:
928                 bfa_sm_fault(event);
929         }
930 }
931
932 static void
933 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
934 {
935         bfa_ioc_hw_sem_get(iocpf->ioc);
936 }
937
938 /**
939  * IOC hb ack request is being removed.
940  */
941 static void
942 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
943 {
944         struct bfa_ioc *ioc = iocpf->ioc;
945
946         switch (event) {
947         case IOCPF_E_SEMLOCKED:
948                 bfa_ioc_sync_leave(ioc);
949                 bfa_nw_ioc_hw_sem_release(ioc);
950                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
951                 break;
952
953         case IOCPF_E_SEM_ERROR:
954                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
955                 bfa_ioc_pf_hwfailed(ioc);
956                 break;
957
958         case IOCPF_E_FAIL:
959                 break;
960
961         default:
962                 bfa_sm_fault(event);
963         }
964 }
965
966 /**
967  * IOC disable completion entry.
968  */
969 static void
970 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
971 {
972         bfa_ioc_mbox_flush(iocpf->ioc);
973         bfa_ioc_pf_disabled(iocpf->ioc);
974 }
975
976 static void
977 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
978 {
979         struct bfa_ioc *ioc = iocpf->ioc;
980
981         switch (event) {
982         case IOCPF_E_ENABLE:
983                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
984                 break;
985
986         case IOCPF_E_STOP:
987                 bfa_ioc_firmware_unlock(ioc);
988                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
989                 break;
990
991         default:
992                 bfa_sm_fault(event);
993         }
994 }
995
996 static void
997 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
998 {
999         bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
1000         bfa_ioc_hw_sem_get(iocpf->ioc);
1001 }
1002
1003 /**
1004  * Hardware initialization failed.
1005  */
1006 static void
1007 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1008 {
1009         struct bfa_ioc *ioc = iocpf->ioc;
1010
1011         switch (event) {
1012         case IOCPF_E_SEMLOCKED:
1013                 bfa_ioc_notify_fail(ioc);
1014                 bfa_ioc_sync_leave(ioc);
1015                 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1016                 bfa_nw_ioc_hw_sem_release(ioc);
1017                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1018                 break;
1019
1020         case IOCPF_E_SEM_ERROR:
1021                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1022                 bfa_ioc_pf_hwfailed(ioc);
1023                 break;
1024
1025         case IOCPF_E_DISABLE:
1026                 bfa_ioc_hw_sem_get_cancel(ioc);
1027                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1028                 break;
1029
1030         case IOCPF_E_STOP:
1031                 bfa_ioc_hw_sem_get_cancel(ioc);
1032                 bfa_ioc_firmware_unlock(ioc);
1033                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1034                 break;
1035
1036         case IOCPF_E_FAIL:
1037                 break;
1038
1039         default:
1040                 bfa_sm_fault(event);
1041         }
1042 }
1043
1044 static void
1045 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
1046 {
1047 }
1048
1049 /**
1050  * Hardware initialization failed.
1051  */
1052 static void
1053 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1054 {
1055         struct bfa_ioc *ioc = iocpf->ioc;
1056
1057         switch (event) {
1058         case IOCPF_E_DISABLE:
1059                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1060                 break;
1061
1062         case IOCPF_E_STOP:
1063                 bfa_ioc_firmware_unlock(ioc);
1064                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1065                 break;
1066
1067         default:
1068                 bfa_sm_fault(event);
1069         }
1070 }
1071
1072 static void
1073 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1074 {
1075         /**
1076          * Mark IOC as failed in hardware and stop firmware.
1077          */
1078         bfa_ioc_lpu_stop(iocpf->ioc);
1079
1080         /**
1081          * Flush any queued up mailbox requests.
1082          */
1083         bfa_ioc_mbox_flush(iocpf->ioc);
1084         bfa_ioc_hw_sem_get(iocpf->ioc);
1085 }
1086
1087 /**
1088  * IOC is in failed state.
1089  */
1090 static void
1091 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1092 {
1093         struct bfa_ioc *ioc = iocpf->ioc;
1094
1095         switch (event) {
1096         case IOCPF_E_SEMLOCKED:
1097                 bfa_ioc_sync_ack(ioc);
1098                 bfa_ioc_notify_fail(ioc);
1099                 if (!iocpf->auto_recover) {
1100                         bfa_ioc_sync_leave(ioc);
1101                         writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1102                         bfa_nw_ioc_hw_sem_release(ioc);
1103                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1104                 } else {
1105                         if (bfa_ioc_sync_complete(ioc)) {
1106                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1107                         } else {
1108                                 bfa_nw_ioc_hw_sem_release(ioc);
1109                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1110                         }
1111                 }
1112                 break;
1113
1114         case IOCPF_E_SEM_ERROR:
1115                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1116                 bfa_ioc_pf_hwfailed(ioc);
1117                 break;
1118
1119         case IOCPF_E_DISABLE:
1120                 bfa_ioc_hw_sem_get_cancel(ioc);
1121                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1122                 break;
1123
1124         case IOCPF_E_FAIL:
1125                 break;
1126
1127         default:
1128                 bfa_sm_fault(event);
1129         }
1130 }
1131
1132 static void
1133 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1134 {
1135 }
1136
1137 /**
1138  * @brief
1139  * IOC is in failed state.
1140  */
1141 static void
1142 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1143 {
1144         switch (event) {
1145         case IOCPF_E_DISABLE:
1146                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1147                 break;
1148
1149         default:
1150                 bfa_sm_fault(event);
1151         }
1152 }
1153
1154 /**
1155  * BFA IOC private functions
1156  */
1157
1158 /**
1159  * Notify common modules registered for notification.
1160  */
1161 static void
1162 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1163 {
1164         struct bfa_ioc_notify *notify;
1165         struct list_head *qe;
1166
1167         list_for_each(qe, &ioc->notify_q) {
1168                 notify = (struct bfa_ioc_notify *)qe;
1169                 notify->cbfn(notify->cbarg, event);
1170         }
1171 }
1172
1173 static void
1174 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1175 {
1176         ioc->cbfn->disable_cbfn(ioc->bfa);
1177         bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1178 }
1179
1180 bool
1181 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1182 {
1183         u32 r32;
1184         int cnt = 0;
1185 #define BFA_SEM_SPINCNT 3000
1186
1187         r32 = readl(sem_reg);
1188
1189         while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1190                 cnt++;
1191                 udelay(2);
1192                 r32 = readl(sem_reg);
1193         }
1194
1195         if (!(r32 & 1))
1196                 return true;
1197
1198         return false;
1199 }
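/*
 * Usage sketch (illustrative only): callers pair get/release around a
 * short critical section. With BFA_SEM_SPINCNT of 3000 and udelay(2),
 * the busy-wait above gives up after roughly 6 ms.
 *
 *     if (bfa_nw_ioc_sem_get(sem_reg)) {
 *             ... touch shared adapter state ...
 *             bfa_nw_ioc_sem_release(sem_reg);
 *     }
 */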
1200
1201 void
1202 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1203 {
1204         readl(sem_reg);
1205         writel(1, sem_reg);
1206 }
1207
1208 /* Clear fwver hdr */
1209 static void
1210 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1211 {
1212         u32 pgnum, pgoff, loff = 0;
1213         int i;
1214
1215         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1216         pgoff = PSS_SMEM_PGOFF(loff);
1217         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1218
1219         for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1220                 writel(0, ioc->ioc_regs.smem_page_start + loff);
1221                 loff += sizeof(u32);
1222         }
1223 }
1224
1225
1226 static void
1227 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1228 {
1229         struct bfi_ioc_image_hdr fwhdr;
1230         u32 fwstate, r32;
1231
1232         /* Spin on init semaphore to serialize. */
1233         r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1234         while (r32 & 0x1) {
1235                 udelay(20);
1236                 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1237         }
1238
1239         fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1240         if (fwstate == BFI_IOC_UNINIT) {
1241                 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1242                 return;
1243         }
1244
1245         bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1246
1247         if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1248                 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1249                 return;
1250         }
1251
1252         bfa_ioc_fwver_clear(ioc);
1253         writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1254         writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1255
1256         /*
1257          * Try to lock and then unlock the semaphore.
1258          */
1259         readl(ioc->ioc_regs.ioc_sem_reg);
1260         writel(1, ioc->ioc_regs.ioc_sem_reg);
1261
1262         /* Unlock init semaphore */
1263         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1264 }
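/*
 * In short: serialize on the init semaphore, and if the last firmware
 * boot was anything other than a NORMAL boot (fwhdr.exec), invalidate the
 * saved firmware header, force both fwstate registers to BFI_IOC_UNINIT
 * and release a possibly stuck hardware semaphore, so that the next
 * firmware load starts from a clean slate.
 */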
1265
1266 static void
1267 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1268 {
1269         u32     r32;
1270
1271         /**
1272          * First read to the semaphore register will return 0, subsequent reads
1273          * will return 1. Semaphore is released by writing 1 to the register
1274          */
1275         r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1276         if (r32 == ~0) {
1277                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1278                 return;
1279         }
1280         if (!(r32 & 1)) {
1281                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1282                 return;
1283         }
1284
1285         mod_timer(&ioc->sem_timer, jiffies +
1286                 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1287 }
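/*
 * Register semantics as a small truth table: a read of 0 means the
 * semaphore was free and is now ours (IOCPF_E_SEMLOCKED); a read of 1
 * means another function holds it, so the sem_timer retries; an all-ones
 * read (~0) is the classic dead-PCI-mapping pattern and is escalated as
 * IOCPF_E_SEM_ERROR.
 */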
1288
1289 void
1290 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1291 {
1292         writel(1, ioc->ioc_regs.ioc_sem_reg);
1293 }
1294
1295 static void
1296 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1297 {
1298         del_timer(&ioc->sem_timer);
1299 }
1300
1301 /**
1302  * @brief
1303  * Initialize LPU local memory (aka secondary memory / SRAM)
1304  */
1305 static void
1306 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1307 {
1308         u32     pss_ctl;
1309         int             i;
1310 #define PSS_LMEM_INIT_TIME  10000
1311
1312         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1313         pss_ctl &= ~__PSS_LMEM_RESET;
1314         pss_ctl |= __PSS_LMEM_INIT_EN;
1315
1316         /*
1317          * I2C workaround: 12.5 kHz clock
1318          */
1319         pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1320         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1321
1322         /**
1323          * wait for memory initialization to be complete
1324          */
1325         i = 0;
1326         do {
1327                 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1328                 i++;
1329         } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1330
1331         /**
1332          * If memory initialization is not successful, IOC timeout will catch
1333          * such failures.
1334          */
1335         BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1336
1337         pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1338         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1339 }
1340
1341 static void
1342 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1343 {
1344         u32     pss_ctl;
1345
1346         /**
1347          * Take processor out of reset.
1348          */
1349         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1350         pss_ctl &= ~__PSS_LPU0_RESET;
1351
1352         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1353 }
1354
1355 static void
1356 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1357 {
1358         u32     pss_ctl;
1359
1360         /**
1361          * Put processors in reset.
1362          */
1363         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1364         pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1365
1366         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1367 }
1368
1369 /**
1370  * Read the running firmware's image header (version info) from SMEM.
1371  */
1372 void
1373 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1374 {
1375         u32     pgnum;
1376         u32     loff = 0;
1377         int             i;
1378         u32     *fwsig = (u32 *) fwhdr;
1379
1380         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1381         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1382
1383         for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1384              i++) {
1385                 fwsig[i] =
1386                         swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1387                 loff += sizeof(u32);
1388         }
1389 }
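/*
 * SMEM is reached through a sliding page window: host_page_num_fn selects
 * the page, smem_page_start maps its contents. For a flat offset off the
 * access pattern is, in effect:
 *
 *     pgnum  = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, off);
 *     window = PSS_SMEM_PGOFF(off);
 *     word   = readl(ioc->ioc_regs.smem_page_start + window);
 *
 * bfa_ioc_smem_pgnum() wraps the first computation; the swab32() above
 * corrects for the byte order the firmware image is stored in.
 */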
1390
1391 /**
1392  * Returns true if the running firmware's MD5 checksum matches the driver's.
1393  */
1394 bool
1395 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1396 {
1397         struct bfi_ioc_image_hdr *drv_fwhdr;
1398         int i;
1399
1400         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1401                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1402
1403         for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1404                 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1405                         return false;
1406         }
1407
1408         return true;
1409 }
1410
1411 /**
1412  * Return true if current running version is valid. Firmware signature and
1413  * execution context (driver/bios) must match.
1414  */
1415 static bool
1416 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1417 {
1418         struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1419
1420         bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1421         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1422                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1423
1424         if (fwhdr.signature != drv_fwhdr->signature)
1425                 return false;
1426
1427         if (swab32(fwhdr.bootenv) != boot_env)
1428                 return false;
1429
1430         return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1431 }
1432
1433 /**
1434  * Conditionally flush any pending message from firmware at start.
1435  */
1436 static void
1437 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1438 {
1439         u32     r32;
1440
1441         r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1442         if (r32)
1443                 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1444 }
1445
1446 /**
1447  * @img ioc_init_logic.jpg
1448  */
1449 static void
1450 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1451 {
1452         enum bfi_ioc_state ioc_fwstate;
1453         bool fwvalid;
1454         u32 boot_env;
1455
1456         ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1457
1458         if (force)
1459                 ioc_fwstate = BFI_IOC_UNINIT;
1460
1461         boot_env = BFI_FWBOOT_ENV_OS;
1462
1463         /**
1464          * check if firmware is valid
1465          */
1466         fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1467                 false : bfa_ioc_fwver_valid(ioc, boot_env);
1468
1469         if (!fwvalid) {
1470                 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1471                 bfa_ioc_poll_fwinit(ioc);
1472                 return;
1473         }
1474
1475         /**
1476          * If hardware initialization is in progress (initialized by other IOC),
1477          * just wait for an initialization completion interrupt.
1478          */
1479         if (ioc_fwstate == BFI_IOC_INITING) {
1480                 bfa_ioc_poll_fwinit(ioc);
1481                 return;
1482         }
1483
1484         /**
1485          * If IOC function is disabled and firmware version is same,
1486          * just re-enable IOC.
1487          */
1488         if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1489                 /**
1490                  * When using MSI-X any pending firmware ready event should
1491                  * be flushed. Otherwise MSI-X interrupts are not delivered.
1492                  */
1493                 bfa_ioc_msgflush(ioc);
1494                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1495                 return;
1496         }
1497
1498         /**
1499          * Initialize the h/w for any other states.
1500          */
1501         bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1502         bfa_ioc_poll_fwinit(ioc);
1503 }
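/*
 * The decision above, as a table:
 *
 *     fwstate             running fw valid?   action
 *     ------------------  -----------------   --------------------------
 *     UNINIT (or force)   treated as no       boot fw, poll for fwinit
 *     any other state     no                  boot fw, poll for fwinit
 *     INITING             yes                 poll for fwinit only
 *     DISABLED or OP      yes                 flush mbox, send FWREADY
 *     remaining states    yes                 boot fw, poll for fwinit
 */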
1504
1505 void
1506 bfa_nw_ioc_timeout(void *ioc_arg)
1507 {
1508         struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1509
1510         bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1511 }
1512
1513 static void
1514 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1515 {
1516         u32 *msgp = (u32 *) ioc_msg;
1517         u32 i;
1518
1519         BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1520
1521         /*
1522          * first write msg to mailbox registers
1523          */
1524         for (i = 0; i < len / sizeof(u32); i++)
1525                 writel(cpu_to_le32(msgp[i]),
1526                               ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1527
1528         for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1529                 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1530
1531         /*
1532          * write 1 to mailbox CMD to trigger LPU event
1533          */
1534         writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1535         (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1536 }
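/*
 * Two details worth noting: the mailbox is always padded with zeroes out
 * to BFI_IOC_MSGLEN_MAX so stale words from an earlier command can never
 * reach the firmware, and the final readl() of hfn_mbox_cmd is a
 * posted-write flush that ensures the doorbell has reached the adapter
 * before this function returns.
 */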
1537
1538 static void
1539 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1540 {
1541         struct bfi_ioc_ctrl_req enable_req;
1542         struct timeval tv;
1543
1544         bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1545                     bfa_ioc_portid(ioc));
1546         enable_req.clscode = htons(ioc->clscode);
1547         do_gettimeofday(&tv);
1548         enable_req.tv_sec = htonl(tv.tv_sec);
1549         bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1550 }
1551
1552 static void
1553 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1554 {
1555         struct bfi_ioc_ctrl_req disable_req;
1556
1557         bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1558                     bfa_ioc_portid(ioc));
1559         bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1560 }
1561
1562 static void
1563 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1564 {
1565         struct bfi_ioc_getattr_req attr_req;
1566
1567         bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1568                     bfa_ioc_portid(ioc));
1569         bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1570         bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1571 }
1572
1573 void
1574 bfa_nw_ioc_hb_check(void *cbarg)
1575 {
1576         struct bfa_ioc *ioc = cbarg;
1577         u32     hb_count;
1578
1579         hb_count = readl(ioc->ioc_regs.heartbeat);
1580         if (ioc->hb_count == hb_count) {
1581                 bfa_ioc_recover(ioc);
1582                 return;
1583         }
1584
1585         ioc->hb_count = hb_count;
1586
1587         bfa_ioc_mbox_poll(ioc);
1588         mod_timer(&ioc->hb_timer, jiffies +
1589                 msecs_to_jiffies(BFA_IOC_HB_TOV));
1590 }
1591
1592 static void
1593 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1594 {
1595         ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1596         mod_timer(&ioc->hb_timer, jiffies +
1597                 msecs_to_jiffies(BFA_IOC_HB_TOV));
1598 }
1599
1600 static void
1601 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1602 {
1603         del_timer(&ioc->hb_timer);
1604 }
1605
1606 /**
1607  * @brief
1608  *      Initiate a full firmware download.
1609  */
1610 static void
1611 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1612                     u32 boot_env)
1613 {
1614         u32 *fwimg;
1615         u32 pgnum;
1616         u32 loff = 0;
1617         u32 chunkno = 0;
1618         u32 i;
1619         u32 asicmode;
1620
1621         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1622
1623         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1624
1625         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1626
1627         for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1628                 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1629                         chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1630                         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1631                                         BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1632                 }
1633
1634                 /**
1635                  * write smem
1636                  */
1637                 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1638                               ((ioc->ioc_regs.smem_page_start) + (loff)));
1639
1640                 loff += sizeof(u32);
1641
1642                 /**
1643                  * handle page offset wrap around
1644                  */
1645                 loff = PSS_SMEM_PGOFF(loff);
1646                 if (loff == 0) {
1647                         pgnum++;
1648                         writel(pgnum,
1649                                       ioc->ioc_regs.host_page_num_fn);
1650                 }
1651         }
1652
1653         writel(bfa_ioc_smem_pgnum(ioc, 0),
1654                       ioc->ioc_regs.host_page_num_fn);
1655
1656         /*
1657          * Set boot type, env and device mode at the end.
1658          */
1659         asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1660                                         ioc->port0_mode, ioc->port1_mode);
1661         writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1662                         + BFI_FWBOOT_DEVMODE_OFF));
1663         writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1664                         + (BFI_FWBOOT_TYPE_OFF)));
1665         writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1666                         + (BFI_FWBOOT_ENV_OFF)));
1667 }
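/*
 * The image is handed out by bfa_cb_image_get_chunk() in fixed-size
 * pieces, so the flat word index i is translated whenever the loop
 * crosses a chunk boundary. Roughly (chunk size is assumed to be
 * BFI_FLASH_CHUNK_SZ_WORDS words, per the *_FLASH_CHUNK_* macros):
 *
 *     chunkno = BFA_IOC_FLASH_CHUNK_NO(i);                 i / chunk size
 *     word    = fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)];   i % chunk size
 *
 * SMEM page wrap-around is handled the same way as in
 * bfa_nw_ioc_smem_read() below.
 */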
1668
1669 static void
1670 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1671 {
1672         bfa_ioc_hwinit(ioc, force);
1673 }
1674
1675 /**
1676  * BFA ioc enable reply by firmware
1677  */
1678 static void
1679 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1680                         u8 cap_bm)
1681 {
1682         struct bfa_iocpf *iocpf = &ioc->iocpf;
1683
1684         ioc->port_mode = ioc->port_mode_cfg = port_mode;
1685         ioc->ad_cap_bm = cap_bm;
1686         bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1687 }
1688
1689 /**
1690  * @brief
1691  * Update BFA configuration from firmware configuration.
1692  */
1693 static void
1694 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1695 {
1696         struct bfi_ioc_attr *attr = ioc->attr;
1697
1698         attr->adapter_prop  = ntohl(attr->adapter_prop);
1699         attr->card_type     = ntohl(attr->card_type);
1700         attr->maxfrsize     = ntohs(attr->maxfrsize);
1701
1702         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1703 }
1704
1705 /**
1706  * Attach time initialization of mbox logic.
1707  */
1708 static void
1709 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1710 {
1711         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1712         int     mc;
1713
1714         INIT_LIST_HEAD(&mod->cmd_q);
1715         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1716                 mod->mbhdlr[mc].cbfn = NULL;
1717                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1718         }
1719 }
1720
1721 /**
1722  * Mbox poll timer -- restarts any pending mailbox requests.
1723  */
1724 static void
1725 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1726 {
1727         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1728         struct bfa_mbox_cmd *cmd;
1729         bfa_mbox_cmd_cbfn_t cbfn;
1730         void *cbarg;
1731         u32 stat;
1732
1733         /**
1734          * If no command pending, do nothing
1735          */
1736         if (list_empty(&mod->cmd_q))
1737                 return;
1738
1739         /**
1740          * If previous command is not yet fetched by firmware, do nothing
1741          */
1742         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1743         if (stat)
1744                 return;
1745
1746         /**
1747          * Enqueue command to firmware.
1748          */
1749         bfa_q_deq(&mod->cmd_q, &cmd);
1750         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1751
1752         /**
1753          * Give a callback to the client, indicating that the command is sent
1754          */
1755         if (cmd->cbfn) {
1756                 cbfn = cmd->cbfn;
1757                 cbarg = cmd->cbarg;
1758                 cmd->cbfn = NULL;
1759                 cbfn(cbarg);
1760         }
1761 }
1762
1763 /**
1764  * Clean up any pending mailbox requests.
1765  */
1766 static void
1767 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1768 {
1769         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1770         struct bfa_mbox_cmd *cmd;
1771
1772         while (!list_empty(&mod->cmd_q))
1773                 bfa_q_deq(&mod->cmd_q, &cmd);
1774 }
1775
1776 /**
1777  * Read data from SMEM to host memory through the PCI memory map.
1778  *
1779  * @param[in]  ioc     IOC instance
1780  * @param[in]  tbuf    host buffer to store the data read from SMEM
1781  * @param[in]  soff    SMEM offset to read from
1782  * @param[in]  sz      number of bytes to read
      *
      * Returns 0 on success, 1 if the init semaphore could not be taken.
1783  */
1784 static int
1785 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1786 {
1787         u32 pgnum, loff, r32;
1788         int i, len;
1789         u32 *buf = tbuf;
1790
1791         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1792         loff = PSS_SMEM_PGOFF(soff);
1793
1794         /*
1795          *  Hold semaphore to serialize PLL init and firmware trace access.
1796          */
1797         if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
1798                 return 1;
1799
1800         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1801
1802         len = sz / sizeof(u32);
1803         for (i = 0; i < len; i++) {
1804                 r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1805                 buf[i] = be32_to_cpu(r32);
1806                 loff += sizeof(u32);
1807
1808                 /**
1809                  * handle page offset wrap around
1810                  */
1811                 loff = PSS_SMEM_PGOFF(loff);
1812                 if (loff == 0) {
1813                         pgnum++;
1814                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1815                 }
1816         }
1817
1818         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1819                ioc->ioc_regs.host_page_num_fn);
1820
1821         /*
1822          * release semaphore
1823          */
1824         readl(ioc->ioc_regs.ioc_init_sem_reg);
1825         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1826         return 0;
1827 }
1828
1829 /**
1830  * Read the firmware trace from IOC SMEM.
1831  */
1832 int
1833 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1834 {
1835         u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1836         int tlen, status = 0;
1837
1838         tlen = *trclen;
1839         if (tlen > BNA_DBG_FWTRC_LEN)
1840                 tlen = BNA_DBG_FWTRC_LEN;
1841
1842         status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1843         *trclen = tlen;
1844         return status;
1845 }
1846
1847 /**
1848  * Save firmware trace if configured.
1849  */
1850 static void
1851 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1852 {
1853         int tlen;
1854
1855         if (ioc->dbg_fwsave_once) {
1856                 ioc->dbg_fwsave_once = 0;
1857                 if (ioc->dbg_fwsave_len) {
1858                         tlen = ioc->dbg_fwsave_len;
1859                         bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1860                 }
1861         }
1862 }
1863
1864 /**
1865  * Retrieve saved firmware trace from a prior IOC failure.
1866  */
1867 int
1868 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1869 {
1870         int tlen;
1871
1872         if (ioc->dbg_fwsave_len == 0)
1873                 return BFA_STATUS_ENOFSAVE;
1874
1875         tlen = *trclen;
1876         if (tlen > ioc->dbg_fwsave_len)
1877                 tlen = ioc->dbg_fwsave_len;
1878
1879         memcpy(trcdata, ioc->dbg_fwsave, tlen);
1880         *trclen = tlen;
1881         return BFA_STATUS_OK;
1882 }
1883
1884 static void
1885 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1886 {
1887         /**
1888          * Notify driver and common modules registered for notification.
1889          */
1890         ioc->cbfn->hbfail_cbfn(ioc->bfa);
1891         bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1892         bfa_nw_ioc_debug_save_ftrc(ioc);
1893 }
1894
1895 /**
1896  * IOCPF to IOC interface
1897  */
1898 static void
1899 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1900 {
1901         bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1902 }
1903
1904 static void
1905 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1906 {
1907         bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1908 }
1909
1910 static void
1911 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1912 {
1913         bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
1914 }
1915
1916 static void
1917 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
1918 {
1919         bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1920 }
1921
1922 static void
1923 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1924 {
1925         /**
1926          * Provide enable completion callback and AEN notification.
1927          */
1928         ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1929 }
1930
1931 /**
1932  * IOC public
1933  */
1934 static enum bfa_status
1935 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1936 {
1937         /*
1938          *  Hold semaphore so that nobody can access the chip during init.
1939          */
1940         bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1941
1942         bfa_ioc_pll_init_asic(ioc);
1943
1944         ioc->pllinit = true;
1945
1946         /* Initialize LMEM */
1947         bfa_ioc_lmem_init(ioc);
1948
1949         /*
1950          *  release semaphore.
1951          */
1952         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1953
1954         return BFA_STATUS_OK;
1955 }
1956
1957 /**
1958  * Boot IOC firmware. Also the interface used by the diag module to do
1959  * firmware boot with memory test as the entry vector.
1960  */
1961 static void
1962 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1963                 u32 boot_env)
1964 {
1965         bfa_ioc_stats(ioc, ioc_boots);
1966
1967         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1968                 return;
1969
1970         /**
1971          * Initialize IOC state of all functions on a chip reset.
1972          */
1973         if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1974                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1975                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1976         } else {
1977                 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1978                 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1979         }
1980
1981         bfa_ioc_msgflush(ioc);
1982         bfa_ioc_download_fw(ioc, boot_type, boot_env);
1983         bfa_ioc_lpu_start(ioc);
1984 }
1985
1986 /**
1987  * Enable/disable IOC failure auto recovery.
1988  */
1989 void
1990 bfa_nw_ioc_auto_recover(bool auto_recover)
1991 {
1992         bfa_nw_auto_recover = auto_recover;
1993 }
1994
1995 static bool
1996 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1997 {
1998         u32     *msgp = mbmsg;
1999         u32     r32;
2000         int             i;
2001
2002         r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2003         if ((r32 & 1) == 0)
2004                 return false;
2005
2006         /**
2007          * read the MBOX msg
2008          */
2009         for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2010              i++) {
2011                 r32 = readl(ioc->ioc_regs.lpu_mbox +
2012                                    i * sizeof(u32));
2013                 msgp[i] = htonl(r32);
2014         }
2015
2016         /**
2017          * turn off mailbox interrupt by clearing mailbox status
2018          */
2019         writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2020         readl(ioc->ioc_regs.lpu_mbox_cmd);
2021
2022         return true;
2023 }
2024
2025 static void
2026 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2027 {
2028         union bfi_ioc_i2h_msg_u *msg;
2029         struct bfa_iocpf *iocpf = &ioc->iocpf;
2030
2031         msg = (union bfi_ioc_i2h_msg_u *) m;
2032
2033         bfa_ioc_stats(ioc, ioc_isrs);
2034
2035         switch (msg->mh.msg_id) {
2036         case BFI_IOC_I2H_HBEAT:
2037                 break;
2038
2039         case BFI_IOC_I2H_ENABLE_REPLY:
2040                 bfa_ioc_enable_reply(ioc,
2041                         (enum bfa_mode)msg->fw_event.port_mode,
2042                         msg->fw_event.cap_bm);
2043                 break;
2044
2045         case BFI_IOC_I2H_DISABLE_REPLY:
2046                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2047                 break;
2048
2049         case BFI_IOC_I2H_GETATTR_REPLY:
2050                 bfa_ioc_getattr_reply(ioc);
2051                 break;
2052
2053         default:
2054                 BUG_ON(1);
2055         }
2056 }
2057
2058 /**
2059  * IOC attach time initialization and setup.
2060  *
2061  * @param[in]   ioc     IOC instance
2062  * @param[in]   bfa     driver instance structure
      * @param[in]   cbfn    driver callbacks for IOC events
2063  */
2064 void
2065 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2066 {
2067         ioc->bfa        = bfa;
2068         ioc->cbfn       = cbfn;
2069         ioc->fcmode     = false;
2070         ioc->pllinit    = false;
2071         ioc->dbg_fwsave_once = true;
2072         ioc->iocpf.ioc  = ioc;
2073
2074         bfa_ioc_mbox_attach(ioc);
2075         INIT_LIST_HEAD(&ioc->notify_q);
2076
2077         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2078         bfa_fsm_send_event(ioc, IOC_E_RESET);
2079 }
2080
2081 /**
2082  * Driver detach time IOC cleanup.
2083  */
2084 void
2085 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2086 {
2087         bfa_fsm_send_event(ioc, IOC_E_DETACH);
2088
2089         /* Done with detach, empty the notify_q. */
2090         INIT_LIST_HEAD(&ioc->notify_q);
2091 }
2092
2093 /**
2094  * Set up IOC PCI properties.
2095  *
2096  * @param[in]   pcidev  PCI device information for this IOC
      * @param[in]   clscode PCI function class code (FC or Ethernet)
2097  */
2098 void
2099 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2100                  enum bfi_pcifn_class clscode)
2101 {
2102         ioc->clscode    = clscode;
2103         ioc->pcidev     = *pcidev;
2104
2105         /**
2106          * Initialize IOC and device personality
2107          */
2108         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2109         ioc->asic_mode  = BFI_ASIC_MODE_FC;
2110
2111         switch (pcidev->device_id) {
2112         case PCI_DEVICE_ID_BROCADE_CT:
2113                 ioc->asic_gen = BFI_ASIC_GEN_CT;
2114                 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2115                 ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2116                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2117                 ioc->ad_cap_bm = BFA_CM_CNA;
2118                 break;
2119
2120         case BFA_PCI_DEVICE_ID_CT2:
2121                 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2122                 if (clscode == BFI_PCIFN_CLASS_FC &&
2123                         pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2124                         ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2125                         ioc->fcmode = true;
2126                         ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2127                         ioc->ad_cap_bm = BFA_CM_HBA;
2128                 } else {
2129                         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2130                         ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2131                         if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2132                                 ioc->port_mode =
2133                                 ioc->port_mode_cfg = BFA_MODE_CNA;
2134                                 ioc->ad_cap_bm = BFA_CM_CNA;
2135                         } else {
2136                                 ioc->port_mode =
2137                                 ioc->port_mode_cfg = BFA_MODE_NIC;
2138                                 ioc->ad_cap_bm = BFA_CM_NIC;
2139                         }
2140                 }
2141                 break;
2142
2143         default:
2144                 BUG_ON(1);
2145         }
2146
2147         /**
2148          * Set asic specific interfaces.
2149          */
2150         if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2151                 bfa_nw_ioc_set_ct_hwif(ioc);
2152         else {
2153                 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2154                 bfa_nw_ioc_set_ct2_hwif(ioc);
2155                 bfa_nw_ioc_ct2_poweron(ioc);
2156         }
2157
2158         bfa_ioc_map_port(ioc);
2159         bfa_ioc_reg_init(ioc);
2160 }
2161
2162 /**
2163  * Initialize IOC DMA memory.
2164  *
2165  * @param[in]   dm_kva  kernel virtual address of IOC DMA memory
2166  * @param[in]   dm_pa   physical address of IOC DMA memory
2167  */
2168 void
2169 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
2170 {
2171         /**
2172          * dma memory for firmware attribute
2173          */
2174         ioc->attr_dma.kva = dm_kva;
2175         ioc->attr_dma.pa = dm_pa;
2176         ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2177 }
2178
2179 /**
2180  * Return the size of DMA memory required.
2181  */
2182 u32
2183 bfa_nw_ioc_meminfo(void)
2184 {
2185         return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2186 }
2187
2188 void
2189 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2190 {
2191         bfa_ioc_stats(ioc, ioc_enables);
2192         ioc->dbg_fwsave_once = true;
2193
2194         bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2195 }
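
     /*
      * Typical IOC bring-up sequence from the driver, as a hedged sketch
      * of the API ordering (locking and error handling omitted; the
      * pcidev_info/dma variable names are illustrative):
      *
      *        bfa_nw_ioc_attach(ioc, bnad, &ioc_cbfn);
      *        bfa_nw_ioc_pci_init(ioc, &pcidev_info, BFI_PCIFN_CLASS_ETH);
      *        bfa_nw_ioc_mem_claim(ioc, dma_kva, dma_pa);
      *        bfa_nw_ioc_enable(ioc);
      *
      * The claimed region is bfa_nw_ioc_meminfo() bytes; enable completion
      * is reported asynchronously through ioc_cbfn->enable_cbfn.
      */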
2196
2197 void
2198 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2199 {
2200         bfa_ioc_stats(ioc, ioc_disables);
2201         bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2202 }
2203
2204 /**
2205  * Initialize memory for saving firmware trace.
2206  */
2207 void
2208 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2209 {
2210         ioc->dbg_fwsave = dbg_fwsave;
2211         ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2212 }
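
     /*
      * Hedged usage sketch for the firmware trace facilities (fwsave_buf,
      * trc_buf, len and consume() are illustrative): claim a
      * BNA_DBG_FWTRC_LEN buffer at setup time, then fetch the auto-saved
      * trace after a failure:
      *
      *        bfa_nw_ioc_debug_memclaim(ioc, fwsave_buf);
      *        ...
      *        len = BNA_DBG_FWTRC_LEN;
      *        if (bfa_nw_ioc_debug_fwsave(ioc, trc_buf, &len) ==
      *            BFA_STATUS_OK)
      *                consume(trc_buf, len);
      *
      * Note the save buffer is used only when IOC auto recovery is
      * enabled (see bfa_nw_ioc_debug_memclaim() above).
      */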
2213
2214 static u32
2215 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2216 {
2217         return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2218 }
2219
2220 /**
2221  * Register a mailbox message handler, to be called by common modules.
2222  */
2223 void
2224 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2225                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2226 {
2227         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2228
2229         mod->mbhdlr[mc].cbfn    = cbfn;
2230         mod->mbhdlr[mc].cbarg = cbarg;
2231 }
2232
2233 /**
2234  * Queue a mailbox command request to firmware. If the mailbox is busy,
2235  * the command is queued and sent later by the poll timer. It is the
      * caller's responsibility to serialize.
2236  *
2237  * @param[in]   ioc     IOC instance
2238  * @param[in]   cmd     Mailbox command
      *
      * Returns true if the command was queued, false if it was handed to
      * firmware immediately.
2239  */
2240 bool
2241 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2242                         bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2243 {
2244         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2245         u32                     stat;
2246
2247         cmd->cbfn = cbfn;
2248         cmd->cbarg = cbarg;
2249
2250         /**
2251          * If a previous command is pending, queue new command
2252          */
2253         if (!list_empty(&mod->cmd_q)) {
2254                 list_add_tail(&cmd->qe, &mod->cmd_q);
2255                 return true;
2256         }
2257
2258         /**
2259          * If mailbox is busy, queue command for poll timer
2260          */
2261         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2262         if (stat) {
2263                 list_add_tail(&cmd->qe, &mod->cmd_q);
2264                 return true;
2265         }
2266
2267         /**
2268          * mailbox is free -- queue command to firmware
2269          */
2270         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2271
2272         return false;
2273 }
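
     /*
      * Hedged usage sketch for the mailbox API: register a response
      * handler for the message class, build an H2I message in cmd.msg and
      * queue it (my_rsp_handler, my_msg_sent and my_arg are illustrative):
      *
      *        struct bfa_mbox_cmd cmd;
      *        struct bfi_flash_query_req *req =
      *                        (struct bfi_flash_query_req *) cmd.msg;
      *
      *        bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_FLASH, my_rsp_handler,
      *                               my_arg);
      *        bfi_h2i_set(req->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
      *                    bfa_ioc_portid(ioc));
      *        bfa_nw_ioc_mbox_queue(ioc, &cmd, my_msg_sent, my_arg);
      *
      * bfa_nw_flash_attach() and bfa_nw_flash_get_attr() below follow
      * exactly this pattern.
      */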
2274
2275 /**
2276  * Handle mailbox interrupts
2277  */
2278 void
2279 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2280 {
2281         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2282         struct bfi_mbmsg m;
2283         int                             mc;
2284
2285         if (bfa_ioc_msgget(ioc, &m)) {
2286                 /**
2287                  * Treat IOC message class as special.
2288                  */
2289                 mc = m.mh.msg_class;
2290                 if (mc == BFI_MC_IOC) {
2291                         bfa_ioc_isr(ioc, &m);
2292                         return;
2293                 }
2294
2295                 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2296                         return;
2297
2298                 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2299         }
2300
2301         bfa_ioc_lpu_read_stat(ioc);
2302
2303         /**
2304          * Try to send pending mailbox commands
2305          */
2306         bfa_ioc_mbox_poll(ioc);
2307 }
2308
2309 void
2310 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2311 {
2312         bfa_ioc_stats(ioc, ioc_hbfails);
2313         bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2314         bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2315 }
2316
2317 /**
2318  * Return true if IOC is disabled.
2319  */
2320 bool
2321 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2322 {
2323         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2324                 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2325 }
2326
2327 /**
2328  * Return true if IOC is operational.
2329  */
2330 bool
2331 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2332 {
2333         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2334 }
2335
2336 /**
2337  * Add to the IOC event notification queue. To be used by common
2338  * modules such as cee, port, diag.
2339  */
2340 void
2341 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2342                         struct bfa_ioc_notify *notify)
2343 {
2344         list_add_tail(&notify->qe, &ioc->notify_q);
2345 }
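
     /*
      * Hedged usage sketch, mirroring what bfa_nw_flash_attach() does
      * below (my_notify_cb and mod are illustrative):
      *
      *        bfa_ioc_notify_init(&mod->ioc_notify, my_notify_cb, mod);
      *        bfa_nw_ioc_notify_register(ioc, &mod->ioc_notify);
      *
      * my_notify_cb() is then invoked with events such as BFA_IOC_E_FAILED
      * and BFA_IOC_E_DISABLED.
      */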
2346
2347 #define BFA_MFG_NAME "Brocade"
2348 static void
2349 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2350                          struct bfa_adapter_attr *ad_attr)
2351 {
2352         struct bfi_ioc_attr *ioc_attr;
2353
2354         ioc_attr = ioc->attr;
2355
2356         bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2357         bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2358         bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2359         bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2360         memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2361                       sizeof(struct bfa_mfg_vpd));
2362
2363         ad_attr->nports = bfa_ioc_get_nports(ioc);
2364         ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2365
2366         bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2367         /* For now, model descr uses same model string */
2368         bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2369
2370         ad_attr->card_type = ioc_attr->card_type;
2371         ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2372
2373         if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2374                 ad_attr->prototype = 1;
2375         else
2376                 ad_attr->prototype = 0;
2377
2378         ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2379         ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
2380
2381         ad_attr->pcie_gen = ioc_attr->pcie_gen;
2382         ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2383         ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2384         ad_attr->asic_rev = ioc_attr->asic_rev;
2385
2386         bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2387 }
2388
2389 static enum bfa_ioc_type
2390 bfa_ioc_get_type(struct bfa_ioc *ioc)
2391 {
2392         if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2393                 return BFA_IOC_TYPE_LL;
2394
2395         BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2396
2397         return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2398                 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2399 }
2400
2401 static void
2402 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2403 {
2404         memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2405         memcpy(serial_num,
2406                         (void *)ioc->attr->brcd_serialnum,
2407                         BFA_ADAPTER_SERIAL_NUM_LEN);
2408 }
2409
2410 static void
2411 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2412 {
2413         memset(fw_ver, 0, BFA_VERSION_LEN);
2414         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2415 }
2416
2417 static void
2418 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2419 {
2420         BUG_ON(!(chip_rev));
2421
2422         memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2423
2424         chip_rev[0] = 'R';
2425         chip_rev[1] = 'e';
2426         chip_rev[2] = 'v';
2427         chip_rev[3] = '-';
2428         chip_rev[4] = ioc->attr->asic_rev;
2429         chip_rev[5] = '\0';
2430 }
2431
2432 static void
2433 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2434 {
2435         memset(optrom_ver, 0, BFA_VERSION_LEN);
2436         memcpy(optrom_ver, ioc->attr->optrom_version,
2437                       BFA_VERSION_LEN);
2438 }
2439
2440 static void
2441 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2442 {
2443         memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2444         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2445 }
2446
2447 static void
2448 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2449 {
2450         struct bfi_ioc_attr *ioc_attr;
2451
2452         BUG_ON(!(model));
2453         memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2454
2455         ioc_attr = ioc->attr;
2456
2457         snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2458                 BFA_MFG_NAME, ioc_attr->card_type);
2459 }
2460
2461 static enum bfa_ioc_state
2462 bfa_ioc_get_state(struct bfa_ioc *ioc)
2463 {
2464         enum bfa_iocpf_state iocpf_st;
2465         enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2466
2467         if (ioc_st == BFA_IOC_ENABLING ||
2468                 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2469
2470                 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2471
2472                 switch (iocpf_st) {
2473                 case BFA_IOCPF_SEMWAIT:
2474                         ioc_st = BFA_IOC_SEMWAIT;
2475                         break;
2476
2477                 case BFA_IOCPF_HWINIT:
2478                         ioc_st = BFA_IOC_HWINIT;
2479                         break;
2480
2481                 case BFA_IOCPF_FWMISMATCH:
2482                         ioc_st = BFA_IOC_FWMISMATCH;
2483                         break;
2484
2485                 case BFA_IOCPF_FAIL:
2486                         ioc_st = BFA_IOC_FAIL;
2487                         break;
2488
2489                 case BFA_IOCPF_INITFAIL:
2490                         ioc_st = BFA_IOC_INITFAIL;
2491                         break;
2492
2493                 default:
2494                         break;
2495                 }
2496         }
2497         return ioc_st;
2498 }
2499
2500 void
2501 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2502 {
2503         memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2504
2505         ioc_attr->state = bfa_ioc_get_state(ioc);
2506         ioc_attr->port_id = ioc->port_id;
2507         ioc_attr->port_mode = ioc->port_mode;
2508
2509         ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2510         ioc_attr->cap_bm = ioc->ad_cap_bm;
2511
2512         ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2513
2514         bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2515
2516         ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2517         ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2518         bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2519 }
2520
2521 /**
2522  * WWN public
2523  */
2524 static u64
2525 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2526 {
2527         return ioc->attr->pwwn;
2528 }
2529
2530 mac_t
2531 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2532 {
2533         return ioc->attr->mac;
2534 }
2535
2536 /**
2537  * Firmware failure detected. Start recovery actions.
2538  */
2539 static void
2540 bfa_ioc_recover(struct bfa_ioc *ioc)
2541 {
2542         pr_crit("Heart Beat of IOC has failed\n");
2543         bfa_ioc_stats(ioc, ioc_hbfails);
2544         bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2545         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2546 }
2547
2548 /**
2549  * IOCPF (IOC per-PF) private functions
2551  */
2552
2553 static void
2554 bfa_iocpf_enable(struct bfa_ioc *ioc)
2555 {
2556         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2557 }
2558
2559 static void
2560 bfa_iocpf_disable(struct bfa_ioc *ioc)
2561 {
2562         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2563 }
2564
2565 static void
2566 bfa_iocpf_fail(struct bfa_ioc *ioc)
2567 {
2568         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2569 }
2570
2571 static void
2572 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2573 {
2574         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2575 }
2576
2577 static void
2578 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2579 {
2580         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2581 }
2582
2583 static void
2584 bfa_iocpf_stop(struct bfa_ioc *ioc)
2585 {
2586         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2587 }
2588
2589 void
2590 bfa_nw_iocpf_timeout(void *ioc_arg)
2591 {
2592         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2593         enum bfa_iocpf_state iocpf_st;
2594
2595         iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2596
2597         if (iocpf_st == BFA_IOCPF_HWINIT)
2598                 bfa_ioc_poll_fwinit(ioc);
2599         else
2600                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2601 }
2602
2603 void
2604 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2605 {
2606         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2607
2608         bfa_ioc_hw_sem_get(ioc);
2609 }
2610
2611 static void
2612 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
2613 {
2614         u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2615
2616         if (fwstate == BFI_IOC_DISABLED) {
2617                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2618                 return;
2619         }
2620
2621         if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2622                 bfa_nw_iocpf_timeout(ioc);
2623         } else {
2624                 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2625                 mod_timer(&ioc->iocpf_timer, jiffies +
2626                         msecs_to_jiffies(BFA_IOC_POLL_TOV));
2627         }
2628 }
2629
2630 /*
2631  *      Flash module specific
2632  */
2633
2634 /*
2635  * The flash DMA buffer should be big enough to hold both the MFG block
2636  * and the ASIC block (64 KB) at the same time, and should be 2 KB
2637  * aligned so that a write segment never crosses a sector boundary.
2638  */
2639 #define BFA_FLASH_SEG_SZ        2048
2640 #define BFA_FLASH_DMA_BUF_SZ    \
2641         roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
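
     /*
      * Since 0x010000 (64 KB) is itself a multiple of BFA_FLASH_SEG_SZ,
      * the roundup() above works out to 0x010000 +
      * roundup(sizeof(struct bfa_mfg_block), 2048); that is, only the MFG
      * block portion is padded to the next 2 KB segment.
      */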
2642
2643 static void
2644 bfa_flash_cb(struct bfa_flash *flash)
2645 {
2646         flash->op_busy = 0;
2647         if (flash->cbfn)
2648                 flash->cbfn(flash->cbarg, flash->status);
2649 }
2650
2651 static void
2652 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2653 {
2654         struct bfa_flash *flash = cbarg;
2655
2656         switch (event) {
2657         case BFA_IOC_E_DISABLED:
2658         case BFA_IOC_E_FAILED:
2659                 if (flash->op_busy) {
2660                         flash->status = BFA_STATUS_IOC_FAILURE;
2661                         flash->cbfn(flash->cbarg, flash->status);
2662                         flash->op_busy = 0;
2663                 }
2664                 break;
2665         default:
2666                 break;
2667         }
2668 }
2669
2670 /*
2671  * Send flash write request.
2672  *
2673  * @param[in] flash - flash structure
2674  */
2675 static void
2676 bfa_flash_write_send(struct bfa_flash *flash)
2677 {
2678         struct bfi_flash_write_req *msg =
2679                         (struct bfi_flash_write_req *) flash->mb.msg;
2680         u32     len;
2681
2682         msg->type = cpu_to_be32(flash->type);
2683         msg->instance = flash->instance;
2684         msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
2685         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2686                flash->residue : BFA_FLASH_DMA_BUF_SZ;
2687         msg->length = cpu_to_be32(len);
2688
2689         /* indicate if it's the last msg of the whole write operation */
2690         msg->last = (len == flash->residue) ? 1 : 0;
2691
2692         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
2693                     bfa_ioc_portid(flash->ioc));
2694         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2695         memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
2696         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2697
2698         flash->residue -= len;
2699         flash->offset += len;
2700 }
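
     /*
      * One request carries at most BFA_FLASH_DMA_BUF_SZ bytes. Larger
      * updates proceed as a chain: each successful BFI_FLASH_I2H_WRITE_RSP
      * handled in bfa_flash_intr() below triggers the next
      * bfa_flash_write_send() until residue reaches zero, at which point
      * the client callback runs.
      */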
2701
2702 /*
2703  * Send flash read request.
2704  *
2705  * @param[in] cbarg - callback argument
2706  */
2707 static void
2708 bfa_flash_read_send(void *cbarg)
2709 {
2710         struct bfa_flash *flash = cbarg;
2711         struct bfi_flash_read_req *msg =
2712                         (struct bfi_flash_read_req *) flash->mb.msg;
2713         u32     len;
2714
2715         msg->type = cpu_to_be32(flash->type);
2716         msg->instance = flash->instance;
2717         msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
2718         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2719                flash->residue : BFA_FLASH_DMA_BUF_SZ;
2720         msg->length = cpu_to_be32(len);
2721         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
2722                     bfa_ioc_portid(flash->ioc));
2723         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2724         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2725 }
2726
2727 /*
2728  * Process flash response messages upon receiving interrupts.
2729  *
2730  * @param[in] flasharg - flash structure
2731  * @param[in] msg - message structure
2732  */
2733 static void
2734 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
2735 {
2736         struct bfa_flash *flash = flasharg;
2737         u32     status;
2738
2739         union {
2740                 struct bfi_flash_query_rsp *query;
2741                 struct bfi_flash_write_rsp *write;
2742                 struct bfi_flash_read_rsp *read;
2743                 struct bfi_mbmsg   *msg;
2744         } m;
2745
2746         m.msg = msg;
2747
2748         /* receiving response after ioc failure */
2749         if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
2750                 return;
2751
2752         switch (msg->mh.msg_id) {
2753         case BFI_FLASH_I2H_QUERY_RSP:
2754                 status = be32_to_cpu(m.query->status);
2755                 if (status == BFA_STATUS_OK) {
2756                         u32     i;
2757                         struct bfa_flash_attr *attr, *f;
2758
2759                         attr = (struct bfa_flash_attr *) flash->ubuf;
2760                         f = (struct bfa_flash_attr *) flash->dbuf_kva;
2761                         attr->status = be32_to_cpu(f->status);
2762                         attr->npart = be32_to_cpu(f->npart);
2763                         for (i = 0; i < attr->npart; i++) {
2764                                 attr->part[i].part_type =
2765                                         be32_to_cpu(f->part[i].part_type);
2766                                 attr->part[i].part_instance =
2767                                         be32_to_cpu(f->part[i].part_instance);
2768                                 attr->part[i].part_off =
2769                                         be32_to_cpu(f->part[i].part_off);
2770                                 attr->part[i].part_size =
2771                                         be32_to_cpu(f->part[i].part_size);
2772                                 attr->part[i].part_len =
2773                                         be32_to_cpu(f->part[i].part_len);
2774                                 attr->part[i].part_status =
2775                                         be32_to_cpu(f->part[i].part_status);
2776                         }
2777                 }
2778                 flash->status = status;
2779                 bfa_flash_cb(flash);
2780                 break;
2781         case BFI_FLASH_I2H_WRITE_RSP:
2782                 status = be32_to_cpu(m.write->status);
2783                 if (status != BFA_STATUS_OK || flash->residue == 0) {
2784                         flash->status = status;
2785                         bfa_flash_cb(flash);
2786                 } else
2787                         bfa_flash_write_send(flash);
2788                 break;
2789         case BFI_FLASH_I2H_READ_RSP:
2790                 status = be32_to_cpu(m.read->status);
2791                 if (status != BFA_STATUS_OK) {
2792                         flash->status = status;
2793                         bfa_flash_cb(flash);
2794                 } else {
2795                         u32 len = be32_to_cpu(m.read->length);
2796                         memcpy(flash->ubuf + flash->offset,
2797                                flash->dbuf_kva, len);
2798                         flash->residue -= len;
2799                         flash->offset += len;
2800                         if (flash->residue == 0) {
2801                                 flash->status = status;
2802                                 bfa_flash_cb(flash);
2803                         } else
2804                                 bfa_flash_read_send(flash);
2805                 }
2806                 break;
2807         case BFI_FLASH_I2H_BOOT_VER_RSP:
2808         case BFI_FLASH_I2H_EVENT:
2809                 break;
2810         default:
2811                 WARN_ON(1);
2812         }
2813 }
2814
2815 /*
2816  * Flash memory info API.
2817  */
2818 u32
2819 bfa_nw_flash_meminfo(void)
2820 {
2821         return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2822 }
2823
2824 /*
2825  * Flash attach API.
2826  *
2827  * @param[in] flash - flash structure
2828  * @param[in] ioc  - ioc structure
2829  * @param[in] dev  - device structure
2830  */
2831 void
2832 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2833 {
2834         flash->ioc = ioc;
2835         flash->cbfn = NULL;
2836         flash->cbarg = NULL;
2837         flash->op_busy = 0;
2838
2839         bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
2840         bfa_q_qe_init(&flash->ioc_notify);
2841         bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
2842         list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2843 }
2844
2845 /*
2846  * Claim memory for flash
2847  *
2848  * @param[in] flash - flash structure
2849  * @param[in] dm_kva - pointer to virtual memory address
2850  * @param[in] dm_pa - physical memory address
2851  */
2852 void
2853 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2854 {
2855         flash->dbuf_kva = dm_kva;
2856         flash->dbuf_pa = dm_pa;
2857         memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
2858         dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2859         dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2860 }
2861
2862 /*
2863  * Get flash attribute.
2864  *
2865  * @param[in] flash - flash structure
2866  * @param[out] attr - flash attribute structure, filled on completion
2867  * @param[in] cbfn - callback function
2868  * @param[in] cbarg - callback argument
2869  *
2870  * Return status.
2871  */
2872 enum bfa_status
2873 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2874                       bfa_cb_flash cbfn, void *cbarg)
2875 {
2876         struct bfi_flash_query_req *msg =
2877                         (struct bfi_flash_query_req *) flash->mb.msg;
2878
2879         if (!bfa_nw_ioc_is_operational(flash->ioc))
2880                 return BFA_STATUS_IOC_NON_OP;
2881
2882         if (flash->op_busy)
2883                 return BFA_STATUS_DEVBUSY;
2884
2885         flash->op_busy = 1;
2886         flash->cbfn = cbfn;
2887         flash->cbarg = cbarg;
2888         flash->ubuf = (u8 *) attr;
2889
2890         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
2891                     bfa_ioc_portid(flash->ioc));
2892         bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
2893         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2894
2895         return BFA_STATUS_OK;
2896 }
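
     /*
      * Hedged usage sketch (attr_done() and flash_attr are illustrative;
      * flash_attr is valid only after the callback reports BFA_STATUS_OK):
      *
      *        static void
      *        attr_done(void *cbarg, enum bfa_status status)
      *        {
      *                struct bfa_flash_attr *attr = cbarg;
      *                ...
      *        }
      *
      *        rc = bfa_nw_flash_get_attr(flash, &flash_attr, attr_done,
      *                                   &flash_attr);
      */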
2897
2898 /*
2899  * Update flash partition.
2900  *
2901  * @param[in] flash - flash structure
2902  * @param[in] type - flash partition type
2903  * @param[in] instance - flash partition instance
2904  * @param[in] buf - update data buffer
2905  * @param[in] len - data buffer length
2906  * @param[in] offset - offset relative to the partition starting address
2907  * @param[in] cbfn - callback function
2908  * @param[in] cbarg - callback argument
2909  *
2910  * Return status.
2911  */
2912 enum bfa_status
2913 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2914                          void *buf, u32 len, u32 offset,
2915                          bfa_cb_flash cbfn, void *cbarg)
2916 {
2917         if (!bfa_nw_ioc_is_operational(flash->ioc))
2918                 return BFA_STATUS_IOC_NON_OP;
2919
2920         /*
2921          * 'len' must be a non-zero multiple of 4 (word aligned)
2922          */
2923         if (!len || (len & 0x03))
2924                 return BFA_STATUS_FLASH_BAD_LEN;
2925
2926         if (type == BFA_FLASH_PART_MFG)
2927                 return BFA_STATUS_EINVAL;
2928
2929         if (flash->op_busy)
2930                 return BFA_STATUS_DEVBUSY;
2931
2932         flash->op_busy = 1;
2933         flash->cbfn = cbfn;
2934         flash->cbarg = cbarg;
2935         flash->type = type;
2936         flash->instance = instance;
2937         flash->residue = len;
2938         flash->offset = 0;
2939         flash->addr_off = offset;
2940         flash->ubuf = buf;
2941
2942         bfa_flash_write_send(flash);
2943
2944         return BFA_STATUS_OK;
2945 }
2946
2947 /*
2948  * Read flash partition.
2949  *
2950  * @param[in] flash - flash structure
2951  * @param[in] type - flash partition type
2952  * @param[in] instance - flash partition instance
2953  * @param[in] buf - read data buffer
2954  * @param[in] len - data buffer length
2955  * @param[in] offset - offset relative to the partition starting address
2956  * @param[in] cbfn - callback function
2957  * @param[in] cbarg - callback argument
2958  *
2959  * Return status.
2960  */
2961 enum bfa_status
2962 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
2963                        void *buf, u32 len, u32 offset,
2964                        bfa_cb_flash cbfn, void *cbarg)
2965 {
2966         if (!bfa_nw_ioc_is_operational(flash->ioc))
2967                 return BFA_STATUS_IOC_NON_OP;
2968
2969         /*
2970          * 'len' must be a non-zero multiple of 4 (word aligned)
2971          */
2972         if (!len || (len & 0x03))
2973                 return BFA_STATUS_FLASH_BAD_LEN;
2974
2975         if (flash->op_busy)
2976                 return BFA_STATUS_DEVBUSY;
2977
2978         flash->op_busy = 1;
2979         flash->cbfn = cbfn;
2980         flash->cbarg = cbarg;
2981         flash->type = type;
2982         flash->instance = instance;
2983         flash->residue = len;
2984         flash->offset = 0;
2985         flash->addr_off = offset;
2986         flash->ubuf = buf;
2987
2988         bfa_flash_read_send(flash);
2989
2990         return BFA_STATUS_OK;
2991 }
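
     /*
      * Hedged usage sketch for a partition read (the partition type,
      * buffer and callback names are illustrative; len must be a non-zero
      * multiple of 4 and completion is reported through the callback):
      *
      *        rc = bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
      *                                    buf, len, 0, read_done, dev);
      *        if (rc != BFA_STATUS_OK)
      *                handle DEVBUSY / IOC_NON_OP here;
      *
      * bfa_nw_flash_update_part() follows the same calling pattern for
      * writes, except that BFA_FLASH_PART_MFG updates are rejected.
      */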