/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV             3000    /* msecs */
#define BFA_IOC_HWSEM_TOV       500     /* msecs */
#define BFA_IOC_HB_TOV          500     /* msecs */
#define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV        BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)                                       \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,         \
                        bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)        bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)  (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
                                enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*  IOC reset request           */
        IOC_E_ENABLE            = 2,    /*  IOC enable request          */
        IOC_E_DISABLE           = 3,    /*  IOC disable request */
        IOC_E_DETACH            = 4,    /*  driver detach cleanup       */
        IOC_E_ENABLED           = 5,    /*  f/w enabled         */
        IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
        IOC_E_DISABLED          = 7,    /*  f/w disabled                */
        IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
        IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
        IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
        IOC_E_TIMEOUT           = 11,   /*  timeout                     */
        IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
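
/*
 * Note (editorial): bfa_sm_to_state() walks this table to translate the
 * current state-handler function pointer back into its BFA_IOC_xxx enum
 * value for external reporting; keep it in step with the
 * bfa_fsm_state_decl() list above.
 */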

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)                                    \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)     bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)                               \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
                        bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)       bfa_timer_stop(&(__ioc)->sem_timer)

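/*
 * Note (editorial): bfa_ioc_timer_start(), bfa_iocpf_timer_start() and
 * bfa_iocpf_poll_timer_start() all arm the same (__ioc)->ioc_timer, so the
 * IOC and IOCPF state machines are expected never to run these timeouts
 * concurrently; only the semaphore timer has dedicated storage (sem_timer).
 */
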
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*  IOCPF enable request        */
        IOCPF_E_DISABLE         = 2,    /*  IOCPF disable request       */
        IOCPF_E_STOP            = 3,    /*  stop on driver detach       */
        IOCPF_E_FWREADY         = 4,    /*  f/w initialization done     */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*  enable f/w response */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*  disable f/w response        */
        IOCPF_E_FAIL            = 7,    /*  failure notice by ioc sm    */
        IOCPF_E_INITFAIL        = 8,    /*  init fail notice by ioc sm  */
        IOCPF_E_GETATTRFAIL     = 9,    /*  init fail notice by ioc sm  */
        IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
        IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*  IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*  Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*  IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*  IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*  IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*  IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*  IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*  IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}
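
/*
 * Illustrative bring-up sequence (a sketch, not new code): the attach and
 * enable paths elsewhere in this driver drive the machine roughly as
 *
 *        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 *        bfa_fsm_send_event(ioc, IOC_E_RESET);        -- uninit -> reset
 *        bfa_fsm_send_event(ioc, IOC_E_ENABLE);       -- reset -> enabling
 */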


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        bfa_ioc_hb_monitor(ioc);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_hb_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_hb_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  The IOC will move to the disabled
                 * state once the iocpf sm completes its failure
                 * processing and itself reaches the disabled state.
                 */
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        case IOC_E_HWFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /*
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {

        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_HWERROR:
                /*
                 * HB failure notification, ignore.
                 */
                break;
        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->fw_mismatch_notified = BFA_FALSE;
        iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
        struct bfi_ioc_image_hdr_s      fwhdr;
        u32     r32, fwstate, pgnum, pgoff, loff = 0;
        int     i;

        /*
         * Spin on init semaphore to serialize.
         */
        r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        while (r32 & 0x1) {
                udelay(20);
                r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        }

        /* h/w sem init */
        fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
        if (fwstate == BFI_IOC_UNINIT) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

        if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        /*
         * Clear fwver hdr
         */
        pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
        writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
                bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
                loff += sizeof(u32);
        }

        bfa_trc(iocpf->ioc, fwstate);
        bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
        writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
        writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

        /*
         * Unlock the hw semaphore. Should be here only once per boot.
         */
        readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
        writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

        /*
         * unlock init semaphore.
         */
        writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_sem_timer_start(ioc);
                        }
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Call only the first time sm enters fwmismatch state.
         */
        if (iocpf->fw_mismatch_notified == BFA_FALSE)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->fw_mismatch_notified = BFA_TRUE;
        bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_sem_timer_start(ioc);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->poll_time = 0;
        bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWREADY:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        /*
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                bfa_iocpf_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_iocpf_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_leave(ioc);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /*
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);

        bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
        struct bfa_ioc_notify_s *notify;
        struct list_head        *qe;

        list_for_each(qe, &ioc->notify_q) {
                notify = (struct bfa_ioc_notify_s *)qe;
                notify->cbfn(notify->cbarg, event);
        }
}
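
/*
 * Registration sketch (illustrative only; assumes the bfa_ioc_notify_init()
 * helper from bfa_ioc.h, with a hypothetical my_cbfn/my_cbarg pair):
 *
 *        struct bfa_ioc_notify_s notify;
 *
 *        bfa_ioc_notify_init(&notify, my_cbfn, my_cbarg);
 *        list_add_tail(&notify.qe, &ioc->notify_q);
 */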

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (!(r32 & 1))
                return BFA_TRUE;

        return BFA_FALSE;
}
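
/*
 * Worst case the loop above polls BFA_SEM_SPINCNT (3000) times with a 2 us
 * delay between reads, i.e. it busy-waits roughly 6 ms before giving up and
 * returning BFA_FALSE.
 */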

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
        u32     r32;

        /*
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register.
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                WARN_ON(r32 == ~0);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        bfa_sem_timer_start(ioc);
}
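
/*
 * Unlike bfa_ioc_sem_get() above, this helper never spins: it either reports
 * IOCPF_E_SEMLOCKED right away or arms the semaphore timer so the acquisition
 * is retried after BFA_IOC_HWSEM_TOV (500 ms). An all-ones read indicates a
 * dead PCI mapping and is surfaced as IOCPF_E_SEM_ERROR.
 */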

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * i2c workaround 12.5khz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /*
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /*
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
        bfa_trc(ioc, pss_ctl);

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        u32     pgnum, pgoff;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
             i++) {
                fwsig[i] =
                        bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                loff += sizeof(u32);
        }
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        struct bfi_ioc_image_hdr_s *drv_fwhdr;
        int i;

        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
                        bfa_trc(ioc, i);
                        bfa_trc(ioc, fwhdr->md5sum[i]);
                        bfa_trc(ioc, drv_fwhdr->md5sum[i]);
                        return BFA_FALSE;
                }
        }

        bfa_trc(ioc, fwhdr->md5sum[0]);
        return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
        struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

        bfa_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

        if (fwhdr.signature != drv_fwhdr->signature) {
                bfa_trc(ioc, fwhdr.signature);
                bfa_trc(ioc, drv_fwhdr->signature);
                return BFA_FALSE;
        }

        if (swab32(fwhdr.bootenv) != boot_env) {
                bfa_trc(ioc, fwhdr.bootenv);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
        }

        return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
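
/*
 * In short: a resident firmware image is considered reusable only when all
 * three checks pass -- signature match, boot environment match, and an
 * md5sum identical to the driver's bundled image (bfa_ioc_fwver_cmp() above).
 */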

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
        u32     r32;

        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if (r32)
                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
        enum bfi_ioc_state ioc_fwstate;
        bfa_boolean_t fwvalid;
        u32 boot_type;
        u32 boot_env;

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;

        bfa_trc(ioc, ioc_fwstate);

        boot_type = BFI_FWBOOT_TYPE_NORMAL;
        boot_env = BFI_FWBOOT_ENV_OS;

        /*
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
                BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

        if (!fwvalid) {
                bfa_ioc_boot(ioc, boot_type, boot_env);
                bfa_ioc_poll_fwinit(ioc);
                return;
        }

        /*
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
                bfa_ioc_poll_fwinit(ioc);
                return;
        }

        /*
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         *
         * If option rom, IOC must not be in operational state. With
         * convergence, IOC will be in operational state when 2nd driver
         * is loaded.
         */
        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

                /*
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        /*
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, boot_type, boot_env);
        bfa_ioc_poll_fwinit(ioc);
}
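
/*
 * To summarize the decision above: an invalid or missing image forces a
 * fresh bfa_ioc_boot(); BFI_IOC_INITING means the partner function is
 * already booting the firmware, so we only poll; BFI_IOC_DISABLED or
 * BFI_IOC_OP with a matching version skips the download entirely and feeds
 * IOCPF_E_FWREADY straight into the iocpf state machine.
 */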

static void
bfa_ioc_timeout(void *ioc_arg)
{
        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

        bfa_trc(ioc, 0);
        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
        u32 *msgp = (u32 *) ioc_msg;
        u32 i;

        bfa_trc(ioc, msgp[0]);
        bfa_trc(ioc, len);

        WARN_ON(len > BFI_IOC_MSGLEN_MAX);

        /*
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
                writel(cpu_to_le32(msgp[i]),
                        ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
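
/*
 * Mailbox protocol sketch: the message is copied into the hfn_mbox window as
 * little-endian words, any remainder of the BFI_IOC_MSGLEN_MAX window is
 * zeroed, and writing 1 to hfn_mbox_cmd rings the LPU doorbell. The trailing
 * readl() flushes the posted PCI write.
 */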

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s enable_req;
        struct timeval tv;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.clscode = cpu_to_be16(ioc->clscode);
        do_gettimeofday(&tv);
        enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s disable_req;

        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_getattr_req_s    attr_req;

        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
                    bfa_ioc_portid(ioc));
        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
        struct bfa_ioc_s  *ioc = cbarg;
        u32     hb_count;

        hb_count = readl(ioc->ioc_regs.heartbeat);
        if (ioc->hb_count == hb_count) {
                bfa_ioc_recover(ioc);
                return;
        }
        ioc->hb_count = hb_count;

        bfa_ioc_mbox_poll(ioc);
        bfa_hb_timer_start(ioc);
}
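
/*
 * Heartbeat contract: firmware keeps incrementing the heartbeat register; if
 * two reads taken BFA_IOC_HB_TOV (500 ms) apart return the same count, the
 * IOC is presumed hung and bfa_ioc_recover() kicks the IOC state machine
 * into failure handling.
 */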

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
        bfa_hb_timer_start(ioc);
}

/*
 *      Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
                    u32 boot_env)
{
        u32 *fwimg;
        u32 pgnum, pgoff;
        u32 loff = 0;
        u32 chunkno = 0;
        u32 i;
        u32 asicmode;

        bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
        fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);

        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {

                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
                        fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                }

                /*
                 * write smem
                 */
                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
                              fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

                loff += sizeof(u32);

                /*
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
                }
        }

        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
                        ioc->ioc_regs.host_page_num_fn);

        /*
         * Set boot type and device mode at the end.
         */
        asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
                                ioc->port0_mode, ioc->port1_mode);
        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
                        swab32(asicmode));
        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
                        swab32(boot_type));
        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
                        swab32(boot_env));
}
1736
1737
1738 /*
1739  * Update BFA configuration from firmware configuration.
1740  */
1741 static void
1742 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1743 {
1744         struct bfi_ioc_attr_s   *attr = ioc->attr;
1745
1746         attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1747         attr->card_type     = be32_to_cpu(attr->card_type);
1748         attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
1749         ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
1750
1751         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1752 }
1753
1754 /*
1755  * Attach time initialization of mbox logic.
1756  */
1757 static void
1758 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1759 {
1760         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1761         int     mc;
1762
1763         INIT_LIST_HEAD(&mod->cmd_q);
1764         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1765                 mod->mbhdlr[mc].cbfn = NULL;
1766                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1767         }
1768 }
1769
1770 /*
1771  * Mbox poll timer -- restarts any pending mailbox requests.
1772  */
1773 static void
1774 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1775 {
1776         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1777         struct bfa_mbox_cmd_s           *cmd;
1778         u32                     stat;
1779
1780         /*
1781          * If no command pending, do nothing
1782          */
1783         if (list_empty(&mod->cmd_q))
1784                 return;
1785
1786         /*
1787          * If previous command is not yet fetched by firmware, do nothing
1788          */
1789         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1790         if (stat)
1791                 return;
1792
1793         /*
1794          * Enqueue command to firmware.
1795          */
1796         bfa_q_deq(&mod->cmd_q, &cmd);
1797         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1798 }
1799
1800 /*
1801  * Cleanup any pending requests.
1802  */
1803 static void
1804 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1805 {
1806         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1807         struct bfa_mbox_cmd_s           *cmd;
1808
1809         while (!list_empty(&mod->cmd_q))
1810                 bfa_q_deq(&mod->cmd_q, &cmd);
1811 }
1812
1813 /*
1814  * Read data from SMEM to host through PCI memmap
1815  *
1816  * @param[in]   ioc     memory for IOC
1817  * @param[in]   tbuf    app memory to store data from smem
1818  * @param[in]   soff    smem offset
1819  * @param[in]   sz      size of smem in bytes
1820  */
1821 static bfa_status_t
1822 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1823 {
1824         u32 pgnum, loff;
1825         __be32 r32;
1826         int i, len;
1827         u32 *buf = tbuf;
1828
1829         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1830         loff = PSS_SMEM_PGOFF(soff);
1831         bfa_trc(ioc, pgnum);
1832         bfa_trc(ioc, loff);
1833         bfa_trc(ioc, sz);
1834
1835         /*
1836          *  Hold semaphore to serialize pll init and fwtrc.
1837          */
1838         if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1839                 bfa_trc(ioc, 0);
1840                 return BFA_STATUS_FAILED;
1841         }
1842
1843         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1844
1845         len = sz/sizeof(u32);
1846         bfa_trc(ioc, len);
1847         for (i = 0; i < len; i++) {
1848                 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1849                 buf[i] = be32_to_cpu(r32);
1850                 loff += sizeof(u32);
1851
1852                 /*
1853                  * handle page offset wrap around
1854                  */
1855                 loff = PSS_SMEM_PGOFF(loff);
1856                 if (loff == 0) {
1857                         pgnum++;
1858                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1859                 }
1860         }
1861         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1862                         ioc->ioc_regs.host_page_num_fn);
1863         /*
1864          *  release semaphore.
1865          */
1866         readl(ioc->ioc_regs.ioc_init_sem_reg);
1867         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1868
1869         bfa_trc(ioc, pgnum);
1870         return BFA_STATUS_OK;
1871 }
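
/*
 * Usage sketch (illustrative only -- see bfa_ioc_debug_fwtrc() below for
 * a real caller):
 *
 *      u32 fwtrc[BFA_DBG_FWTRC_LEN / sizeof(u32)];
 *
 *      if (bfa_ioc_smem_read(ioc, fwtrc,
 *                      BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)),
 *                      sizeof(fwtrc)) != BFA_STATUS_OK)
 *              return;         -- init semaphore could not be taken
 */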
1872
1873 /*
1874  * Clear SMEM data from host through PCI memmap
1875  *
1876  * @param[in]   ioc     memory for IOC
1877  * @param[in]   soff    smem offset
1878  * @param[in]   sz      size of smem in bytes
1879  */
1880 static bfa_status_t
1881 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1882 {
1883         int i, len;
1884         u32 pgnum, loff;
1885
1886         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1887         loff = PSS_SMEM_PGOFF(soff);
1888         bfa_trc(ioc, pgnum);
1889         bfa_trc(ioc, loff);
1890         bfa_trc(ioc, sz);
1891
1892         /*
1893          *  Hold semaphore to serialize pll init and fwtrc.
1894          */
1895         if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1896                 bfa_trc(ioc, 0);
1897                 return BFA_STATUS_FAILED;
1898         }
1899
1900         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1901
1902         len = sz/sizeof(u32); /* len in words */
1903         bfa_trc(ioc, len);
1904         for (i = 0; i < len; i++) {
1905                 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1906                 loff += sizeof(u32);
1907
1908                 /*
1909                  * handle page offset wrap around
1910                  */
1911                 loff = PSS_SMEM_PGOFF(loff);
1912                 if (loff == 0) {
1913                         pgnum++;
1914                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1915                 }
1916         }
1917         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1918                         ioc->ioc_regs.host_page_num_fn);
1919
1920         /*
1921          *  release semaphore.
1922          */
1923         readl(ioc->ioc_regs.ioc_init_sem_reg);
1924         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1925         bfa_trc(ioc, pgnum);
1926         return BFA_STATUS_OK;
1927 }
1928
1929 static void
1930 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1931 {
1932         struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1933
1934         /*
1935          * Notify driver and common modules registered for notification.
1936          */
1937         ioc->cbfn->hbfail_cbfn(ioc->bfa);
1938         bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1939
1940         bfa_ioc_debug_save_ftrc(ioc);
1941
1942         BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1943                 "Heart Beat of IOC has failed\n");
1944         bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1946 }
1947
1948 static void
1949 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1950 {
1951         struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1952         /*
1953          * Provide enable completion callback.
1954          */
1955         ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1956         BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1957                 "Running firmware version is incompatible "
1958                 "with the driver version\n");
1959         bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1960 }
1961
1962 bfa_status_t
1963 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1964 {
1966         /*
1967          *  Hold semaphore so that nobody can access the chip during init.
1968          */
1969         bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1970
1971         bfa_ioc_pll_init_asic(ioc);
1972
1973         ioc->pllinit = BFA_TRUE;
1974
1975         /*
1976          * Initialize LMEM
1977          */
1978         bfa_ioc_lmem_init(ioc);
1979
1980         /*
1981          *  release semaphore.
1982          */
1983         readl(ioc->ioc_regs.ioc_init_sem_reg);
1984         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1985
1986         return BFA_STATUS_OK;
1987 }
1988
1989 /*
1990  * Interface used by diag module to do firmware boot with memory test
1991  * as the entry vector.
1992  */
1993 void
1994 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1995 {
1996         bfa_ioc_stats(ioc, ioc_boots);
1997
1998         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1999                 return;
2000
2001         /*
2002          * Initialize IOC state of all functions on a chip reset.
2003          */
2004         if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2005                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2006                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
2007         } else {
2008                 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2009                 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
2010         }
2011
2012         bfa_ioc_msgflush(ioc);
2013         bfa_ioc_download_fw(ioc, boot_type, boot_env);
2014         bfa_ioc_lpu_start(ioc);
2015 }
2016
2017 /*
2018  * Enable/disable IOC failure auto recovery.
2019  */
2020 void
2021 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2022 {
2023         bfa_auto_recover = auto_recover;
2024 }
2025
2026
2027
2028 bfa_boolean_t
2029 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2030 {
2031         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2032 }
2033
2034 bfa_boolean_t
2035 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2036 {
2037         u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
2038
2039         return ((r32 != BFI_IOC_UNINIT) &&
2040                 (r32 != BFI_IOC_INITING) &&
2041                 (r32 != BFI_IOC_MEMTEST));
2042 }
2043
2044 bfa_boolean_t
2045 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2046 {
2047         __be32  *msgp = mbmsg;
2048         u32     r32;
2049         int             i;
2050
2051         r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2052         if ((r32 & 1) == 0)
2053                 return BFA_FALSE;
2054
2055         /*
2056          * read the MBOX msg
2057          */
2058         for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2059              i++) {
2060                 r32 = readl(ioc->ioc_regs.lpu_mbox +
2061                                    i * sizeof(u32));
2062                 msgp[i] = cpu_to_be32(r32);
2063         }
2064
2065         /*
2066          * turn off mailbox interrupt by clearing mailbox status
2067          */
2068         writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2069         readl(ioc->ioc_regs.lpu_mbox_cmd);
2070
2071         return BFA_TRUE;
2072 }
2073
2074 void
2075 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2076 {
2077         union bfi_ioc_i2h_msg_u *msg;
2078         struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2079
2080         msg = (union bfi_ioc_i2h_msg_u *) m;
2081
2082         bfa_ioc_stats(ioc, ioc_isrs);
2083
2084         switch (msg->mh.msg_id) {
2085         case BFI_IOC_I2H_HBEAT:
2086                 break;
2087
2088         case BFI_IOC_I2H_ENABLE_REPLY:
2089                 ioc->port_mode = ioc->port_mode_cfg =
2090                                 (enum bfa_mode_s)msg->fw_event.port_mode;
2091                 ioc->ad_cap_bm = msg->fw_event.cap_bm;
2092                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2093                 break;
2094
2095         case BFI_IOC_I2H_DISABLE_REPLY:
2096                 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2097                 break;
2098
2099         case BFI_IOC_I2H_GETATTR_REPLY:
2100                 bfa_ioc_getattr_reply(ioc);
2101                 break;
2102
2103         default:
2104                 bfa_trc(ioc, msg->mh.msg_id);
2105                 WARN_ON(1);
2106         }
2107 }
2108
2109 /*
2110  * IOC attach time initialization and setup.
2111  *
2112  * @param[in]   ioc     memory for IOC
2113  * @param[in]   bfa     driver instance structure
2114  */
2115 void
2116 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2117                struct bfa_timer_mod_s *timer_mod)
2118 {
2119         ioc->bfa        = bfa;
2120         ioc->cbfn       = cbfn;
2121         ioc->timer_mod  = timer_mod;
2122         ioc->fcmode     = BFA_FALSE;
2123         ioc->pllinit    = BFA_FALSE;
2124         ioc->dbg_fwsave_once = BFA_TRUE;
2125         ioc->iocpf.ioc  = ioc;
2126
2127         bfa_ioc_mbox_attach(ioc);
2128         INIT_LIST_HEAD(&ioc->notify_q);
2129
2130         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2131         bfa_fsm_send_event(ioc, IOC_E_RESET);
2132 }
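
/*
 * Typical bring-up order (illustrative; the real sequencing lives in the
 * bfad driver):
 *
 *      bfa_ioc_attach(ioc, bfa, cbfn, timer_mod);
 *      bfa_ioc_pci_init(ioc, pcidev, clscode);
 *      bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *      bfa_ioc_enable(ioc);
 */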
2133
2134 /*
2135  * Driver detach time IOC cleanup.
2136  */
2137 void
2138 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2139 {
2140         bfa_fsm_send_event(ioc, IOC_E_DETACH);
2141         INIT_LIST_HEAD(&ioc->notify_q);
2142 }
2143
2144 /*
2145  * Setup IOC PCI properties.
2146  *
2147  * @param[in]   pcidev  PCI device information for this IOC
2148  */
2149 void
2150 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2151                 enum bfi_pcifn_class clscode)
2152 {
2153         ioc->clscode    = clscode;
2154         ioc->pcidev     = *pcidev;
2155
2156         /*
2157          * Initialize IOC and device personality
2158          */
2159         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2160         ioc->asic_mode  = BFI_ASIC_MODE_FC;
2161
2162         switch (pcidev->device_id) {
2163         case BFA_PCI_DEVICE_ID_FC_8G1P:
2164         case BFA_PCI_DEVICE_ID_FC_8G2P:
2165                 ioc->asic_gen = BFI_ASIC_GEN_CB;
2166                 ioc->fcmode = BFA_TRUE;
2167                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2168                 ioc->ad_cap_bm = BFA_CM_HBA;
2169                 break;
2170
2171         case BFA_PCI_DEVICE_ID_CT:
2172                 ioc->asic_gen = BFI_ASIC_GEN_CT;
2173                 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2174                 ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2175                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2176                 ioc->ad_cap_bm = BFA_CM_CNA;
2177                 break;
2178
2179         case BFA_PCI_DEVICE_ID_CT_FC:
2180                 ioc->asic_gen = BFI_ASIC_GEN_CT;
2181                 ioc->fcmode = BFA_TRUE;
2182                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2183                 ioc->ad_cap_bm = BFA_CM_HBA;
2184                 break;
2185
2186         case BFA_PCI_DEVICE_ID_CT2:
2187                 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2188                 if (clscode == BFI_PCIFN_CLASS_FC &&
2189                     pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2190                         ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2191                         ioc->fcmode = BFA_TRUE;
2192                         ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2193                         ioc->ad_cap_bm = BFA_CM_HBA;
2194                 } else {
2195                         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2196                         ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2197                         if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2198                                 ioc->port_mode =
2199                                 ioc->port_mode_cfg = BFA_MODE_CNA;
2200                                 ioc->ad_cap_bm = BFA_CM_CNA;
2201                         } else {
2202                                 ioc->port_mode =
2203                                 ioc->port_mode_cfg = BFA_MODE_NIC;
2204                                 ioc->ad_cap_bm = BFA_CM_NIC;
2205                         }
2206                 }
2207                 break;
2208
2209         default:
2210                 WARN_ON(1);
2211         }
2212
2213         /*
2214          * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2215          */
2216         if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2217                 bfa_ioc_set_cb_hwif(ioc);
2218         else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2219                 bfa_ioc_set_ct_hwif(ioc);
2220         else {
2221                 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2222                 bfa_ioc_set_ct2_hwif(ioc);
2223                 bfa_ioc_ct2_poweron(ioc);
2224         }
2225
2226         bfa_ioc_map_port(ioc);
2227         bfa_ioc_reg_init(ioc);
2228 }
2229
2230 /*
2231  * Initialize IOC dma memory
2232  *
2233  * @param[in]   dm_kva  kernel virtual address of IOC dma memory
2234  * @param[in]   dm_pa   physical address of IOC dma memory
2235  */
2236 void
2237 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2238 {
2239         /*
2240          * dma memory for firmware attribute
2241          */
2242         ioc->attr_dma.kva = dm_kva;
2243         ioc->attr_dma.pa = dm_pa;
2244         ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2245 }
2246
2247 void
2248 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2249 {
2250         bfa_ioc_stats(ioc, ioc_enables);
2251         ioc->dbg_fwsave_once = BFA_TRUE;
2252
2253         bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2254 }
2255
2256 void
2257 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2258 {
2259         bfa_ioc_stats(ioc, ioc_disables);
2260         bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2261 }
2262
2263
2264 /*
2265  * Initialize memory for saving firmware trace. Driver must initialize
2266  * trace memory before calling bfa_ioc_enable().
2267  */
2268 void
2269 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2270 {
2271         ioc->dbg_fwsave     = dbg_fwsave;
2272         ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2273 }
2274
2275 /*
2276  * Register mailbox message handler functions
2277  *
2278  * @param[in]   ioc             IOC instance
2279  * @param[in]   mcfuncs         message class handler functions
2280  */
2281 void
2282 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2283 {
2284         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2285         int                             mc;
2286
2287         for (mc = 0; mc < BFI_MC_MAX; mc++)
2288                 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2289 }
2290
2291 /*
2292  * Register mailbox message handler function, to be called by common modules
2293  */
2294 void
2295 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2296                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2297 {
2298         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2299
2300         mod->mbhdlr[mc].cbfn    = cbfn;
2301         mod->mbhdlr[mc].cbarg   = cbarg;
2302 }
2303
2304 /*
2305  * Queue a mailbox command request to firmware. If the mailbox is busy,
2306  * the command is queued and sent later by the poll timer; the caller
2307  * is responsible for serializing access.
2308  * @param[in]   ioc     IOC instance
2309  * @param[in]   cmd     Mailbox command
2310  */
2311 void
2312 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2313 {
2314         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2315         u32                     stat;
2316
2317         /*
2318          * If a previous command is pending, queue new command
2319          */
2320         if (!list_empty(&mod->cmd_q)) {
2321                 list_add_tail(&cmd->qe, &mod->cmd_q);
2322                 return;
2323         }
2324
2325         /*
2326          * If mailbox is busy, queue command for poll timer
2327          */
2328         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2329         if (stat) {
2330                 list_add_tail(&cmd->qe, &mod->cmd_q);
2331                 return;
2332         }
2333
2334         /*
2335          * mailbox is free -- queue command to firmware
2336          */
2337         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2338 }
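
/*
 * Usage sketch -- mirrors bfa_ioc_send_fwsync() later in this file:
 *
 *      struct bfa_mbox_cmd_s cmd;
 *      struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *      bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *                  bfa_ioc_portid(ioc));
 *      req->clscode = cpu_to_be16(ioc->clscode);
 *      bfa_ioc_mbox_queue(ioc, &cmd);
 */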
2339
2340 /*
2341  * Handle mailbox interrupts
2342  */
2343 void
2344 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2345 {
2346         struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2347         struct bfi_mbmsg_s              m;
2348         int                             mc;
2349
2350         if (bfa_ioc_msgget(ioc, &m)) {
2351                 /*
2352                  * Treat IOC message class as special.
2353                  */
2354                 mc = m.mh.msg_class;
2355                 if (mc == BFI_MC_IOC) {
2356                         bfa_ioc_isr(ioc, &m);
2357                         return;
2358                 }
2359
2360                 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2361                         return;
2362
2363                 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2364         }
2365
2366         bfa_ioc_lpu_read_stat(ioc);
2367
2368         /*
2369          * Try to send pending mailbox commands
2370          */
2371         bfa_ioc_mbox_poll(ioc);
2372 }
2373
2374 void
2375 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2376 {
2377         bfa_ioc_stats(ioc, ioc_hbfails);
2378         ioc->stats.hb_count = ioc->hb_count;
2379         bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2380 }
2381
2382 /*
2383  * return true if IOC is disabled
2384  */
2385 bfa_boolean_t
2386 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2387 {
2388         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2389                 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2390 }
2391
2392 /*
2393  * return true if IOC firmware is different.
2394  */
2395 bfa_boolean_t
2396 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2397 {
2398         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2399                 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2400                 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2401 }
2402
2403 #define bfa_ioc_state_disabled(__sm)            \
2404         (((__sm) == BFI_IOC_UNINIT) ||          \
2405          ((__sm) == BFI_IOC_INITING) ||         \
2406          ((__sm) == BFI_IOC_HWINIT) ||          \
2407          ((__sm) == BFI_IOC_DISABLED) ||        \
2408          ((__sm) == BFI_IOC_FAIL) ||            \
2409          ((__sm) == BFI_IOC_CFG_DISABLED))
2410
2411 /*
2412  * Check if adapter is disabled -- both IOCs should be in a disabled
2413  * state.
2414  */
2415 bfa_boolean_t
2416 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2417 {
2418         u32     ioc_state;
2419
2420         if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2421                 return BFA_FALSE;
2422
2423         ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2424         if (!bfa_ioc_state_disabled(ioc_state))
2425                 return BFA_FALSE;
2426
2427         if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2428                 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2429                 if (!bfa_ioc_state_disabled(ioc_state))
2430                         return BFA_FALSE;
2431         }
2432
2433         return BFA_TRUE;
2434 }
2435
2436 /*
2437  * Reset IOC fwstate registers.
2438  */
2439 void
2440 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2441 {
2442         writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2443         writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2444 }
2445
2446 #define BFA_MFG_NAME "Brocade"
2447 void
2448 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2449                          struct bfa_adapter_attr_s *ad_attr)
2450 {
2451         struct bfi_ioc_attr_s   *ioc_attr;
2452
2453         ioc_attr = ioc->attr;
2454
2455         bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2456         bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2457         bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2458         bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2459         memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2460                       sizeof(struct bfa_mfg_vpd_s));
2461
2462         ad_attr->nports = bfa_ioc_get_nports(ioc);
2463         ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2464
2465         bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2466         /* For now, model descr uses same model string */
2467         bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2468
2469         ad_attr->card_type = ioc_attr->card_type;
2470         ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2471
2472         if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2473                 ad_attr->prototype = 1;
2474         else
2475                 ad_attr->prototype = 0;
2476
2477         ad_attr->pwwn = ioc->attr->pwwn;
2478         ad_attr->mac  = bfa_ioc_get_mac(ioc);
2479
2480         ad_attr->pcie_gen = ioc_attr->pcie_gen;
2481         ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2482         ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2483         ad_attr->asic_rev = ioc_attr->asic_rev;
2484
2485         bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2486
2487         ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2488         ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2489                                   !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2490 }
2491
2492 enum bfa_ioc_type_e
2493 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2494 {
2495         if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2496                 return BFA_IOC_TYPE_LL;
2497
2498         WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2499
2500         return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2501                 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2502 }
2503
2504 void
2505 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2506 {
2507         memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2508         memcpy((void *)serial_num,
2509                         (void *)ioc->attr->brcd_serialnum,
2510                         BFA_ADAPTER_SERIAL_NUM_LEN);
2511 }
2512
2513 void
2514 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2515 {
2516         memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2517         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2518 }
2519
2520 void
2521 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2522 {
2523         WARN_ON(!chip_rev);
2524
2525         memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2526
2527         chip_rev[0] = 'R';
2528         chip_rev[1] = 'e';
2529         chip_rev[2] = 'v';
2530         chip_rev[3] = '-';
2531         chip_rev[4] = ioc->attr->asic_rev;
2532         chip_rev[5] = '\0';
2533 }
2534
2535 void
2536 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2537 {
2538         memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2539         memcpy(optrom_ver, ioc->attr->optrom_version,
2540                       BFA_VERSION_LEN);
2541 }
2542
2543 void
2544 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2545 {
2546         memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2547         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2548 }
2549
2550 void
2551 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2552 {
2553         struct bfi_ioc_attr_s   *ioc_attr;
2554
2555         WARN_ON(!model);
2556         memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2557
2558         ioc_attr = ioc->attr;
2559
2560         snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2561                         BFA_MFG_NAME, ioc_attr->card_type);
2562 }
2563
2564 enum bfa_ioc_state
2565 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2566 {
2567         enum bfa_iocpf_state iocpf_st;
2568         enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2569
2570         if (ioc_st == BFA_IOC_ENABLING ||
2571                 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2572
2573                 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2574
2575                 switch (iocpf_st) {
2576                 case BFA_IOCPF_SEMWAIT:
2577                         ioc_st = BFA_IOC_SEMWAIT;
2578                         break;
2579
2580                 case BFA_IOCPF_HWINIT:
2581                         ioc_st = BFA_IOC_HWINIT;
2582                         break;
2583
2584                 case BFA_IOCPF_FWMISMATCH:
2585                         ioc_st = BFA_IOC_FWMISMATCH;
2586                         break;
2587
2588                 case BFA_IOCPF_FAIL:
2589                         ioc_st = BFA_IOC_FAIL;
2590                         break;
2591
2592                 case BFA_IOCPF_INITFAIL:
2593                         ioc_st = BFA_IOC_INITFAIL;
2594                         break;
2595
2596                 default:
2597                         break;
2598                 }
2599         }
2600
2601         return ioc_st;
2602 }
2603
2604 void
2605 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2606 {
2607         memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2608
2609         ioc_attr->state = bfa_ioc_get_state(ioc);
2610         ioc_attr->port_id = ioc->port_id;
2611         ioc_attr->port_mode = ioc->port_mode;
2612         ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2613         ioc_attr->cap_bm = ioc->ad_cap_bm;
2614
2615         ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2616
2617         bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2618
2619         ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2620         ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2621         bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2622 }
2623
2624 mac_t
2625 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2626 {
2627         /*
2628          * Check the IOC type and return the appropriate MAC
2629          */
2630         if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2631                 return ioc->attr->fcoe_mac;
2632         else
2633                 return ioc->attr->mac;
2634 }
2635
2636 mac_t
2637 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2638 {
2639         mac_t   m;
2640
2641         m = ioc->attr->mfg_mac;
2642         if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2643                 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2644         else
2645                 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2646                         bfa_ioc_pcifn(ioc));
2647
2648         return m;
2649 }
2650
2651 /*
2652  * Send AEN notification
2653  */
2654 void
2655 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2656 {
2657         struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2658         struct bfa_aen_entry_s  *aen_entry;
2659         enum bfa_ioc_type_e ioc_type;
2660
2661         bfad_get_aen_entry(bfad, aen_entry);
2662         if (!aen_entry)
2663                 return;
2664
2665         ioc_type = bfa_ioc_get_type(ioc);
2666         switch (ioc_type) {
2667         case BFA_IOC_TYPE_FC:
2668                 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2669                 break;
2670         case BFA_IOC_TYPE_FCoE:
2671                 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2672                 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2673                 break;
2674         case BFA_IOC_TYPE_LL:
2675                 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2676                 break;
2677         default:
2678                 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2679                 break;
2680         }
2681
2682         /* Send the AEN notification */
2683         aen_entry->aen_data.ioc.ioc_type = ioc_type;
2684         bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2685                                   BFA_AEN_CAT_IOC, event);
2686 }
2687
2688 /*
2689  * Retrieve saved firmware trace from a prior IOC failure.
2690  */
2691 bfa_status_t
2692 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2693 {
2694         int     tlen;
2695
2696         if (ioc->dbg_fwsave_len == 0)
2697                 return BFA_STATUS_ENOFSAVE;
2698
2699         tlen = *trclen;
2700         if (tlen > ioc->dbg_fwsave_len)
2701                 tlen = ioc->dbg_fwsave_len;
2702
2703         memcpy(trcdata, ioc->dbg_fwsave, tlen);
2704         *trclen = tlen;
2705         return BFA_STATUS_OK;
2706 }
2707
2708
2709 /*
2710  * Retrieve saved firmware trace from a prior IOC failure.
2711  */
2712 bfa_status_t
2713 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2714 {
2715         u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2716         int tlen;
2717         bfa_status_t status;
2718
2719         bfa_trc(ioc, *trclen);
2720
2721         tlen = *trclen;
2722         if (tlen > BFA_DBG_FWTRC_LEN)
2723                 tlen = BFA_DBG_FWTRC_LEN;
2724
2725         status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2726         *trclen = tlen;
2727         return status;
2728 }
2729
2730 static void
2731 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2732 {
2733         struct bfa_mbox_cmd_s cmd;
2734         struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2735
2736         bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2737                     bfa_ioc_portid(ioc));
2738         req->clscode = cpu_to_be16(ioc->clscode);
2739         bfa_ioc_mbox_queue(ioc, &cmd);
2740 }
2741
2742 static void
2743 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2744 {
2745         u32 fwsync_iter = 1000;
2746
2747         bfa_ioc_send_fwsync(ioc);
2748
2749         /*
2750          * After sending the fw sync mbox command, wait for it to
2751          * take effect.  We do not wait for a response because
2752          *    1. the fw_sync mbox cmd has no response, and
2753          *    2. even if it did, interrupts might not be enabled
2754          *       when this function is called.
2755          * So just keep checking whether any mbox cmd is pending and,
2756          * after waiting a reasonable amount of time, proceed anyway.
2757          * It is possible that the fw has crashed and the mbox command
2758          * is never acknowledged.
2759          */
2760         while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2761                 fwsync_iter--;
2762 }
2763
2764 /*
2765  * Dump firmware smem
2766  */
2767 bfa_status_t
2768 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2769                                 u32 *offset, int *buflen)
2770 {
2771         u32 loff;
2772         int dlen;
2773         bfa_status_t status;
2774         u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2775
2776         if (*offset >= smem_len) {
2777                 *offset = *buflen = 0;
2778                 return BFA_STATUS_EINVAL;
2779         }
2780
2781         loff = *offset;
2782         dlen = *buflen;
2783
2784         /*
2785          * First smem read, sync smem before proceeding
2786          * No need to sync before reading every chunk.
2787          */
2788         if (loff == 0)
2789                 bfa_ioc_fwsync(ioc);
2790
2791         if ((loff + dlen) >= smem_len)
2792                 dlen = smem_len - loff;
2793
2794         status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2795
2796         if (status != BFA_STATUS_OK) {
2797                 *offset = *buflen = 0;
2798                 return status;
2799         }
2800
2801         *offset += dlen;
2802
2803         if (*offset >= smem_len)
2804                 *offset = 0;
2805
2806         *buflen = dlen;
2807
2808         return status;
2809 }
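
/*
 * Usage sketch (illustrative): dump all of smem in caller-sized chunks.
 * *offset advances on each call and wraps back to 0 at the end of smem:
 *
 *      u32 off = 0;
 *      int len;
 *
 *      do {
 *              len = sizeof(chunk);
 *              if (bfa_ioc_debug_fwcore(ioc, chunk, &off, &len) !=
 *                  BFA_STATUS_OK)
 *                      break;
 *              consume(chunk, len);    -- hypothetical sink
 *      } while (off != 0);
 */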
2810
2811 /*
2812  * Firmware statistics
2813  */
2814 bfa_status_t
2815 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2816 {
2817         u32 loff = BFI_IOC_FWSTATS_OFF +
2818                 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2819         int tlen;
2820         bfa_status_t status;
2821
2822         if (ioc->stats_busy) {
2823                 bfa_trc(ioc, ioc->stats_busy);
2824                 return BFA_STATUS_DEVBUSY;
2825         }
2826         ioc->stats_busy = BFA_TRUE;
2827
2828         tlen = sizeof(struct bfa_fw_stats_s);
2829         status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2830
2831         ioc->stats_busy = BFA_FALSE;
2832         return status;
2833 }
2834
2835 bfa_status_t
2836 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2837 {
2838         u32 loff = BFI_IOC_FWSTATS_OFF +
2839                 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2840         int tlen;
2841         bfa_status_t status;
2842
2843         if (ioc->stats_busy) {
2844                 bfa_trc(ioc, ioc->stats_busy);
2845                 return BFA_STATUS_DEVBUSY;
2846         }
2847         ioc->stats_busy = BFA_TRUE;
2848
2849         tlen = sizeof(struct bfa_fw_stats_s);
2850         status = bfa_ioc_smem_clr(ioc, loff, tlen);
2851
2852         ioc->stats_busy = BFA_FALSE;
2853         return status;
2854 }
2855
2856 /*
2857  * Save firmware trace if configured.
2858  */
2859 static void
2860 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2861 {
2862         int             tlen;
2863
2864         if (ioc->dbg_fwsave_once) {
2865                 ioc->dbg_fwsave_once = BFA_FALSE;
2866                 if (ioc->dbg_fwsave_len) {
2867                         tlen = ioc->dbg_fwsave_len;
2868                         bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2869                 }
2870         }
2871 }
2872
2873 /*
2874  * Firmware failure detected. Start recovery actions.
2875  */
2876 static void
2877 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2878 {
2879         bfa_ioc_stats(ioc, ioc_hbfails);
2880         ioc->stats.hb_count = ioc->hb_count;
2881         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2882 }
2883
2884 /*
2885  *  BFA IOC PF private functions
2886  */
2887 static void
2888 bfa_iocpf_timeout(void *ioc_arg)
2889 {
2890         struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2891
2892         bfa_trc(ioc, 0);
2893         bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2894 }
2895
2896 static void
2897 bfa_iocpf_sem_timeout(void *ioc_arg)
2898 {
2899         struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2900
2901         bfa_ioc_hw_sem_get(ioc);
2902 }
2903
2904 static void
2905 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2906 {
2907         u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2908
2909         bfa_trc(ioc, fwstate);
2910
2911         if (fwstate == BFI_IOC_DISABLED) {
2912                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2913                 return;
2914         }
2915
2916         if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2917                 bfa_iocpf_timeout(ioc);
2918         } else {
2919                 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2920                 bfa_iocpf_poll_timer_start(ioc);
2921         }
2922 }
2923
2924 static void
2925 bfa_iocpf_poll_timeout(void *ioc_arg)
2926 {
2927         struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2928
2929         bfa_ioc_poll_fwinit(ioc);
2930 }
2931
2932 /*
2933  *  bfa timer function
2934  */
2935 void
2936 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2937 {
2938         struct list_head *qh = &mod->timer_q;
2939         struct list_head *qe, *qe_next;
2940         struct bfa_timer_s *elem;
2941         struct list_head timedout_q;
2942
2943         INIT_LIST_HEAD(&timedout_q);
2944
2945         qe = bfa_q_next(qh);
2946
2947         while (qe != qh) {
2948                 qe_next = bfa_q_next(qe);
2949
2950                 elem = (struct bfa_timer_s *) qe;
2951                 if (elem->timeout <= BFA_TIMER_FREQ) {
2952                         elem->timeout = 0;
2953                         list_del(&elem->qe);
2954                         list_add_tail(&elem->qe, &timedout_q);
2955                 } else {
2956                         elem->timeout -= BFA_TIMER_FREQ;
2957                 }
2958
2959                 qe = qe_next;   /* go to next elem */
2960         }
2961
2962         /*
2963          * Pop all the timeout entries
2964          */
2965         while (!list_empty(&timedout_q)) {
2966                 bfa_q_deq(&timedout_q, &elem);
2967                 elem->timercb(elem->arg);
2968         }
2969 }
2970
2971 /*
2972  * Should be called with lock protection
2973  */
2974 void
2975 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2976                     void (*timercb) (void *), void *arg, unsigned int timeout)
2977 {
2979         WARN_ON(timercb == NULL);
2980         WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2981
2982         timer->timeout = timeout;
2983         timer->timercb = timercb;
2984         timer->arg = arg;
2985
2986         list_add_tail(&timer->qe, &mod->timer_q);
2987 }
2988
2989 /*
2990  * Should be called with lock protection
2991  */
2992 void
2993 bfa_timer_stop(struct bfa_timer_s *timer)
2994 {
2995         WARN_ON(list_empty(&timer->qe));
2996
2997         list_del(&timer->qe);
2998 }
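
/*
 * Usage sketch (illustrative): timers are one-shot and tick-driven.  The
 * driver calls bfa_timer_beat() once every BFA_TIMER_FREQ msecs, so a
 * timer armed with a 2-tick timeout fires on the second beat:
 *
 *      bfa_timer_begin(timer_mod, &timer, my_timeout_cb, my_arg,
 *                      2 * BFA_TIMER_FREQ);
 *      ...
 *      bfa_timer_stop(&timer);         -- cancel before it fires
 */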
2999
3000 /*
3001  *      ASIC block related
3002  */
3003 static void
3004 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3005 {
3006         struct bfa_ablk_cfg_inst_s *cfg_inst;
3007         int i, j;
3008         u16     be16;
3009         u32     be32;
3010
3011         for (i = 0; i < BFA_ABLK_MAX; i++) {
3012                 cfg_inst = &cfg->inst[i];
3013                 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3014                         be16 = cfg_inst->pf_cfg[j].pers;
3015                         cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3016                         be16 = cfg_inst->pf_cfg[j].num_qpairs;
3017                         cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3018                         be16 = cfg_inst->pf_cfg[j].num_vectors;
3019                         cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3020                         be32 = cfg_inst->pf_cfg[j].bw;
3021                         cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3022                 }
3023         }
3024 }
3025
3026 static void
3027 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3028 {
3029         struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3030         struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3031         bfa_ablk_cbfn_t cbfn;
3032
3033         WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3034         bfa_trc(ablk->ioc, msg->mh.msg_id);
3035
3036         switch (msg->mh.msg_id) {
3037         case BFI_ABLK_I2H_QUERY:
3038                 if (rsp->status == BFA_STATUS_OK) {
3039                         memcpy(ablk->cfg, ablk->dma_addr.kva,
3040                                 sizeof(struct bfa_ablk_cfg_s));
3041                         bfa_ablk_config_swap(ablk->cfg);
3042                         ablk->cfg = NULL;
3043                 }
3044                 break;
3045
3046         case BFI_ABLK_I2H_ADPT_CONFIG:
3047         case BFI_ABLK_I2H_PORT_CONFIG:
3048                 /* update config port mode */
3049                 ablk->ioc->port_mode_cfg = rsp->port_mode;
3050                 /* fall through */
3051         case BFI_ABLK_I2H_PF_DELETE:
3052         case BFI_ABLK_I2H_PF_UPDATE:
3053         case BFI_ABLK_I2H_OPTROM_ENABLE:
3054         case BFI_ABLK_I2H_OPTROM_DISABLE:
3055                 /* No-op */
3056                 break;
3057
3058         case BFI_ABLK_I2H_PF_CREATE:
3059                 *(ablk->pcifn) = rsp->pcifn;
3060                 ablk->pcifn = NULL;
3061                 break;
3062
3063         default:
3064                 WARN_ON(1);
3065         }
3066
3067         ablk->busy = BFA_FALSE;
3068         if (ablk->cbfn) {
3069                 cbfn = ablk->cbfn;
3070                 ablk->cbfn = NULL;
3071                 cbfn(ablk->cbarg, rsp->status);
3072         }
3073 }
3074
3075 static void
3076 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3077 {
3078         struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3079
3080         bfa_trc(ablk->ioc, event);
3081
3082         switch (event) {
3083         case BFA_IOC_E_ENABLED:
3084                 WARN_ON(ablk->busy != BFA_FALSE);
3085                 break;
3086
3087         case BFA_IOC_E_DISABLED:
3088         case BFA_IOC_E_FAILED:
3089                 /* Fail any pending requests */
3090                 ablk->pcifn = NULL;
3091                 if (ablk->busy) {
3092                         if (ablk->cbfn)
3093                                 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3094                         ablk->cbfn = NULL;
3095                         ablk->busy = BFA_FALSE;
3096                 }
3097                 break;
3098
3099         default:
3100                 WARN_ON(1);
3101                 break;
3102         }
3103 }
3104
3105 u32
3106 bfa_ablk_meminfo(void)
3107 {
3108         return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3109 }
3110
3111 void
3112 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3113 {
3114         ablk->dma_addr.kva = dma_kva;
3115         ablk->dma_addr.pa  = dma_pa;
3116 }
3117
3118 void
3119 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3120 {
3121         ablk->ioc = ioc;
3122
3123         bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3124         bfa_q_qe_init(&ablk->ioc_notify);
3125         bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3126         list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3127 }
3128
3129 bfa_status_t
3130 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3131                 bfa_ablk_cbfn_t cbfn, void *cbarg)
3132 {
3133         struct bfi_ablk_h2i_query_s *m;
3134
3135         WARN_ON(!ablk_cfg);
3136
3137         if (!bfa_ioc_is_operational(ablk->ioc)) {
3138                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3139                 return BFA_STATUS_IOC_FAILURE;
3140         }
3141
3142         if (ablk->busy) {
3143                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3144                 return  BFA_STATUS_DEVBUSY;
3145         }
3146
3147         ablk->cfg = ablk_cfg;
3148         ablk->cbfn  = cbfn;
3149         ablk->cbarg = cbarg;
3150         ablk->busy  = BFA_TRUE;
3151
3152         m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3153         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3154                     bfa_ioc_portid(ablk->ioc));
3155         bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3156         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3157
3158         return BFA_STATUS_OK;
3159 }
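
/*
 * Usage sketch (hypothetical caller):
 *
 *      struct bfa_ablk_cfg_s cfg;
 *
 *      if (bfa_ablk_query(ablk, &cfg, my_query_done, my_arg) !=
 *          BFA_STATUS_OK)
 *              return;         -- IOC down or a request already in flight
 *
 * On completion bfa_ablk_isr() copies the DMA buffer into 'cfg', fixes
 * the endianness with bfa_ablk_config_swap() and then invokes
 * my_query_done(my_arg, status).
 */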
3160
3161 bfa_status_t
3162 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3163                 u8 port, enum bfi_pcifn_class personality, int bw,
3164                 bfa_ablk_cbfn_t cbfn, void *cbarg)
3165 {
3166         struct bfi_ablk_h2i_pf_req_s *m;
3167
3168         if (!bfa_ioc_is_operational(ablk->ioc)) {
3169                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3170                 return BFA_STATUS_IOC_FAILURE;
3171         }
3172
3173         if (ablk->busy) {
3174                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3175                 return  BFA_STATUS_DEVBUSY;
3176         }
3177
3178         ablk->pcifn = pcifn;
3179         ablk->cbfn = cbfn;
3180         ablk->cbarg = cbarg;
3181         ablk->busy  = BFA_TRUE;
3182
3183         m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3184         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3185                     bfa_ioc_portid(ablk->ioc));
3186         m->pers = cpu_to_be16((u16)personality);
3187         m->bw = cpu_to_be32(bw);
3188         m->port = port;
3189         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3190
3191         return BFA_STATUS_OK;
3192 }
3193
3194 bfa_status_t
3195 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3196                 bfa_ablk_cbfn_t cbfn, void *cbarg)
3197 {
3198         struct bfi_ablk_h2i_pf_req_s *m;
3199
3200         if (!bfa_ioc_is_operational(ablk->ioc)) {
3201                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3202                 return BFA_STATUS_IOC_FAILURE;
3203         }
3204
3205         if (ablk->busy) {
3206                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3207                 return  BFA_STATUS_DEVBUSY;
3208         }
3209
3210         ablk->cbfn  = cbfn;
3211         ablk->cbarg = cbarg;
3212         ablk->busy  = BFA_TRUE;
3213
3214         m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3215         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3216                     bfa_ioc_portid(ablk->ioc));
3217         m->pcifn = (u8)pcifn;
3218         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3219
3220         return BFA_STATUS_OK;
3221 }
3222
3223 bfa_status_t
3224 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3225                 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3226 {
3227         struct bfi_ablk_h2i_cfg_req_s *m;
3228
3229         if (!bfa_ioc_is_operational(ablk->ioc)) {
3230                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3231                 return BFA_STATUS_IOC_FAILURE;
3232         }
3233
3234         if (ablk->busy) {
3235                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3236                 return  BFA_STATUS_DEVBUSY;
3237         }
3238
3239         ablk->cbfn  = cbfn;
3240         ablk->cbarg = cbarg;
3241         ablk->busy  = BFA_TRUE;
3242
3243         m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3244         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3245                     bfa_ioc_portid(ablk->ioc));
3246         m->mode = (u8)mode;
3247         m->max_pf = (u8)max_pf;
3248         m->max_vf = (u8)max_vf;
3249         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3250
3251         return BFA_STATUS_OK;
3252 }
3253
3254 bfa_status_t
3255 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3256                 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3257 {
3258         struct bfi_ablk_h2i_cfg_req_s *m;
3259
3260         if (!bfa_ioc_is_operational(ablk->ioc)) {
3261                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3262                 return BFA_STATUS_IOC_FAILURE;
3263         }
3264
3265         if (ablk->busy) {
3266                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3267                 return  BFA_STATUS_DEVBUSY;
3268         }
3269
3270         ablk->cbfn  = cbfn;
3271         ablk->cbarg = cbarg;
3272         ablk->busy  = BFA_TRUE;
3273
3274         m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3275         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3276                 bfa_ioc_portid(ablk->ioc));
3277         m->port = (u8)port;
3278         m->mode = (u8)mode;
3279         m->max_pf = (u8)max_pf;
3280         m->max_vf = (u8)max_vf;
3281         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3282
3283         return BFA_STATUS_OK;
3284 }
3285
3286 bfa_status_t
3287 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3288                 bfa_ablk_cbfn_t cbfn, void *cbarg)
3289 {
3290         struct bfi_ablk_h2i_pf_req_s *m;
3291
3292         if (!bfa_ioc_is_operational(ablk->ioc)) {
3293                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3294                 return BFA_STATUS_IOC_FAILURE;
3295         }
3296
3297         if (ablk->busy) {
3298                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3299                 return  BFA_STATUS_DEVBUSY;
3300         }
3301
3302         ablk->cbfn  = cbfn;
3303         ablk->cbarg = cbarg;
3304         ablk->busy  = BFA_TRUE;
3305
3306         m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3307         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3308                 bfa_ioc_portid(ablk->ioc));
3309         m->pcifn = (u8)pcifn;
3310         m->bw = cpu_to_be32(bw);
3311         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3312
3313         return BFA_STATUS_OK;
3314 }
3315
3316 bfa_status_t
3317 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3318 {
3319         struct bfi_ablk_h2i_optrom_s *m;
3320
3321         if (!bfa_ioc_is_operational(ablk->ioc)) {
3322                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3323                 return BFA_STATUS_IOC_FAILURE;
3324         }
3325
3326         if (ablk->busy) {
3327                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3328                 return  BFA_STATUS_DEVBUSY;
3329         }
3330
3331         ablk->cbfn  = cbfn;
3332         ablk->cbarg = cbarg;
3333         ablk->busy  = BFA_TRUE;
3334
3335         m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3336         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3337                 bfa_ioc_portid(ablk->ioc));
3338         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3339
3340         return BFA_STATUS_OK;
3341 }
3342
3343 bfa_status_t
3344 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3345 {
3346         struct bfi_ablk_h2i_optrom_s *m;
3347
3348         if (!bfa_ioc_is_operational(ablk->ioc)) {
3349                 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3350                 return BFA_STATUS_IOC_FAILURE;
3351         }
3352
3353         if (ablk->busy) {
3354                 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3355                 return  BFA_STATUS_DEVBUSY;
3356         }
3357
3358         ablk->cbfn  = cbfn;
3359         ablk->cbarg = cbarg;
3360         ablk->busy  = BFA_TRUE;
3361
3362         m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3363         bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3364                 bfa_ioc_portid(ablk->ioc));
3365         bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3366
3367         return BFA_STATUS_OK;
3368 }
3369
3370 /*
3371  *      SFP module specific
3372  */
3373
3374 /* forward declarations */
3375 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3376 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3377 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3378                                 enum bfa_port_speed portspeed);
3379
3380 static void
3381 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3382 {
3383         bfa_trc(sfp, sfp->lock);
3384         if (sfp->cbfn)
3385                 sfp->cbfn(sfp->cbarg, sfp->status);
3386         sfp->lock = 0;
3387         sfp->cbfn = NULL;
3388 }
3389
3390 static void
3391 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3392 {
3393         bfa_trc(sfp, sfp->portspeed);
3394         if (sfp->media) {
3395                 bfa_sfp_media_get(sfp);
3396                 if (sfp->state_query_cbfn)
3397                         sfp->state_query_cbfn(sfp->state_query_cbarg,
3398                                         sfp->status);
3399                 sfp->media = NULL;
3400         }
3401
3402         if (sfp->portspeed) {
3403                 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3404                 if (sfp->state_query_cbfn)
3405                         sfp->state_query_cbfn(sfp->state_query_cbarg,
3406                                         sfp->status);
3407                 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3408         }
3409
3410         sfp->state_query_lock = 0;
3411         sfp->state_query_cbfn = NULL;
3412 }
3413
3414 /*
3415  *      IOC event handler.
3416  */
3417 static void
3418 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3419 {
3420         struct bfa_sfp_s *sfp = sfp_arg;
3421
3422         bfa_trc(sfp, event);
3423         bfa_trc(sfp, sfp->lock);
3424         bfa_trc(sfp, sfp->state_query_lock);
3425
3426         switch (event) {
3427         case BFA_IOC_E_DISABLED:
3428         case BFA_IOC_E_FAILED:
3429                 if (sfp->lock) {
3430                         sfp->status = BFA_STATUS_IOC_FAILURE;
3431                         bfa_cb_sfp_show(sfp);
3432                 }
3433
3434                 if (sfp->state_query_lock) {
3435                         sfp->status = BFA_STATUS_IOC_FAILURE;
3436                         bfa_cb_sfp_state_query(sfp);
3437                 }
3438                 break;
3439
3440         default:
3441                 break;
3442         }
3443 }
3444
3445 /*
3446  * SFP's State Change Notification post to AEN
3447  */
3448 static void
3449 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3450 {
3451         struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3452         struct bfa_aen_entry_s  *aen_entry;
3453         enum bfa_port_aen_event aen_evt = 0;
3454
3455         bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3456                       ((u64)rsp->event));
3457
3458         bfad_get_aen_entry(bfad, aen_entry);
3459         if (!aen_entry)
3460                 return;
3461
3462         aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3463         aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3464         aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3465
3466         switch (rsp->event) {
3467         case BFA_SFP_SCN_INSERTED:
3468                 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3469                 break;
3470         case BFA_SFP_SCN_REMOVED:
3471                 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3472                 break;
3473         case BFA_SFP_SCN_FAILED:
3474                 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3475                 break;
3476         case BFA_SFP_SCN_UNSUPPORT:
3477                 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3478                 break;
3479         case BFA_SFP_SCN_POM:
3480                 aen_evt = BFA_PORT_AEN_SFP_POM;
3481                 aen_entry->aen_data.port.level = rsp->pomlvl;
3482                 break;
3483         default:
3484                 bfa_trc(sfp, rsp->event);
3485                 WARN_ON(1);
3486         }
3487
3488         /* Send the AEN notification */
3489         bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3490                                   BFA_AEN_CAT_PORT, aen_evt);
3491 }
3492
3493 /*
3494  *      SFP get data send
3495  */
3496 static void
3497 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3498 {
3499         struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3500
3501         bfa_trc(sfp, req->memtype);
3502
3503         /* build host command */
3504         bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3505                         bfa_ioc_portid(sfp->ioc));
3506
3507         /* send mbox cmd */
3508         bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3509 }
3510
3511 /*
3512  *      SFP is valid, read sfp data
3513  */
3514 static void
3515 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3516 {
3517         struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3518
3519         WARN_ON(sfp->lock != 0);
3520         bfa_trc(sfp, sfp->state);
3521
3522         sfp->lock = 1;
3523         sfp->memtype = memtype;
3524         req->memtype = memtype;
3525
3526         /* Setup SG list */
3527         bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3528
3529         bfa_sfp_getdata_send(sfp);
3530 }
3531
3532 /*
3533  *      SFP scn handler
3534  */
3535 static void
3536 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3537 {
3538         struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3539
3540         switch (rsp->event) {
3541         case BFA_SFP_SCN_INSERTED:
3542                 sfp->state = BFA_SFP_STATE_INSERTED;
3543                 sfp->data_valid = 0;
3544                 bfa_sfp_scn_aen_post(sfp, rsp);
3545                 break;
3546         case BFA_SFP_SCN_REMOVED:
3547                 sfp->state = BFA_SFP_STATE_REMOVED;
3548                 sfp->data_valid = 0;
3549                 bfa_sfp_scn_aen_post(sfp, rsp);
3550                 break;
3551         case BFA_SFP_SCN_FAILED:
3552                 sfp->state = BFA_SFP_STATE_FAILED;
3553                 sfp->data_valid = 0;
3554                 bfa_sfp_scn_aen_post(sfp, rsp);
3555                 break;
3556         case BFA_SFP_SCN_UNSUPPORT:
3557                 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3558                 bfa_sfp_scn_aen_post(sfp, rsp);
3559                 if (!sfp->lock)
3560                         bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3561                 break;
3562         case BFA_SFP_SCN_POM:
3563                 bfa_sfp_scn_aen_post(sfp, rsp);
3564                 break;
3565         case BFA_SFP_SCN_VALID:
3566                 sfp->state = BFA_SFP_STATE_VALID;
3567                 if (!sfp->lock)
3568                         bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3569                 break;
3570         default:
3571                 bfa_trc(sfp, rsp->event);
3572                 WARN_ON(1);
3573         }
3574 }
3575
3576 /*
3577  * SFP show complete
3578  */
3579 static void
3580 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3581 {
3582         struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3583
3584         if (!sfp->lock) {
3585                 /*
3586                  * receiving response after ioc failure
3587                  */
3588                 bfa_trc(sfp, sfp->lock);
3589                 return;
3590         }
3591
3592         bfa_trc(sfp, rsp->status);
3593         if (rsp->status == BFA_STATUS_OK) {
3594                 sfp->data_valid = 1;
3595                 if (sfp->state == BFA_SFP_STATE_VALID)
3596                         sfp->status = BFA_STATUS_OK;
3597                 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3598                         sfp->status = BFA_STATUS_SFP_UNSUPP;
3599                 else
3600                         bfa_trc(sfp, sfp->state);
3601         } else {
3602                 sfp->data_valid = 0;
3603                 sfp->status = rsp->status;
3604                 /* sfpshow shouldn't change sfp state */
3605         }
3606
3607         bfa_trc(sfp, sfp->memtype);
3608         if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3609                 bfa_trc(sfp, sfp->data_valid);
3610                 if (sfp->data_valid) {
3611                         u32     size = sizeof(struct sfp_mem_s);
3612                         u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3613                         memcpy(des, sfp->dbuf_kva, size);
3614                 }
3615                 /*
3616                  * Queue completion callback.
3617                  */
3618                 bfa_cb_sfp_show(sfp);
3619         } else
3620                 sfp->lock = 0;
3621
3622         bfa_trc(sfp, sfp->state_query_lock);
3623         if (sfp->state_query_lock) {
3624                 sfp->state = rsp->state;
3625                 /* Complete callback */
3626                 bfa_cb_sfp_state_query(sfp);
3627         }
3628 }
3629
3630 /*
3631  *      SFP query fw sfp state
3632  */
3633 static void
3634 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3635 {
3636         struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3637
3638         /* Should not be doing query if not in _INIT state */
3639         WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3640         WARN_ON(sfp->state_query_lock != 0);
3641         bfa_trc(sfp, sfp->state);
3642
3643         sfp->state_query_lock = 1;
3644         req->memtype = 0;
3645
3646         if (!sfp->lock)
3647                 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3648 }
3649
3650 static void
3651 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3652 {
3653         enum bfa_defs_sfp_media_e *media = sfp->media;
3654
3655         *media = BFA_SFP_MEDIA_UNKNOWN;
3656
3657         if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3658                 *media = BFA_SFP_MEDIA_UNSUPPORT;
3659         else if (sfp->state == BFA_SFP_STATE_VALID) {
3660                 union sfp_xcvr_e10g_code_u e10g;
3661                 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3662                 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3663                                 (sfpmem->srlid_base.xcvr[5] >> 1);
3664
3665                 e10g.b = sfpmem->srlid_base.xcvr[0];
3666                 bfa_trc(sfp, e10g.b);
3667                 bfa_trc(sfp, xmtr_tech);
3668                 /* check fc transmitter tech */
3669                 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3670                     (xmtr_tech & SFP_XMTR_TECH_CP) ||
3671                     (xmtr_tech & SFP_XMTR_TECH_CA))
3672                         *media = BFA_SFP_MEDIA_CU;
3673                 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3674                          (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3675                         *media = BFA_SFP_MEDIA_EL;
3676                 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3677                          (xmtr_tech & SFP_XMTR_TECH_LC))
3678                         *media = BFA_SFP_MEDIA_LW;
3679                 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3680                          (xmtr_tech & SFP_XMTR_TECH_SN) ||
3681                          (xmtr_tech & SFP_XMTR_TECH_SA))
3682                         *media = BFA_SFP_MEDIA_SW;
3683                 /* Check 10G Ethernet Compliance code */
3684                 else if (e10g.r.e10g_sr)
3685                         *media = BFA_SFP_MEDIA_SW;
3686                 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3687                         *media = BFA_SFP_MEDIA_LW;
3688                 else if (e10g.r.e10g_unall)
3689                         *media = BFA_SFP_MEDIA_UNKNOWN;
3690                 else
3691                         bfa_trc(sfp, 0);
3692         } else
3693                 bfa_trc(sfp, sfp->state);
3694 }
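/*
 * Worked example of the xmtr_tech extraction above (values illustrative):
 * the transmitter technology field spans the low two bits of xcvr[4] and
 * the upper seven bits of xcvr[5]. With xcvr[4] = 0x01 and xcvr[5] = 0x20:
 *
 *	xmtr_tech = (0x01 & 0x3) << 7 | (0x20 >> 1)
 *		  = 0x080 | 0x010
 *		  = 0x090
 *
 * The assembled value is then matched against the SFP_XMTR_TECH_* masks
 * to classify the media as copper, electrical, long wave or short wave.
 */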
3695
3696 static bfa_status_t
3697 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3698 {
3699         struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3700         struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3701         union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3702         union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3703
3704         if (portspeed == BFA_PORT_SPEED_10GBPS) {
3705                 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3706                         return BFA_STATUS_OK;
3707                 else {
3708                         bfa_trc(sfp, e10g.b);
3709                         return BFA_STATUS_UNSUPP_SPEED;
3710                 }
3711         }
3712         if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3713             ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3714             ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3715             ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3716             ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3717                 return BFA_STATUS_OK;
3718         else {
3719                 bfa_trc(sfp, portspeed);
3720                 bfa_trc(sfp, fc3.b);
3721                 bfa_trc(sfp, e10g.b);
3722                 return BFA_STATUS_UNSUPP_SPEED;
3723         }
3724 }
3725
3726 /*
3727  *      SFP hmbox handler
3728  */
3729 void
3730 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3731 {
3732         struct bfa_sfp_s *sfp = sfparg;
3733
3734         switch (msg->mh.msg_id) {
3735         case BFI_SFP_I2H_SHOW:
3736                 bfa_sfp_show_comp(sfp, msg);
3737                 break;
3738
3739         case BFI_SFP_I2H_SCN:
3740                 bfa_sfp_scn(sfp, msg);
3741                 break;
3742
3743         default:
3744                 bfa_trc(sfp, msg->mh.msg_id);
3745                 WARN_ON(1);
3746         }
3747 }
3748
3749 /*
3750  *      Return DMA memory needed by sfp module.
3751  */
3752 u32
3753 bfa_sfp_meminfo(void)
3754 {
3755         return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3756 }
3757
3758 /*
3759  *      Attach virtual and physical memory for SFP.
3760  */
3761 void
3762 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3763                 struct bfa_trc_mod_s *trcmod)
3764 {
3765         sfp->dev = dev;
3766         sfp->ioc = ioc;
3767         sfp->trcmod = trcmod;
3768
3769         sfp->cbfn = NULL;
3770         sfp->cbarg = NULL;
3771         sfp->sfpmem = NULL;
3772         sfp->lock = 0;
3773         sfp->data_valid = 0;
3774         sfp->state = BFA_SFP_STATE_INIT;
3775         sfp->state_query_lock = 0;
3776         sfp->state_query_cbfn = NULL;
3777         sfp->state_query_cbarg = NULL;
3778         sfp->media = NULL;
3779         sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3780         sfp->is_elb = BFA_FALSE;
3781
3782         bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3783         bfa_q_qe_init(&sfp->ioc_notify);
3784         bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3785         list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3786 }
3787
3788 /*
3789  *      Claim Memory for SFP
3790  */
3791 void
3792 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3793 {
3794         sfp->dbuf_kva   = dm_kva;
3795         sfp->dbuf_pa    = dm_pa;
3796         memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3797
3798         dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3799         dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3800 }
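/*
 * A minimal wiring sketch (assumed caller, not driver code) showing how
 * bfa_sfp_meminfo(), bfa_sfp_attach() and bfa_sfp_memclaim() fit together.
 * The dma_kva/dma_pa pair stands for DMA-able memory obtained elsewhere
 * and is an assumption of this example.
 *
 *	u32 sz = bfa_sfp_meminfo();	(size, rounded to BFA_DMA_ALIGN_SZ)
 *
 *	bfa_sfp_attach(sfp, ioc, bfad, trcmod);
 *	bfa_sfp_memclaim(sfp, dma_kva, dma_pa);
 */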
3801
3802 /*
3803  * Show SFP eeprom content
3804  *
3805  * @param[in] sfp   - bfa sfp module
3806  *
3807  * @param[out] sfpmem - sfp eeprom data
3808  *
3809  */
3810 bfa_status_t
3811 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3812                 bfa_cb_sfp_t cbfn, void *cbarg)
3813 {
3814
3815         if (!bfa_ioc_is_operational(sfp->ioc)) {
3816                 bfa_trc(sfp, 0);
3817                 return BFA_STATUS_IOC_NON_OP;
3818         }
3819
3820         if (sfp->lock) {
3821                 bfa_trc(sfp, 0);
3822                 return BFA_STATUS_DEVBUSY;
3823         }
3824
3825         sfp->cbfn = cbfn;
3826         sfp->cbarg = cbarg;
3827         sfp->sfpmem = sfpmem;
3828
3829         bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3830         return BFA_STATUS_OK;
3831 }
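/*
 * Usage sketch (illustrative): bfa_sfp_show() is asynchronous, so sfpmem
 * must stay valid until the callback fires. sfp_show_done() is a
 * hypothetical handler assumed to match the bfa_cb_sfp_t convention,
 * and decode_serial_id() is likewise hypothetical.
 *
 *	static void
 *	sfp_show_done(void *cbarg, bfa_status_t status)
 *	{
 *		struct sfp_mem_s *sfpmem = cbarg;
 *
 *		if (status == BFA_STATUS_OK)
 *			decode_serial_id(&sfpmem->srlid_base);
 *	}
 *
 * A non-OK return from bfa_sfp_show() (IOC down, or a show already in
 * flight) means the callback will never be invoked.
 */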
3832
3833 /*
3834  * Return SFP Media type
3835  *
3836  * @param[in] sfp   - bfa sfp module
3837  *
3838  * @param[out] media - detected sfp media type
3839  *
3840  */
3841 bfa_status_t
3842 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3843                 bfa_cb_sfp_t cbfn, void *cbarg)
3844 {
3845         if (!bfa_ioc_is_operational(sfp->ioc)) {
3846                 bfa_trc(sfp, 0);
3847                 return BFA_STATUS_IOC_NON_OP;
3848         }
3849
3850         sfp->media = media;
3851         if (sfp->state == BFA_SFP_STATE_INIT) {
3852                 if (sfp->state_query_lock) {
3853                         bfa_trc(sfp, 0);
3854                         return BFA_STATUS_DEVBUSY;
3855                 } else {
3856                         sfp->state_query_cbfn = cbfn;
3857                         sfp->state_query_cbarg = cbarg;
3858                         bfa_sfp_state_query(sfp);
3859                         return BFA_STATUS_SFP_NOT_READY;
3860                 }
3861         }
3862
3863         bfa_sfp_media_get(sfp);
3864         return BFA_STATUS_OK;
3865 }
3866
3867 /*
3868  * Check if the user-set port speed is allowed by the SFP
3869  *
3870  * @param[in] sfp   - bfa sfp module
3871  * @param[in] portspeed - port speed from user
3872  *
3873  */
3874 bfa_status_t
3875 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3876                 bfa_cb_sfp_t cbfn, void *cbarg)
3877 {
3878         WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3879
3880         if (!bfa_ioc_is_operational(sfp->ioc))
3881                 return BFA_STATUS_IOC_NON_OP;
3882
3883         /* For Mezz cards, all speeds are allowed */
3884         if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3885                 return BFA_STATUS_OK;
3886
3887         /* Check SFP state */
3888         sfp->portspeed = portspeed;
3889         if (sfp->state == BFA_SFP_STATE_INIT) {
3890                 if (sfp->state_query_lock) {
3891                         bfa_trc(sfp, 0);
3892                         return BFA_STATUS_DEVBUSY;
3893                 } else {
3894                         sfp->state_query_cbfn = cbfn;
3895                         sfp->state_query_cbarg = cbarg;
3896                         bfa_sfp_state_query(sfp);
3897                         return BFA_STATUS_SFP_NOT_READY;
3898                 }
3899         }
3900
3901         if (sfp->state == BFA_SFP_STATE_REMOVED ||
3902             sfp->state == BFA_SFP_STATE_FAILED) {
3903                 bfa_trc(sfp, sfp->state);
3904                 return BFA_STATUS_NO_SFP_DEV;
3905         }
3906
3907         if (sfp->state == BFA_SFP_STATE_INSERTED) {
3908                 bfa_trc(sfp, sfp->state);
3909                 return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
3910         }
3911
3912         /* For eloopback, all speeds are allowed */
3913         if (sfp->is_elb)
3914                 return BFA_STATUS_OK;
3915
3916         return bfa_sfp_speed_valid(sfp, portspeed);
3917 }
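/*
 * Caller-side sketch (illustrative): BFA_STATUS_SFP_NOT_READY means the
 * SFP state query was just kicked off and the verdict will be delivered
 * later through cbfn/cbarg, so callers typically pend on it rather than
 * fail. speed_done() is a hypothetical completion handler.
 *
 *	status = bfa_sfp_speed(sfp, BFA_PORT_SPEED_8GBPS, speed_done, port);
 *	if (status == BFA_STATUS_SFP_NOT_READY)
 *		return;			(speed_done() completes the check)
 *	else if (status != BFA_STATUS_OK)
 *		(reject the requested speed)
 */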
3918
3919 /*
3920  *      Flash module specific
3921  */
3922
3923 /*
3924  * The FLASH DMA buffer should be big enough to hold both the MFG block
3925  * and the asic block (64KB) at the same time, and should also be 2KB
3926  * aligned so that a write segment never crosses a sector boundary.
3927  */
3928 #define BFA_FLASH_SEG_SZ        2048
3929 #define BFA_FLASH_DMA_BUF_SZ    \
3930         BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
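/*
 * For example, assuming BFA_ROUNDUP() rounds its first argument up to a
 * multiple of the second and an illustrative 1KB (0x400) MFG block size:
 *
 *	BFA_ROUNDUP(0x10000 + 0x400, 2048) = 0x10800 (67584 bytes)
 */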
3931
3932 static void
3933 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3934                         int inst, int type)
3935 {
3936         struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3937         struct bfa_aen_entry_s  *aen_entry;
3938
3939         bfad_get_aen_entry(bfad, aen_entry);
3940         if (!aen_entry)
3941                 return;
3942
3943         aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3944         aen_entry->aen_data.audit.partition_inst = inst;
3945         aen_entry->aen_data.audit.partition_type = type;
3946
3947         /* Send the AEN notification */
3948         bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3949                                   BFA_AEN_CAT_AUDIT, event);
3950 }
3951
3952 static void
3953 bfa_flash_cb(struct bfa_flash_s *flash)
3954 {
3955         flash->op_busy = 0;
3956         if (flash->cbfn)
3957                 flash->cbfn(flash->cbarg, flash->status);
3958 }
3959
3960 static void
3961 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3962 {
3963         struct bfa_flash_s      *flash = cbarg;
3964
3965         bfa_trc(flash, event);
3966         switch (event) {
3967         case BFA_IOC_E_DISABLED:
3968         case BFA_IOC_E_FAILED:
3969                 if (flash->op_busy) {
3970                         flash->status = BFA_STATUS_IOC_FAILURE;
3971                         flash->cbfn(flash->cbarg, flash->status);
3972                         flash->op_busy = 0;
3973                 }
3974                 break;
3975
3976         default:
3977                 break;
3978         }
3979 }
3980
3981 /*
3982  * Send flash attribute query request.
3983  *
3984  * @param[in] cbarg - callback argument
3985  */
3986 static void
3987 bfa_flash_query_send(void *cbarg)
3988 {
3989         struct bfa_flash_s *flash = cbarg;
3990         struct bfi_flash_query_req_s *msg =
3991                         (struct bfi_flash_query_req_s *) flash->mb.msg;
3992
3993         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3994                 bfa_ioc_portid(flash->ioc));
3995         bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
3996                 flash->dbuf_pa);
3997         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3998 }
3999
4000 /*
4001  * Send flash write request.
4002  *
4003  * @param[in] flash - flash structure
4004  */
4005 static void
4006 bfa_flash_write_send(struct bfa_flash_s *flash)
4007 {
4008         struct bfi_flash_write_req_s *msg =
4009                         (struct bfi_flash_write_req_s *) flash->mb.msg;
4010         u32     len;
4011
4012         msg->type = be32_to_cpu(flash->type);
4013         msg->instance = flash->instance;
4014         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4015         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4016                 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4017         msg->length = be32_to_cpu(len);
4018
4019         /* indicate if it's the last msg of the whole write operation */
4020         msg->last = (len == flash->residue) ? 1 : 0;
4021
4022         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4023                         bfa_ioc_portid(flash->ioc));
4024         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4025         memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4026         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4027
4028         flash->residue -= len;
4029         flash->offset += len;
4030 }
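/*
 * Chunking walk-through (sizes illustrative): assuming BFA_FLASH_DMA_BUF_SZ
 * works out to 67584 bytes, a 150000-byte update is sent as three requests
 * of 67584, 67584 and 14832 bytes. Only the last one carries msg->last = 1,
 * and bfa_flash_intr() re-arms bfa_flash_write_send() on each WRITE_RSP
 * until flash->residue reaches zero.
 */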
4031
4032 /*
4033  * Send flash read request.
4034  *
4035  * @param[in] cbarg - callback argument
4036  */
4037 static void
4038 bfa_flash_read_send(void *cbarg)
4039 {
4040         struct bfa_flash_s *flash = cbarg;
4041         struct bfi_flash_read_req_s *msg =
4042                         (struct bfi_flash_read_req_s *) flash->mb.msg;
4043         u32     len;
4044
4045         msg->type = be32_to_cpu(flash->type);
4046         msg->instance = flash->instance;
4047         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4048         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4049                         flash->residue : BFA_FLASH_DMA_BUF_SZ;
4050         msg->length = be32_to_cpu(len);
4051         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4052                 bfa_ioc_portid(flash->ioc));
4053         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4054         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4055 }
4056
4057 /*
4058  * Send flash erase request.
4059  *
4060  * @param[in] cbarg - callback argument
4061  */
4062 static void
4063 bfa_flash_erase_send(void *cbarg)
4064 {
4065         struct bfa_flash_s *flash = cbarg;
4066         struct bfi_flash_erase_req_s *msg =
4067                         (struct bfi_flash_erase_req_s *) flash->mb.msg;
4068
4069         msg->type = be32_to_cpu(flash->type);
4070         msg->instance = flash->instance;
4071         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4072                         bfa_ioc_portid(flash->ioc));
4073         bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4074 }
4075
4076 /*
4077  * Process flash response messages upon receiving interrupts.
4078  *
4079  * @param[in] flasharg - flash structure
4080  * @param[in] msg - message structure
4081  */
4082 static void
4083 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4084 {
4085         struct bfa_flash_s *flash = flasharg;
4086         u32     status;
4087
4088         union {
4089                 struct bfi_flash_query_rsp_s *query;
4090                 struct bfi_flash_erase_rsp_s *erase;
4091                 struct bfi_flash_write_rsp_s *write;
4092                 struct bfi_flash_read_rsp_s *read;
4093                 struct bfi_flash_event_s *event;
4094                 struct bfi_mbmsg_s   *msg;
4095         } m;
4096
4097         m.msg = msg;
4098         bfa_trc(flash, msg->mh.msg_id);
4099
4100         if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4101                 /* receiving response after ioc failure */
4102                 bfa_trc(flash, 0x9999);
4103                 return;
4104         }
4105
4106         switch (msg->mh.msg_id) {
4107         case BFI_FLASH_I2H_QUERY_RSP:
4108                 status = be32_to_cpu(m.query->status);
4109                 bfa_trc(flash, status);
4110                 if (status == BFA_STATUS_OK) {
4111                         u32     i;
4112                         struct bfa_flash_attr_s *attr, *f;
4113
4114                         attr = (struct bfa_flash_attr_s *) flash->ubuf;
4115                         f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4116                         attr->status = be32_to_cpu(f->status);
4117                         attr->npart = be32_to_cpu(f->npart);
4118                         bfa_trc(flash, attr->status);
4119                         bfa_trc(flash, attr->npart);
4120                         for (i = 0; i < attr->npart; i++) {
4121                                 attr->part[i].part_type =
4122                                         be32_to_cpu(f->part[i].part_type);
4123                                 attr->part[i].part_instance =
4124                                         be32_to_cpu(f->part[i].part_instance);
4125                                 attr->part[i].part_off =
4126                                         be32_to_cpu(f->part[i].part_off);
4127                                 attr->part[i].part_size =
4128                                         be32_to_cpu(f->part[i].part_size);
4129                                 attr->part[i].part_len =
4130                                         be32_to_cpu(f->part[i].part_len);
4131                                 attr->part[i].part_status =
4132                                         be32_to_cpu(f->part[i].part_status);
4133                         }
4134                 }
4135                 flash->status = status;
4136                 bfa_flash_cb(flash);
4137                 break;
4138         case BFI_FLASH_I2H_ERASE_RSP:
4139                 status = be32_to_cpu(m.erase->status);
4140                 bfa_trc(flash, status);
4141                 flash->status = status;
4142                 bfa_flash_cb(flash);
4143                 break;
4144         case BFI_FLASH_I2H_WRITE_RSP:
4145                 status = be32_to_cpu(m.write->status);
4146                 bfa_trc(flash, status);
4147                 if (status != BFA_STATUS_OK || flash->residue == 0) {
4148                         flash->status = status;
4149                         bfa_flash_cb(flash);
4150                 } else {
4151                         bfa_trc(flash, flash->offset);
4152                         bfa_flash_write_send(flash);
4153                 }
4154                 break;
4155         case BFI_FLASH_I2H_READ_RSP:
4156                 status = be32_to_cpu(m.read->status);
4157                 bfa_trc(flash, status);
4158                 if (status != BFA_STATUS_OK) {
4159                         flash->status = status;
4160                         bfa_flash_cb(flash);
4161                 } else {
4162                         u32 len = be32_to_cpu(m.read->length);
4163                         bfa_trc(flash, flash->offset);
4164                         bfa_trc(flash, len);
4165                         memcpy(flash->ubuf + flash->offset,
4166                                 flash->dbuf_kva, len);
4167                         flash->residue -= len;
4168                         flash->offset += len;
4169                         if (flash->residue == 0) {
4170                                 flash->status = status;
4171                                 bfa_flash_cb(flash);
4172                         } else
4173                                 bfa_flash_read_send(flash);
4174                 }
4175                 break;
4176         case BFI_FLASH_I2H_BOOT_VER_RSP:
4177                 break;
4178         case BFI_FLASH_I2H_EVENT:
4179                 status = be32_to_cpu(m.event->status);
4180                 bfa_trc(flash, status);
4181                 if (status == BFA_STATUS_BAD_FWCFG)
4182                         bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4183                 else if (status == BFA_STATUS_INVALID_VENDOR) {
4184                         u32 param;
4185                         param = be32_to_cpu(m.event->param);
4186                         bfa_trc(flash, param);
4187                         bfa_ioc_aen_post(flash->ioc,
4188                                 BFA_IOC_AEN_INVALID_VENDOR);
4189                 }
4190                 break;
4191
4192         default:
4193                 WARN_ON(1);
4194         }
4195 }
4196
4197 /*
4198  * Flash memory info API.
4199  *
4200  * @param[in] mincfg - minimal cfg variable
4201  */
4202 u32
4203 bfa_flash_meminfo(bfa_boolean_t mincfg)
4204 {
4205         /* min driver doesn't need flash */
4206         if (mincfg)
4207                 return 0;
4208         return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4209 }
4210
4211 /*
4212  * Flash attach API.
4213  *
4214  * @param[in] flash - flash structure
4215  * @param[in] ioc  - ioc structure
4216  * @param[in] dev  - device structure
4217  * @param[in] trcmod - trace module
4218  * @param[in] logmod - log module
4219  * @param[in] mincfg - minimal cfg variable
4220 void
4221 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4222                 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4223 {
4224         flash->ioc = ioc;
4225         flash->trcmod = trcmod;
4226         flash->cbfn = NULL;
4227         flash->cbarg = NULL;
4228         flash->op_busy = 0;
4229
4230         bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4231         bfa_q_qe_init(&flash->ioc_notify);
4232         bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4233         list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4234
4235         /* min driver doesn't need flash */
4236         if (mincfg) {
4237                 flash->dbuf_kva = NULL;
4238                 flash->dbuf_pa = 0;
4239         }
4240 }
4241
4242 /*
4243  * Claim memory for flash
4244  *
4245  * @param[in] flash - flash structure
4246  * @param[in] dm_kva - pointer to virtual memory address
4247  * @param[in] dm_pa - physical memory address
4248  * @param[in] mincfg - minimal cfg variable
4249  */
4250 void
4251 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4252                 bfa_boolean_t mincfg)
4253 {
4254         if (mincfg)
4255                 return;
4256
4257         flash->dbuf_kva = dm_kva;
4258         flash->dbuf_pa = dm_pa;
4259         memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4260         dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4261         dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4262 }
4263
4264 /*
4265  * Get flash attribute.
4266  *
4267  * @param[in] flash - flash structure
4268  * @param[in] attr - flash attribute structure
4269  * @param[in] cbfn - callback function
4270  * @param[in] cbarg - callback argument
4271  *
4272  * Return status.
4273  */
4274 bfa_status_t
4275 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4276                 bfa_cb_flash_t cbfn, void *cbarg)
4277 {
4278         bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4279
4280         if (!bfa_ioc_is_operational(flash->ioc))
4281                 return BFA_STATUS_IOC_NON_OP;
4282
4283         if (flash->op_busy) {
4284                 bfa_trc(flash, flash->op_busy);
4285                 return BFA_STATUS_DEVBUSY;
4286         }
4287
4288         flash->op_busy = 1;
4289         flash->cbfn = cbfn;
4290         flash->cbarg = cbarg;
4291         flash->ubuf = (u8 *) attr;
4292         bfa_flash_query_send(flash);
4293
4294         return BFA_STATUS_OK;
4295 }
4296
4297 /*
4298  * Erase flash partition.
4299  *
4300  * @param[in] flash - flash structure
4301  * @param[in] type - flash partition type
4302  * @param[in] instance - flash partition instance
4303  * @param[in] cbfn - callback function
4304  * @param[in] cbarg - callback argument
4305  *
4306  * Return status.
4307  */
4308 bfa_status_t
4309 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4310                 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4311 {
4312         bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4313         bfa_trc(flash, type);
4314         bfa_trc(flash, instance);
4315
4316         if (!bfa_ioc_is_operational(flash->ioc))
4317                 return BFA_STATUS_IOC_NON_OP;
4318
4319         if (flash->op_busy) {
4320                 bfa_trc(flash, flash->op_busy);
4321                 return BFA_STATUS_DEVBUSY;
4322         }
4323
4324         flash->op_busy = 1;
4325         flash->cbfn = cbfn;
4326         flash->cbarg = cbarg;
4327         flash->type = type;
4328         flash->instance = instance;
4329
4330         bfa_flash_erase_send(flash);
4331         bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4332                                 instance, type);
4333         return BFA_STATUS_OK;
4334 }
4335
4336 /*
4337  * Update flash partition.
4338  *
4339  * @param[in] flash - flash structure
4340  * @param[in] type - flash partition type
4341  * @param[in] instance - flash partition instance
4342  * @param[in] buf - update data buffer
4343  * @param[in] len - data buffer length
4344  * @param[in] offset - offset relative to the partition starting address
4345  * @param[in] cbfn - callback function
4346  * @param[in] cbarg - callback argument
4347  *
4348  * Return status.
4349  */
4350 bfa_status_t
4351 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4352                 u8 instance, void *buf, u32 len, u32 offset,
4353                 bfa_cb_flash_t cbfn, void *cbarg)
4354 {
4355         bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4356         bfa_trc(flash, type);
4357         bfa_trc(flash, instance);
4358         bfa_trc(flash, len);
4359         bfa_trc(flash, offset);
4360
4361         if (!bfa_ioc_is_operational(flash->ioc))
4362                 return BFA_STATUS_IOC_NON_OP;
4363
4364         /*
4365          * 'len' must be on a word (4-byte) boundary
4366          * 'offset' must be on a sector (16KB) boundary
4367          */
4368         if (!len || (len & 0x03) || (offset & 0x00003FFF))
4369                 return BFA_STATUS_FLASH_BAD_LEN;
4370
4371         if (type == BFA_FLASH_PART_MFG)
4372                 return BFA_STATUS_EINVAL;
4373
4374         if (flash->op_busy) {
4375                 bfa_trc(flash, flash->op_busy);
4376                 return BFA_STATUS_DEVBUSY;
4377         }
4378
4379         flash->op_busy = 1;
4380         flash->cbfn = cbfn;
4381         flash->cbarg = cbarg;
4382         flash->type = type;
4383         flash->instance = instance;
4384         flash->residue = len;
4385         flash->offset = 0;
4386         flash->addr_off = offset;
4387         flash->ubuf = buf;
4388
4389         bfa_flash_write_send(flash);
4390         return BFA_STATUS_OK;
4391 }
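/*
 * Usage sketch (illustrative): 'len' must be word aligned and 'offset'
 * sector aligned, so a caller updating the boot partition (assumed
 * BFA_FLASH_PART_BOOT enum value) might look as follows; fwbuf/fwlen and
 * the flash_update_done() bfa_cb_flash_t handler are hypothetical.
 *
 *	u32 len = roundup(fwlen, 4);	(word aligned)
 *	u32 offset = 0;			(sector aligned)
 *
 *	status = bfa_flash_update_part(flash, BFA_FLASH_PART_BOOT, 0,
 *			fwbuf, len, offset, flash_update_done, bfad);
 *	if (status != BFA_STATUS_OK)
 *		(bad length/offset, MFG partition, busy, or IOC down)
 */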
4392
4393 /*
4394  * Read flash partition.
4395  *
4396  * @param[in] flash - flash structure
4397  * @param[in] type - flash partition type
4398  * @param[in] instance - flash partition instance
4399  * @param[in] buf - read data buffer
4400  * @param[in] len - data buffer length
4401  * @param[in] offset - offset relative to the partition starting address
4402  * @param[in] cbfn - callback function
4403  * @param[in] cbarg - callback argument
4404  *
4405  * Return status.
4406  */
4407 bfa_status_t
4408 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4409                 u8 instance, void *buf, u32 len, u32 offset,
4410                 bfa_cb_flash_t cbfn, void *cbarg)
4411 {
4412         bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4413         bfa_trc(flash, type);
4414         bfa_trc(flash, instance);
4415         bfa_trc(flash, len);
4416         bfa_trc(flash, offset);
4417
4418         if (!bfa_ioc_is_operational(flash->ioc))
4419                 return BFA_STATUS_IOC_NON_OP;
4420
4421         /*
4422          * 'len' must be on a word (4-byte) boundary
4423          * 'offset' must be on a sector (16KB) boundary
4424          */
4425         if (!len || (len & 0x03) || (offset & 0x00003FFF))
4426                 return BFA_STATUS_FLASH_BAD_LEN;
4427
4428         if (flash->op_busy) {
4429                 bfa_trc(flash, flash->op_busy);
4430                 return BFA_STATUS_DEVBUSY;
4431         }
4432
4433         flash->op_busy = 1;
4434         flash->cbfn = cbfn;
4435         flash->cbarg = cbarg;
4436         flash->type = type;
4437         flash->instance = instance;
4438         flash->residue = len;
4439         flash->offset = 0;
4440         flash->addr_off = offset;
4441         flash->ubuf = buf;
4442         bfa_flash_read_send(flash);
4443
4444         return BFA_STATUS_OK;
4445 }
4446
4447 /*
4448  *      DIAG module specific
4449  */
4450
4451 #define BFA_DIAG_MEMTEST_TOV    50000   /* memtest timeout in msec */
4452 #define CT2_BFA_DIAG_MEMTEST_TOV        (9*30*1000)  /* 4.5 min */
4453
4454 /* IOC event handler */
4455 static void
4456 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4457 {
4458         struct bfa_diag_s *diag = diag_arg;
4459
4460         bfa_trc(diag, event);
4461         bfa_trc(diag, diag->block);
4462         bfa_trc(diag, diag->fwping.lock);
4463         bfa_trc(diag, diag->tsensor.lock);
4464
4465         switch (event) {
4466         case BFA_IOC_E_DISABLED:
4467         case BFA_IOC_E_FAILED:
4468                 if (diag->fwping.lock) {
4469                         diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4470                         diag->fwping.cbfn(diag->fwping.cbarg,
4471                                         diag->fwping.status);
4472                         diag->fwping.lock = 0;
4473                 }
4474
4475                 if (diag->tsensor.lock) {
4476                         diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4477                         diag->tsensor.cbfn(diag->tsensor.cbarg,
4478                                            diag->tsensor.status);
4479                         diag->tsensor.lock = 0;
4480                 }
4481
4482                 if (diag->block) {
4483                         if (diag->timer_active) {
4484                                 bfa_timer_stop(&diag->timer);
4485                                 diag->timer_active = 0;
4486                         }
4487
4488                         diag->status = BFA_STATUS_IOC_FAILURE;
4489                         diag->cbfn(diag->cbarg, diag->status);
4490                         diag->block = 0;
4491                 }
4492                 break;
4493
4494         default:
4495                 break;
4496         }
4497 }
4498
4499 static void
4500 bfa_diag_memtest_done(void *cbarg)
4501 {
4502         struct bfa_diag_s *diag = cbarg;
4503         struct bfa_ioc_s  *ioc = diag->ioc;
4504         struct bfa_diag_memtest_result *res = diag->result;
4505         u32     loff = BFI_BOOT_MEMTEST_RES_ADDR;
4506         u32     pgnum, pgoff, i;
4507
4508         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4509         pgoff = PSS_SMEM_PGOFF(loff);
4510
4511         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4512
4513         for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4514                          sizeof(u32)); i++) {
4515                 /* read test result from smem */
4516                 *((u32 *) res + i) =
4517                         bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4518                 loff += sizeof(u32);
4519         }
4520
4521         /* Reset IOC fwstates to BFI_IOC_UNINIT */
4522         bfa_ioc_reset_fwstate(ioc);
4523
4524         res->status = swab32(res->status);
4525         bfa_trc(diag, res->status);
4526
4527         if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4528                 diag->status = BFA_STATUS_OK;
4529         else {
4530                 diag->status = BFA_STATUS_MEMTEST_FAILED;
4531                 res->addr = swab32(res->addr);
4532                 res->exp = swab32(res->exp);
4533                 res->act = swab32(res->act);
4534                 res->err_status = swab32(res->err_status);
4535                 res->err_status1 = swab32(res->err_status1);
4536                 res->err_addr = swab32(res->err_addr);
4537                 bfa_trc(diag, res->addr);
4538                 bfa_trc(diag, res->exp);
4539                 bfa_trc(diag, res->act);
4540                 bfa_trc(diag, res->err_status);
4541                 bfa_trc(diag, res->err_status1);
4542                 bfa_trc(diag, res->err_addr);
4543         }
4544         diag->timer_active = 0;
4545         diag->cbfn(diag->cbarg, diag->status);
4546         diag->block = 0;
4547 }
4548
4549 /*
4550  * Firmware ping
4551  */
4552
4553 /*
4554  * Perform DMA test directly
4555  */
4556 static void
4557 diag_fwping_send(struct bfa_diag_s *diag)
4558 {
4559         struct bfi_diag_fwping_req_s *fwping_req;
4560         u32     i;
4561
4562         bfa_trc(diag, diag->fwping.dbuf_pa);
4563
4564         /* fill DMA area with pattern */
4565         for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4566                 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4567
4568         /* Fill mbox msg */
4569         fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4570
4571         /* Setup SG list */
4572         bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4573                         diag->fwping.dbuf_pa);
4574         /* Set up dma count */
4575         fwping_req->count = cpu_to_be32(diag->fwping.count);
4576         /* Set up data pattern */
4577         fwping_req->data = diag->fwping.data;
4578
4579         /* build host command */
4580         bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4581                 bfa_ioc_portid(diag->ioc));
4582
4583         /* send mbox cmd */
4584         bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4585 }
4586
4587 static void
4588 diag_fwping_comp(struct bfa_diag_s *diag,
4589                  struct bfi_diag_fwping_rsp_s *diag_rsp)
4590 {
4591         u32     rsp_data = diag_rsp->data;
4592         u8      rsp_dma_status = diag_rsp->dma_status;
4593
4594         bfa_trc(diag, rsp_data);
4595         bfa_trc(diag, rsp_dma_status);
4596
4597         if (rsp_dma_status == BFA_STATUS_OK) {
4598                 u32     i, pat;
4599                 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4600                         diag->fwping.data;
4601                 /* Check mbox data */
4602                 if (diag->fwping.data != rsp_data) {
4603                         bfa_trc(diag, rsp_data);
4604                         diag->fwping.result->dmastatus =
4605                                         BFA_STATUS_DATACORRUPTED;
4606                         diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4607                         diag->fwping.cbfn(diag->fwping.cbarg,
4608                                         diag->fwping.status);
4609                         diag->fwping.lock = 0;
4610                         return;
4611                 }
4612                 /* Check dma pattern */
4613                 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4614                         if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4615                                 bfa_trc(diag, i);
4616                                 bfa_trc(diag, pat);
4617                                 bfa_trc(diag,
4618                                         *((u32 *)diag->fwping.dbuf_kva + i));
4619                                 diag->fwping.result->dmastatus =
4620                                                 BFA_STATUS_DATACORRUPTED;
4621                                 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4622                                 diag->fwping.cbfn(diag->fwping.cbarg,
4623                                                 diag->fwping.status);
4624                                 diag->fwping.lock = 0;
4625                                 return;
4626                         }
4627                 }
4628                 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4629                 diag->fwping.status = BFA_STATUS_OK;
4630                 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4631                 diag->fwping.lock = 0;
4632         } else {
4633                 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4634                 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4635                 diag->fwping.lock = 0;
4636         }
4637 }
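/*
 * Worked example of the expected-pattern selection above: with data =
 * 0x5a5a5a5a, the buffer should hold the original pattern after an even
 * number of pings and its complement after an odd number, so count = 3
 * gives pat = ~0x5a5a5a5a = 0xa5a5a5a5. This matches the code's assumption
 * that firmware inverts the DMA buffer contents on each ping.
 */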
4638
4639 /*
4640  * Temperature Sensor
4641  */
4642
4643 static void
4644 diag_tempsensor_send(struct bfa_diag_s *diag)
4645 {
4646         struct bfi_diag_ts_req_s *msg;
4647
4648         msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4649         bfa_trc(diag, msg->temp);
4650         /* build host command */
4651         bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4652                 bfa_ioc_portid(diag->ioc));
4653         /* send mbox cmd */
4654         bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4655 }
4656
4657 static void
4658 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4659 {
4660         if (!diag->tsensor.lock) {
4661                 /* receiving response after ioc failure */
4662                 bfa_trc(diag, diag->tsensor.lock);
4663                 return;
4664         }
4665
4666         /*
4667          * The ASIC junction tempsensor is a register read operation,
4668          * so it will always return OK.
4669          */
4670         diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4671         diag->tsensor.temp->ts_junc = rsp->ts_junc;
4672         diag->tsensor.temp->ts_brd = rsp->ts_brd;
4673         diag->tsensor.temp->status = BFA_STATUS_OK;
4674
4675         if (rsp->ts_brd) {
4676                 if (rsp->status == BFA_STATUS_OK) {
4677                         diag->tsensor.temp->brd_temp =
4678                                 be16_to_cpu(rsp->brd_temp);
4679                 } else {
4680                         bfa_trc(diag, rsp->status);
4681                         diag->tsensor.temp->brd_temp = 0;
4682                         diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4683                 }
4684         }
4685         bfa_trc(diag, rsp->ts_junc);
4686         bfa_trc(diag, rsp->temp);
4687         bfa_trc(diag, rsp->ts_brd);
4688         bfa_trc(diag, rsp->brd_temp);
4689         diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4690         diag->tsensor.lock = 0;
4691 }
4692
4693 /*
4694  *      LED Test command
4695  */
4696 static void
4697 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4698 {
4699         struct bfi_diag_ledtest_req_s  *msg;
4700
4701         msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4702         /* build host command */
4703         bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4704                         bfa_ioc_portid(diag->ioc));
4705
4706         /*
4707          * convert the freq from N blinks per 10 secs to the
4708          * crossbow ontime value. We do it here because division is needed.
4709          */
4710         if (ledtest->freq)
4711                 ledtest->freq = 500 / ledtest->freq;
4712
4713         if (ledtest->freq == 0)
4714                 ledtest->freq = 1;
4715
4716         bfa_trc(diag, ledtest->freq);
4717         /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4718         msg->cmd = (u8) ledtest->cmd;
4719         msg->color = (u8) ledtest->color;
4720         msg->portid = bfa_ioc_portid(diag->ioc);
4721         msg->led = ledtest->led;
4722         msg->freq = cpu_to_be16(ledtest->freq);
4723
4724         /* send mbox cmd */
4725         bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4726 }
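/*
 * Worked example of the conversion above: a request for 10 blinks per
 * 10 secs becomes 500 / 10 = 50 ontime units, while anything above 500
 * blinks divides down to 0 and is clamped to 1.
 */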
4727
4728 static void
4729 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4730 {
4731         bfa_trc(diag, diag->ledtest.lock);
4732         diag->ledtest.lock = BFA_FALSE;
4733         /* no bfa_cb_queue is needed because driver is not waiting */
4734 }
4735
4736 /*
4737  * Port beaconing
4738  */
4739 static void
4740 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4741 {
4742         struct bfi_diag_portbeacon_req_s *msg;
4743
4744         msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4745         /* build host command */
4746         bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4747                 bfa_ioc_portid(diag->ioc));
4748         msg->beacon = beacon;
4749         msg->period = cpu_to_be32(sec);
4750         /* send mbox cmd */
4751         bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4752 }
4753
4754 static void
4755 diag_portbeacon_comp(struct bfa_diag_s *diag)
4756 {
4757         bfa_trc(diag, diag->beacon.state);
4758         diag->beacon.state = BFA_FALSE;
4759         if (diag->cbfn_beacon)
4760                 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4761 }
4762
4763 /*
4764  *      Diag hmbox handler
4765  */
4766 void
4767 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4768 {
4769         struct bfa_diag_s *diag = diagarg;
4770
4771         switch (msg->mh.msg_id) {
4772         case BFI_DIAG_I2H_PORTBEACON:
4773                 diag_portbeacon_comp(diag);
4774                 break;
4775         case BFI_DIAG_I2H_FWPING:
4776                 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4777                 break;
4778         case BFI_DIAG_I2H_TEMPSENSOR:
4779                 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4780                 break;
4781         case BFI_DIAG_I2H_LEDTEST:
4782                 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4783                 break;
4784         default:
4785                 bfa_trc(diag, msg->mh.msg_id);
4786                 WARN_ON(1);
4787         }
4788 }
4789
4790 /*
4791  * Gen RAM Test
4792  *
4793  *   @param[in] *diag           - diag data struct
4794  *   @param[in] *memtest        - mem test params input from upper layer
4795  *   @param[in] pattern         - mem test pattern
4796  *   @param[in] *result         - mem test result
4797  *   @param[in] cbfn            - mem test callback function
4798  *   @param[in] cbarg           - callback function arg
4799  *
4800  *   @param[out]
4801  */
4802 bfa_status_t
4803 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4804                 u32 pattern, struct bfa_diag_memtest_result *result,
4805                 bfa_cb_diag_t cbfn, void *cbarg)
4806 {
4807         u32     memtest_tov;
4808
4809         bfa_trc(diag, pattern);
4810
4811         if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4812                 return BFA_STATUS_ADAPTER_ENABLED;
4813
4814         /* check to see if there is another destructive diag cmd running */
4815         if (diag->block) {
4816                 bfa_trc(diag, diag->block);
4817                 return BFA_STATUS_DEVBUSY;
4818         } else
4819                 diag->block = 1;
4820
4821         diag->result = result;
4822         diag->cbfn = cbfn;
4823         diag->cbarg = cbarg;
4824
4825         /* download memtest code and take LPU0 out of reset */
4826         bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4827
4828         memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4829                        CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4830         bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4831                         bfa_diag_memtest_done, diag, memtest_tov);
4832         diag->timer_active = 1;
4833         return BFA_STATUS_OK;
4834 }
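/*
 * Usage sketch (illustrative): memtest is destructive and only allowed
 * while the adapter is disabled, and the result struct must stay valid
 * until the timer-driven completion runs. The memtest_params variable and
 * the memtest_done() bfa_cb_diag_t handler are hypothetical.
 *
 *	static struct bfa_diag_memtest_result res;
 *
 *	status = bfa_diag_memtest(diag, &memtest_params, 0xa5a5a5a5,
 *				  &res, memtest_done, bfad);
 *	if (status == BFA_STATUS_ADAPTER_ENABLED)
 *		(disable the IOC first, then retry)
 */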
4835
4836 /*
4837  * DIAG firmware ping command
4838  *
4839  *   @param[in] *diag           - diag data struct
4840  *   @param[in] cnt             - dma loop count for testing PCIE
4841  *   @param[in] data            - data pattern to pass in fw
4842  *   @param[in] *result         - ptr to bfa_diag_results_fwping data struct
4843  *   @param[in] cbfn            - callback function
4844  *   @param[in] *cbarg          - callback function arg
4845  *
4846  *   @param[out]
4847  */
4848 bfa_status_t
4849 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4850                 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4851                 void *cbarg)
4852 {
4853         bfa_trc(diag, cnt);
4854         bfa_trc(diag, data);
4855
4856         if (!bfa_ioc_is_operational(diag->ioc))
4857                 return BFA_STATUS_IOC_NON_OP;
4858
4859         if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4860             ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4861                 return BFA_STATUS_CMD_NOTSUPP;
4862
4863         /* check to see if there is another destructive diag cmd running */
4864         if (diag->block || diag->fwping.lock) {
4865                 bfa_trc(diag, diag->block);
4866                 bfa_trc(diag, diag->fwping.lock);
4867                 return BFA_STATUS_DEVBUSY;
4868         }
4869
4870         /* Initialization */
4871         diag->fwping.lock = 1;
4872         diag->fwping.cbfn = cbfn;
4873         diag->fwping.cbarg = cbarg;
4874         diag->fwping.result = result;
4875         diag->fwping.data = data;
4876         diag->fwping.count = cnt;
4877
4878         /* Init test results */
4879         diag->fwping.result->data = 0;
4880         diag->fwping.result->status = BFA_STATUS_OK;
4881
4882         /* kick off the first ping */
4883         diag_fwping_send(diag);
4884         return BFA_STATUS_OK;
4885 }
4886
4887 /*
4888  * Read Temperature Sensor
4889  *
4890  *   @param[in] *diag           - diag data struct
4891  *   @param[in] *result         - ptr to bfa_diag_results_tempsensor_s struct
4892  *   @param[in] cbfn            - callback function
4893  *   @param[in] *cbarg          - callback function arg
4894  *
4895  *   @param[out]
4896  */
4897 bfa_status_t
4898 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4899                 struct bfa_diag_results_tempsensor_s *result,
4900                 bfa_cb_diag_t cbfn, void *cbarg)
4901 {
4902         /* check to see if there is a destructive diag cmd running */
4903         if (diag->block || diag->tsensor.lock) {
4904                 bfa_trc(diag, diag->block);
4905                 bfa_trc(diag, diag->tsensor.lock);
4906                 return BFA_STATUS_DEVBUSY;
4907         }
4908
4909         if (!bfa_ioc_is_operational(diag->ioc))
4910                 return BFA_STATUS_IOC_NON_OP;
4911
4912         /* Init diag mod params */
4913         diag->tsensor.lock = 1;
4914         diag->tsensor.temp = result;
4915         diag->tsensor.cbfn = cbfn;
4916         diag->tsensor.cbarg = cbarg;
4917
4918         /* Send msg to fw */
4919         diag_tempsensor_send(diag);
4920
4921         return BFA_STATUS_OK;
4922 }
4923
4924 /*
4925  * LED Test command
4926  *
4927  *   @param[in] *diag           - diag data struct
4928  *   @param[in] *ledtest        - ptr to ledtest data structure
4929  *
4930  *   @param[out]
4931  */
4932 bfa_status_t
4933 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4934 {
4935         bfa_trc(diag, ledtest->cmd);
4936
4937         if (!bfa_ioc_is_operational(diag->ioc))
4938                 return BFA_STATUS_IOC_NON_OP;
4939
4940         if (diag->beacon.state)
4941                 return BFA_STATUS_BEACON_ON;
4942
4943         if (diag->ledtest.lock)
4944                 return BFA_STATUS_LEDTEST_OP;
4945
4946         /* Send msg to fw */
4947         diag->ledtest.lock = BFA_TRUE;
4948         diag_ledtest_send(diag, ledtest);
4949
4950         return BFA_STATUS_OK;
4951 }
4952
4953 /*
4954  * Port beaconing command
4955  *
4956  *   @param[in] *diag           - diag data struct
4957  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
4958  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
4959  *   @param[in] sec             - beaconing duration in seconds
4960  *
4961  *   @param[out]
4962  */
4963 bfa_status_t
4964 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4965                 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4966 {
4967         bfa_trc(diag, beacon);
4968         bfa_trc(diag, link_e2e_beacon);
4969         bfa_trc(diag, sec);
4970
4971         if (!bfa_ioc_is_operational(diag->ioc))
4972                 return BFA_STATUS_IOC_NON_OP;
4973
4974         if (diag->ledtest.lock)
4975                 return BFA_STATUS_LEDTEST_OP;
4976
4977         if (diag->beacon.state && beacon)       /* beacon already on */
4978                 return BFA_STATUS_BEACON_ON;
4979
4980         diag->beacon.state      = beacon;
4981         diag->beacon.link_e2e   = link_e2e_beacon;
4982         if (diag->cbfn_beacon)
4983                 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4984
4985         /* Send msg to fw */
4986         diag_portbeacon_send(diag, beacon, sec);
4987
4988         return BFA_STATUS_OK;
4989 }
4990
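/*
 * Illustrative usage sketch (not part of the driver): as the guards above
 * show, beaconing is refused while an LED test holds the lock, and asking
 * for the beacon while it is already on returns BFA_STATUS_BEACON_ON.
 */
#if 0
        /* blink the port LED for 30 seconds, no end-to-end link beacon */
        bfa_diag_beacon_port(diag, BFA_TRUE, BFA_FALSE, 30);
        /* ...and later turn it back off */
        bfa_diag_beacon_port(diag, BFA_FALSE, BFA_FALSE, 0);
#endif
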
4991 /*
4992  * Return DMA memory needed by diag module.
4993  */
4994 u32
4995 bfa_diag_meminfo(void)
4996 {
4997         return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4998 }
4999
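/*
 * Worked example, assuming the conventional BFA_ROUNDUP(_l, _s) definition
 * of ((_l) + (_s) - 1) & ~((_s) - 1) with a power-of-two _s: a 0x2000 byte
 * buffer with 256 byte alignment yields (0x2000 + 0xff) & ~0xff = 0x2000
 * (already aligned), while 0x2001 would round up to 0x2100.
 */
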
5000 /*
5001  *      Attach virtual and physical memory for Diag.
5002  */
5003 void
5004 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5005         bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5006 {
5007         diag->dev = dev;
5008         diag->ioc = ioc;
5009         diag->trcmod = trcmod;
5010
5011         diag->block = 0;
5012         diag->cbfn = NULL;
5013         diag->cbarg = NULL;
5014         diag->result = NULL;
5015         diag->cbfn_beacon = cbfn_beacon;
5016
5017         bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5018         bfa_q_qe_init(&diag->ioc_notify);
5019         bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5020         list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5021 }
5022
5023 void
5024 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5025 {
5026         diag->fwping.dbuf_kva = dm_kva;
5027         diag->fwping.dbuf_pa = dm_pa;
5028         memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5029 }
5030
5031 /*
5032  *      PHY module specific
5033  */
5034 #define BFA_PHY_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
5035 #define BFA_PHY_LOCK_STATUS     0x018878        /* phy semaphore status reg */
5036
5037 static void
5038 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5039 {
5040         int i, m = sz >> 2;
5041
5042         for (i = 0; i < m; i++)
5043                 obuf[i] = be32_to_cpu(ibuf[i]);
5044 }
5045
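/*
 * Note on bfa_phy_ntoh32() above: sz is a byte count, so m = sz >> 2 is the
 * number of 32-bit words converted; callers pass sizeof() the destination
 * structure, e.g. a 16-byte structure yields four be32_to_cpu() calls.
 */
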
5046 static bfa_boolean_t
5047 bfa_phy_present(struct bfa_phy_s *phy)
5048 {
5049         return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5050 }
5051
5052 static void
5053 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5054 {
5055         struct bfa_phy_s *phy = cbarg;
5056
5057         bfa_trc(phy, event);
5058
5059         switch (event) {
5060         case BFA_IOC_E_DISABLED:
5061         case BFA_IOC_E_FAILED:
5062                 if (phy->op_busy) {
5063                         phy->status = BFA_STATUS_IOC_FAILURE;
5064                         phy->cbfn(phy->cbarg, phy->status);
5065                         phy->op_busy = 0;
5066                 }
5067                 break;
5068
5069         default:
5070                 break;
5071         }
5072 }
5073
5074 /*
5075  * Send phy attribute query request.
5076  *
5077  * @param[in] cbarg - callback argument
5078  */
5079 static void
5080 bfa_phy_query_send(void *cbarg)
5081 {
5082         struct bfa_phy_s *phy = cbarg;
5083         struct bfi_phy_query_req_s *msg =
5084                         (struct bfi_phy_query_req_s *) phy->mb.msg;
5085
5086         msg->instance = phy->instance;
5087         bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5088                 bfa_ioc_portid(phy->ioc));
5089         bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5090         bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5091 }
5092
5093 /*
5094  * Send phy write request.
5095  *
5096  * @param[in] cbarg - callback argument
5097  */
5098 static void
5099 bfa_phy_write_send(void *cbarg)
5100 {
5101         struct bfa_phy_s *phy = cbarg;
5102         struct bfi_phy_write_req_s *msg =
5103                         (struct bfi_phy_write_req_s *) phy->mb.msg;
5104         u32     len;
5105         u16     *buf, *dbuf;
5106         int     i, sz;
5107
5108         msg->instance = phy->instance;
5109         msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5110         len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5111                         phy->residue : BFA_PHY_DMA_BUF_SZ;
5112         msg->length = cpu_to_be32(len);
5113
5114         /* indicate if it's the last msg of the whole write operation */
5115         msg->last = (len == phy->residue) ? 1 : 0;
5116
5117         bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5118                 bfa_ioc_portid(phy->ioc));
5119         bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5120
5121         buf = (u16 *) (phy->ubuf + phy->offset);
5122         dbuf = (u16 *)phy->dbuf_kva;
5123         sz = len >> 1;
5124         for (i = 0; i < sz; i++)
5125                 dbuf[i] = cpu_to_be16(buf[i]);
5126
5127         bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5128
5129         phy->residue -= len;
5130         phy->offset += len;
5131 }
5132
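/*
 * Worked example of the chunking above: with BFA_PHY_DMA_BUF_SZ = 0x2000
 * (8192 bytes), a 20000-byte write is issued as three requests of 8192,
 * 8192 and 3616 bytes. msg->last is set only on the third request, where
 * len == phy->residue; offset and residue advance after every send.
 */
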
5133 /*
5134  * Send phy read request.
5135  *
5136  * @param[in] cbarg - callback argument
5137  */
5138 static void
5139 bfa_phy_read_send(void *cbarg)
5140 {
5141         struct bfa_phy_s *phy = cbarg;
5142         struct bfi_phy_read_req_s *msg =
5143                         (struct bfi_phy_read_req_s *) phy->mb.msg;
5144         u32     len;
5145
5146         msg->instance = phy->instance;
5147         msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5148         len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5149                         phy->residue : BFA_PHY_DMA_BUF_SZ;
5150         msg->length = cpu_to_be32(len);
5151         bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5152                 bfa_ioc_portid(phy->ioc));
5153         bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5154         bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5155 }
5156
5157 /*
5158  * Send phy stats request.
5159  *
5160  * @param[in] cbarg - callback argument
5161  */
5162 static void
5163 bfa_phy_stats_send(void *cbarg)
5164 {
5165         struct bfa_phy_s *phy = cbarg;
5166         struct bfi_phy_stats_req_s *msg =
5167                         (struct bfi_phy_stats_req_s *) phy->mb.msg;
5168
5169         msg->instance = phy->instance;
5170         bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5171                 bfa_ioc_portid(phy->ioc));
5172         bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5173         bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5174 }
5175
5176 /*
5177  * Phy memory info API.
5178  *
5179  * @param[in] mincfg - minimal cfg variable
5180  */
5181 u32
5182 bfa_phy_meminfo(bfa_boolean_t mincfg)
5183 {
5184         /* min driver doesn't need phy */
5185         if (mincfg)
5186                 return 0;
5187
5188         return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5189 }
5190
5191 /*
5192  * Phy attach API.
5193  *
5194  * @param[in] phy - phy structure
5195  * @param[in] ioc  - ioc structure
5196  * @param[in] dev  - device structure
5197  * @param[in] trcmod - trace module
5198  * @param[in] mincfg - minimal cfg variable
5199  */
5200 void
5201 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5202                 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5203 {
5204         phy->ioc = ioc;
5205         phy->trcmod = trcmod;
5206         phy->cbfn = NULL;
5207         phy->cbarg = NULL;
5208         phy->op_busy = 0;
5209
5210         bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5211         bfa_q_qe_init(&phy->ioc_notify);
5212         bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5213         list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5214
5215         /* min driver doesn't need phy */
5216         if (mincfg) {
5217                 phy->dbuf_kva = NULL;
5218                 phy->dbuf_pa = 0;
5219         }
5220 }
5221
5222 /*
5223  * Claim memory for phy
5224  *
5225  * @param[in] phy - phy structure
5226  * @param[in] dm_kva - pointer to virtual memory address
5227  * @param[in] dm_pa - physical memory address
5228  * @param[in] mincfg - minimal cfg variable
5229  */
5230 void
5231 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5232                 bfa_boolean_t mincfg)
5233 {
5234         if (mincfg)
5235                 return;
5236
5237         phy->dbuf_kva = dm_kva;
5238         phy->dbuf_pa = dm_pa;
5239         memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5240         dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5241         dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5242 }
5243
5244 bfa_boolean_t
5245 bfa_phy_busy(struct bfa_ioc_s *ioc)
5246 {
5247         void __iomem    *rb;
5248
5249         rb = bfa_ioc_bar0(ioc);
5250         return readl(rb + BFA_PHY_LOCK_STATUS);
5251 }
5252
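/*
 * Note: bfa_phy_busy() above reads the phy semaphore status register
 * directly, and the entry points below treat any nonzero value as busy,
 * combining it with the driver-side op_busy flag before starting a new
 * operation.
 */
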
5253 /*
5254  * Get phy attribute.
5255  *
5256  * @param[in] phy - phy structure
5257  * @param[in] attr - phy attribute structure
5258  * @param[in] cbfn - callback function
5259  * @param[in] cbarg - callback argument
5260  *
5261  * Return status.
5262  */
5263 bfa_status_t
5264 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5265                 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5266 {
5267         bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5268         bfa_trc(phy, instance);
5269
5270         if (!bfa_phy_present(phy))
5271                 return BFA_STATUS_PHY_NOT_PRESENT;
5272
5273         if (!bfa_ioc_is_operational(phy->ioc))
5274                 return BFA_STATUS_IOC_NON_OP;
5275
5276         if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5277                 bfa_trc(phy, phy->op_busy);
5278                 return BFA_STATUS_DEVBUSY;
5279         }
5280
5281         phy->op_busy = 1;
5282         phy->cbfn = cbfn;
5283         phy->cbarg = cbarg;
5284         phy->instance = instance;
5285         phy->ubuf = (u8 *) attr;
5286         bfa_phy_query_send(phy);
5287
5288         return BFA_STATUS_OK;
5289 }
5290
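/*
 * Illustrative usage sketch (not part of the driver): attr must remain
 * valid until the callback runs, because the BFI_PHY_I2H_QUERY_RSP handler
 * below copies the DMA buffer into it. The my_* names are assumptions made
 * for this example only.
 */
#if 0
static struct bfa_phy_attr_s my_phy_attr;

static void
my_phy_done(void *cbarg, bfa_status_t status)
{
        /* on BFA_STATUS_OK, my_phy_attr has been filled in */
}

        /* inside a hypothetical caller, with struct bfa_phy_s *phy, instance 0: */
        bfa_phy_get_attr(phy, 0, &my_phy_attr, my_phy_done, NULL);
#endif
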
5291 /*
5292  * Get phy stats.
5293  *
5294  * @param[in] phy - phy structure
5295  * @param[in] instance - phy image instance
5296  * @param[in] stats - pointer to phy stats
5297  * @param[in] cbfn - callback function
5298  * @param[in] cbarg - callback argument
5299  *
5300  * Return status.
5301  */
5302 bfa_status_t
5303 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5304                 struct bfa_phy_stats_s *stats,
5305                 bfa_cb_phy_t cbfn, void *cbarg)
5306 {
5307         bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5308         bfa_trc(phy, instance);
5309
5310         if (!bfa_phy_present(phy))
5311                 return BFA_STATUS_PHY_NOT_PRESENT;
5312
5313         if (!bfa_ioc_is_operational(phy->ioc))
5314                 return BFA_STATUS_IOC_NON_OP;
5315
5316         if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5317                 bfa_trc(phy, phy->op_busy);
5318                 return BFA_STATUS_DEVBUSY;
5319         }
5320
5321         phy->op_busy = 1;
5322         phy->cbfn = cbfn;
5323         phy->cbarg = cbarg;
5324         phy->instance = instance;
5325         phy->ubuf = (u8 *) stats;
5326         bfa_phy_stats_send(phy);
5327
5328         return BFA_STATUS_OK;
5329 }
5330
5331 /*
5332  * Update phy image.
5333  *
5334  * @param[in] phy - phy structure
5335  * @param[in] instance - phy image instance
5336  * @param[in] buf - update data buffer
5337  * @param[in] len - data buffer length
5338  * @param[in] offset - offset relative to starting address
5339  * @param[in] cbfn - callback function
5340  * @param[in] cbarg - callback argument
5341  *
5342  * Return status.
5343  */
5344 bfa_status_t
5345 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5346                 void *buf, u32 len, u32 offset,
5347                 bfa_cb_phy_t cbfn, void *cbarg)
5348 {
5349         bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5350         bfa_trc(phy, instance);
5351         bfa_trc(phy, len);
5352         bfa_trc(phy, offset);
5353
5354         if (!bfa_phy_present(phy))
5355                 return BFA_STATUS_PHY_NOT_PRESENT;
5356
5357         if (!bfa_ioc_is_operational(phy->ioc))
5358                 return BFA_STATUS_IOC_NON_OP;
5359
5360         /* 'len' must be a nonzero multiple of 4 (word aligned) */
5361         if (!len || (len & 0x03))
5362                 return BFA_STATUS_FAILED;
5363
5364         if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5365                 bfa_trc(phy, phy->op_busy);
5366                 return BFA_STATUS_DEVBUSY;
5367         }
5368
5369         phy->op_busy = 1;
5370         phy->cbfn = cbfn;
5371         phy->cbarg = cbarg;
5372         phy->instance = instance;
5373         phy->residue = len;
5374         phy->offset = 0;
5375         phy->addr_off = offset;
5376         phy->ubuf = buf;
5377
5378         bfa_phy_write_send(phy);
5379         return BFA_STATUS_OK;
5380 }
5381
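/*
 * Illustrative usage sketch (not part of the driver), reusing the
 * hypothetical my_phy_done callback from the sketch above: the length must
 * be a nonzero multiple of 4 and only one phy operation may be in flight.
 */
#if 0
        /* my_img/my_img_len are assumed to hold the new phy image */
        if (my_img_len & 0x03)
                return BFA_STATUS_FAILED;       /* unaligned image */
        bfa_phy_update(phy, 0, my_img, my_img_len, 0, my_phy_done, NULL);
#endif
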
5382 /*
5383  * Read phy image.
5384  *
5385  * @param[in] phy - phy structure
5386  * @param[in] instance - phy image instance
5387  * @param[in] buf - read data buffer
5388  * @param[in] len - data buffer length
5389  * @param[in] offset - offset relative to starting address
5390  * @param[in] cbfn - callback function
5391  * @param[in] cbarg - callback argument
5392  *
5393  * Return status.
5394  */
5395 bfa_status_t
5396 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5397                 void *buf, u32 len, u32 offset,
5398                 bfa_cb_phy_t cbfn, void *cbarg)
5399 {
5400         bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5401         bfa_trc(phy, instance);
5402         bfa_trc(phy, len);
5403         bfa_trc(phy, offset);
5404
5405         if (!bfa_phy_present(phy))
5406                 return BFA_STATUS_PHY_NOT_PRESENT;
5407
5408         if (!bfa_ioc_is_operational(phy->ioc))
5409                 return BFA_STATUS_IOC_NON_OP;
5410
5411         /* 'len' must be a nonzero multiple of 4 (word aligned) */
5412         if (!len || (len & 0x03))
5413                 return BFA_STATUS_FAILED;
5414
5415         if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5416                 bfa_trc(phy, phy->op_busy);
5417                 return BFA_STATUS_DEVBUSY;
5418         }
5419
5420         phy->op_busy = 1;
5421         phy->cbfn = cbfn;
5422         phy->cbarg = cbarg;
5423         phy->instance = instance;
5424         phy->residue = len;
5425         phy->offset = 0;
5426         phy->addr_off = offset;
5427         phy->ubuf = buf;
5428         bfa_phy_read_send(phy);
5429
5430         return BFA_STATUS_OK;
5431 }
5432
5433 /*
5434  * Process phy response messages upon receiving interrupts.
5435  *
5436  * @param[in] phyarg - phy structure
5437  * @param[in] msg - message structure
5438  */
5439 void
5440 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5441 {
5442         struct bfa_phy_s *phy = phyarg;
5443         u32     status;
5444
5445         union {
5446                 struct bfi_phy_query_rsp_s *query;
5447                 struct bfi_phy_stats_rsp_s *stats;
5448                 struct bfi_phy_write_rsp_s *write;
5449                 struct bfi_phy_read_rsp_s *read;
5450                 struct bfi_mbmsg_s   *msg;
5451         } m;
5452
5453         m.msg = msg;
5454         bfa_trc(phy, msg->mh.msg_id);
5455
5456         if (!phy->op_busy) {
5457                 /* receiving response after ioc failure */
5458                 bfa_trc(phy, 0x9999);
5459                 return;
5460         }
5461
5462         switch (msg->mh.msg_id) {
5463         case BFI_PHY_I2H_QUERY_RSP:
5464                 status = be32_to_cpu(m.query->status);
5465                 bfa_trc(phy, status);
5466
5467                 if (status == BFA_STATUS_OK) {
5468                         struct bfa_phy_attr_s *attr =
5469                                 (struct bfa_phy_attr_s *) phy->ubuf;
5470                         bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5471                                         sizeof(struct bfa_phy_attr_s));
5472                         bfa_trc(phy, attr->status);
5473                         bfa_trc(phy, attr->length);
5474                 }
5475
5476                 phy->status = status;
5477                 phy->op_busy = 0;
5478                 if (phy->cbfn)
5479                         phy->cbfn(phy->cbarg, phy->status);
5480                 break;
5481         case BFI_PHY_I2H_STATS_RSP:
5482                 status = be32_to_cpu(m.stats->status);
5483                 bfa_trc(phy, status);
5484
5485                 if (status == BFA_STATUS_OK) {
5486                         struct bfa_phy_stats_s *stats =
5487                                 (struct bfa_phy_stats_s *) phy->ubuf;
5488                         bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5489                                 sizeof(struct bfa_phy_stats_s));
5490                         bfa_trc(phy, stats->status);
5491                 }
5492
5493                 phy->status = status;
5494                 phy->op_busy = 0;
5495                 if (phy->cbfn)
5496                         phy->cbfn(phy->cbarg, phy->status);
5497                 break;
5498         case BFI_PHY_I2H_WRITE_RSP:
5499                 status = be32_to_cpu(m.write->status);
5500                 bfa_trc(phy, status);
5501
5502                 if (status != BFA_STATUS_OK || phy->residue == 0) {
5503                         phy->status = status;
5504                         phy->op_busy = 0;
5505                         if (phy->cbfn)
5506                                 phy->cbfn(phy->cbarg, phy->status);
5507                 } else {
5508                         bfa_trc(phy, phy->offset);
5509                         bfa_phy_write_send(phy);
5510                 }
5511                 break;
5512         case BFI_PHY_I2H_READ_RSP:
5513                 status = be32_to_cpu(m.read->status);
5514                 bfa_trc(phy, status);
5515
5516                 if (status != BFA_STATUS_OK) {
5517                         phy->status = status;
5518                         phy->op_busy = 0;
5519                         if (phy->cbfn)
5520                                 phy->cbfn(phy->cbarg, phy->status);
5521                 } else {
5522                         u32 len = be32_to_cpu(m.read->length);
5523                         u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5524                         u16 *dbuf = (u16 *)phy->dbuf_kva;
5525                         int i, sz = len >> 1;
5526
5527                         bfa_trc(phy, phy->offset);
5528                         bfa_trc(phy, len);
5529
5530                         for (i = 0; i < sz; i++)
5531                                 buf[i] = be16_to_cpu(dbuf[i]);
5532
5533                         phy->residue -= len;
5534                         phy->offset += len;
5535
5536                         if (phy->residue == 0) {
5537                                 phy->status = status;
5538                                 phy->op_busy = 0;
5539                                 if (phy->cbfn)
5540                                         phy->cbfn(phy->cbarg, phy->status);
5541                         } else
5542                                 bfa_phy_read_send(phy);
5543                 }
5544                 break;
5545         default:
5546                 WARN_ON(1);
5547         }
5548 }
5549
5550 /*
5551  *      DCONF module specific
5552  */
5553
5554 BFA_MODULE(dconf);
5555
5556 /*
5557  * DCONF state machine events
5558  */
5559 enum bfa_dconf_event {
5560         BFA_DCONF_SM_INIT               = 1,    /* dconf Init */
5561         BFA_DCONF_SM_FLASH_COMP         = 2,    /* flash read/write completed */
5562         BFA_DCONF_SM_WR                 = 3,    /* binding change, map */
5563         BFA_DCONF_SM_TIMEOUT            = 4,    /* flush timer expiry */
5564         BFA_DCONF_SM_EXIT               = 5,    /* exit dconf module */
5565         BFA_DCONF_SM_IOCDISABLE         = 6,    /* IOC disable event */
5566 };
5567
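/*
 * Rough transition summary of the handlers that follow (derived from the
 * code below, shown here for orientation only):
 *
 *      uninit --INIT--> flash_read --FLASH_COMP--> ready --WR--> dirty
 *      dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *      dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 *      dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 */
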
5568 /* forward declaration of DCONF state machine */
5569 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5570                                 enum bfa_dconf_event event);
5571 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5572                                 enum bfa_dconf_event event);
5573 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5574                                 enum bfa_dconf_event event);
5575 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5576                                 enum bfa_dconf_event event);
5577 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5578                                 enum bfa_dconf_event event);
5579 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5580                                 enum bfa_dconf_event event);
5581 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5582                                 enum bfa_dconf_event event);
5583
5584 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5585 static void bfa_dconf_timer(void *cbarg);
5586 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5587 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5588
5589 /*
5590  * Beginning state of dconf module. Waiting for an event to start.
5591  */
5592 static void
5593 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5594 {
5595         bfa_status_t bfa_status;
5596         bfa_trc(dconf->bfa, event);
5597
5598         switch (event) {
5599         case BFA_DCONF_SM_INIT:
5600                 if (dconf->min_cfg) {
5601                         bfa_trc(dconf->bfa, dconf->min_cfg);
5602                         bfa_fsm_send_event(&dconf->bfa->iocfc,
5603                                         IOCFC_E_DCONF_DONE);
5604                         return;
5605                 }
5606                 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5607                 bfa_timer_start(dconf->bfa, &dconf->timer,
5608                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5609                 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5610                                         BFA_FLASH_PART_DRV, dconf->instance,
5611                                         dconf->dconf,
5612                                         sizeof(struct bfa_dconf_s), 0,
5613                                         bfa_dconf_init_cb, dconf->bfa);
5614                 if (bfa_status != BFA_STATUS_OK) {
5615                         bfa_timer_stop(&dconf->timer);
5616                         bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5617                         bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5618                         return;
5619                 }
5620                 break;
5621         case BFA_DCONF_SM_EXIT:
5622                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); /* fall through */
5623         case BFA_DCONF_SM_IOCDISABLE:
5624         case BFA_DCONF_SM_WR:
5625         case BFA_DCONF_SM_FLASH_COMP:
5626                 break;
5627         default:
5628                 bfa_sm_fault(dconf->bfa, event);
5629         }
5630 }
5631
5632 /*
5633  * Read flash for dconf entries and make a call back to the driver once done.
5634  */
5635 static void
5636 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5637                         enum bfa_dconf_event event)
5638 {
5639         bfa_trc(dconf->bfa, event);
5640
5641         switch (event) {
5642         case BFA_DCONF_SM_FLASH_COMP:
5643                 bfa_timer_stop(&dconf->timer);
5644                 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5645                 break;
5646         case BFA_DCONF_SM_TIMEOUT:
5647                 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5648                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
5649                 break;
5650         case BFA_DCONF_SM_EXIT:
5651                 bfa_timer_stop(&dconf->timer);
5652                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5653                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5654                 break;
5655         case BFA_DCONF_SM_IOCDISABLE:
5656                 bfa_timer_stop(&dconf->timer);
5657                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5658                 break;
5659         default:
5660                 bfa_sm_fault(dconf->bfa, event);
5661         }
5662 }
5663
5664 /*
5665  * DCONF module is in the ready state; initialization is complete.
5666  */
5667 static void
5668 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5669 {
5670         bfa_trc(dconf->bfa, event);
5671
5672         switch (event) {
5673         case BFA_DCONF_SM_WR:
5674                 bfa_timer_start(dconf->bfa, &dconf->timer,
5675                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5676                 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5677                 break;
5678         case BFA_DCONF_SM_EXIT:
5679                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5680                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5681                 break;
5682         case BFA_DCONF_SM_INIT:
5683         case BFA_DCONF_SM_IOCDISABLE:
5684                 break;
5685         default:
5686                 bfa_sm_fault(dconf->bfa, event);
5687         }
5688 }
5689
5690 /*
5691  * Entries are dirty; write them back to flash.
5692  */
5693
5694 static void
5695 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5696 {
5697         bfa_trc(dconf->bfa, event);
5698
5699         switch (event) {
5700         case BFA_DCONF_SM_TIMEOUT:
5701                 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5702                 bfa_dconf_flash_write(dconf);
5703                 break;
5704         case BFA_DCONF_SM_WR:
5705                 bfa_timer_stop(&dconf->timer);
5706                 bfa_timer_start(dconf->bfa, &dconf->timer,
5707                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5708                 break;
5709         case BFA_DCONF_SM_EXIT:
5710                 bfa_timer_stop(&dconf->timer);
5711                 bfa_timer_start(dconf->bfa, &dconf->timer,
5712                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5713                 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5714                 bfa_dconf_flash_write(dconf);
5715                 break;
5716         case BFA_DCONF_SM_FLASH_COMP:
5717                 break;
5718         case BFA_DCONF_SM_IOCDISABLE:
5719                 bfa_timer_stop(&dconf->timer);
5720                 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5721                 break;
5722         default:
5723                 bfa_sm_fault(dconf->bfa, event);
5724         }
5725 }
5726
5727 /*
5728  * Sync the dconf entries to the flash.
5729  */
5730 static void
5731 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5732                         enum bfa_dconf_event event)
5733 {
5734         bfa_trc(dconf->bfa, event);
5735
5736         switch (event) {
5737         case BFA_DCONF_SM_IOCDISABLE:
5738         case BFA_DCONF_SM_FLASH_COMP:
5739                 bfa_timer_stop(&dconf->timer);  /* fall through */
5740         case BFA_DCONF_SM_TIMEOUT:
5741                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5742                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5743                 break;
5744         default:
5745                 bfa_sm_fault(dconf->bfa, event);
5746         }
5747 }
5748
5749 static void
5750 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5751 {
5752         bfa_trc(dconf->bfa, event);
5753
5754         switch (event) {
5755         case BFA_DCONF_SM_FLASH_COMP:
5756                 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5757                 break;
5758         case BFA_DCONF_SM_WR:
5759                 bfa_timer_start(dconf->bfa, &dconf->timer,
5760                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5761                 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5762                 break;
5763         case BFA_DCONF_SM_EXIT:
5764                 bfa_timer_start(dconf->bfa, &dconf->timer,
5765                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5766                 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5767                 break;
5768         case BFA_DCONF_SM_IOCDISABLE:
5769                 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5770                 break;
5771         default:
5772                 bfa_sm_fault(dconf->bfa, event);
5773         }
5774 }
5775
5776 static void
5777 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5778                         enum bfa_dconf_event event)
5779 {
5780         bfa_trc(dconf->bfa, event);
5781
5782         switch (event) {
5783         case BFA_DCONF_SM_INIT:
5784                 bfa_timer_start(dconf->bfa, &dconf->timer,
5785                         bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5786                 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5787                 break;
5788         case BFA_DCONF_SM_EXIT:
5789                 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5790                 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5791                 break;
5792         case BFA_DCONF_SM_IOCDISABLE:
5793                 break;
5794         default:
5795                 bfa_sm_fault(dconf->bfa, event);
5796         }
5797 }
5798
5799 /*
5800  * Compute memory needed by the DRV_CFG (dconf) module and record it in meminfo.
5801  */
5802 static void
5803 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5804                   struct bfa_s *bfa)
5805 {
5806         struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5807
5808         if (cfg->drvcfg.min_cfg)
5809                 bfa_mem_kva_setup(meminfo, dconf_kva,
5810                                 sizeof(struct bfa_dconf_hdr_s));
5811         else
5812                 bfa_mem_kva_setup(meminfo, dconf_kva,
5813                                 sizeof(struct bfa_dconf_s));
5814 }
5815
5816 static void
5817 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5818                 struct bfa_pcidev_s *pcidev)
5819 {
5820         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5821
5822         dconf->bfad = bfad;
5823         dconf->bfa = bfa;
5824         dconf->instance = bfa->ioc.port_id;
5825         bfa_trc(bfa, dconf->instance);
5826
5827         dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5828         if (cfg->drvcfg.min_cfg) {
5829                 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5830                 dconf->min_cfg = BFA_TRUE;
5831         } else {
5832                 dconf->min_cfg = BFA_FALSE;
5833                 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5834         }
5835
5836         bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5837         bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5838 }
5839
5840 static void
5841 bfa_dconf_init_cb(void *arg, bfa_status_t status)
5842 {
5843         struct bfa_s *bfa = arg;
5844         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5845
5846         bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5847         if (status == BFA_STATUS_OK) {
5848                 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5849                 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5850                         dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5851                 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5852                         dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5853         }
5854         bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5855 }
5856
5857 void
5858 bfa_dconf_modinit(struct bfa_s *bfa)
5859 {
5860         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5861         bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5862 }
5863 static void
5864 bfa_dconf_start(struct bfa_s *bfa)
5865 {
5866 }
5867
5868 static void
5869 bfa_dconf_stop(struct bfa_s *bfa)
5870 {
5871 }
5872
5873 static void bfa_dconf_timer(void *cbarg)
5874 {
5875         struct bfa_dconf_mod_s *dconf = cbarg;
5876         bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5877 }
5878 static void
5879 bfa_dconf_iocdisable(struct bfa_s *bfa)
5880 {
5881         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5882         bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5883 }
5884
5885 static void
5886 bfa_dconf_detach(struct bfa_s *bfa)
5887 {
5888 }
5889
5890 static bfa_status_t
5891 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5892 {
5893         bfa_status_t bfa_status;
5894         bfa_trc(dconf->bfa, 0);
5895
5896         bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5897                                 BFA_FLASH_PART_DRV, dconf->instance,
5898                                 dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
5899                                 bfa_dconf_cbfn, dconf);
5900         WARN_ON(bfa_status != BFA_STATUS_OK);
5901
5902         bfa_trc(dconf->bfa, bfa_status);
5903
5904         return bfa_status;
5905 }
5906
5907 bfa_status_t
5908 bfa_dconf_update(struct bfa_s *bfa)
5909 {
5910         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5911         bfa_trc(dconf->bfa, 0);
5912         if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5913                 return BFA_STATUS_FAILED;
5914
5915         if (dconf->min_cfg) {
5916                 bfa_trc(dconf->bfa, dconf->min_cfg);
5917                 return BFA_STATUS_FAILED;
5918         }
5919
5920         bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5921         return BFA_STATUS_OK;
5922 }
5923
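/*
 * Illustrative usage sketch (not part of the driver): callers edit the
 * in-memory dconf contents and then schedule the write-back; the flash
 * write itself is deferred by the BFA_DCONF_UPDATE_TOV timer so that a
 * burst of updates collapses into a single flash operation.
 */
#if 0
        /* after modifying the dconf contents for struct bfa_s *bfa: */
        if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
                ;       /* rejected: min_cfg mode or IOC is down */
#endif
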
5924 static void
5925 bfa_dconf_cbfn(void *arg, bfa_status_t status)
5926 {
5927         struct bfa_dconf_mod_s *dconf = arg;
5928         WARN_ON(status);
5929         bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5930 }
5931
5932 void
5933 bfa_dconf_modexit(struct bfa_s *bfa)
5934 {
5935         struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5936         bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5937 }