/*
 * (Scrape provenance, not source code:)
 * Pileus Git - ~andy/linux/blob - drivers/scsi/bfa/bfa_svc.c
 * Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal
 * [~andy/linux] / drivers / scsi / bfa / bfa_svc.c
 */
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 #include "bfad_drv.h"
19 #include "bfad_im.h"
20 #include "bfa_plog.h"
21 #include "bfa_cs.h"
22 #include "bfa_modules.h"
23
24 BFA_TRC_FILE(HAL, FCXP);
25 BFA_MODULE(fcdiag);
26 BFA_MODULE(fcxp);
27 BFA_MODULE(sgpg);
28 BFA_MODULE(lps);
29 BFA_MODULE(fcport);
30 BFA_MODULE(rport);
31 BFA_MODULE(uf);
32
33 /*
34  * LPS related definitions
35  */
36 #define BFA_LPS_MIN_LPORTS      (1)
37 #define BFA_LPS_MAX_LPORTS      (256)
38
39 /*
40  * Maximum Vports supported per physical port or vf.
41  */
42 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
43 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
44
45
46 /*
47  * FC PORT related definitions
48  */
49 /*
50  * The port is considered disabled if corresponding physical port or IOC are
51  * disabled explicitly
52  */
53 #define BFA_PORT_IS_DISABLED(bfa) \
54         ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
55         (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
56
57 /*
58  * BFA port state machine events
59  */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event	*/
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
};
71
72 /*
73  * BFA port link notification state machine events
74  */
75
76 enum bfa_fcport_ln_sm_event {
77         BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
78         BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
79         BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
80 };
81
82 /*
83  * RPORT related definitions
84  */
/*
 * Deliver the rport offline notification: call bfa_cb_rport_offline()
 * synchronously when bfa->fcs is set, otherwise defer the notification
 * by queueing __bfa_cb_rport_offline on the hcb queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)
93
/*
 * Deliver the rport online notification: call bfa_cb_rport_online()
 * synchronously when bfa->fcs is set, otherwise defer the notification
 * by queueing __bfa_cb_rport_online on the hcb queue.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
102
103 /*
104  * forward declarations FCXP related functions
105  */
106 static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
107 static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
108                                 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
109 static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
110                                 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
111 static void     bfa_fcxp_qresume(void *cbarg);
112 static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
113                                 struct bfi_fcxp_send_req_s *send_req);
114
115 /*
116  * forward declarations for LPS functions
117  */
118 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
119                 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
120 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
121                                 struct bfa_iocfc_cfg_s *cfg,
122                                 struct bfa_pcidev_s *pcidev);
123 static void bfa_lps_detach(struct bfa_s *bfa);
124 static void bfa_lps_start(struct bfa_s *bfa);
125 static void bfa_lps_stop(struct bfa_s *bfa);
126 static void bfa_lps_iocdisable(struct bfa_s *bfa);
127 static void bfa_lps_login_rsp(struct bfa_s *bfa,
128                                 struct bfi_lps_login_rsp_s *rsp);
129 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
130 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
131                                 struct bfi_lps_logout_rsp_s *rsp);
132 static void bfa_lps_reqq_resume(void *lps_arg);
133 static void bfa_lps_free(struct bfa_lps_s *lps);
134 static void bfa_lps_send_login(struct bfa_lps_s *lps);
135 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
136 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
137 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
138 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
139 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
140
141 /*
142  * forward declaration for LPS state machine
143  */
144 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
145 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
146 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
147                                         event);
148 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
149 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
150                                         enum bfa_lps_event event);
151 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
152 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
153                                         event);
154
155 /*
156  * forward declaration for FC Port functions
157  */
158 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
159 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
160 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
161 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
162 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
163 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
164 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
165                         enum bfa_port_linkstate event, bfa_boolean_t trunk);
166 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
167                                 enum bfa_port_linkstate event);
168 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
169 static void bfa_fcport_stats_get_timeout(void *cbarg);
170 static void bfa_fcport_stats_clr_timeout(void *cbarg);
171 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
172
173 /*
174  * forward declaration for FC PORT state machine
175  */
176 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
177                                         enum bfa_fcport_sm_event event);
178 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
179                                         enum bfa_fcport_sm_event event);
180 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
181                                         enum bfa_fcport_sm_event event);
182 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
183                                         enum bfa_fcport_sm_event event);
184 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
185                                         enum bfa_fcport_sm_event event);
186 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
187                                         enum bfa_fcport_sm_event event);
188 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
189                                         enum bfa_fcport_sm_event event);
190 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
191                                         enum bfa_fcport_sm_event event);
192 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
193                                         enum bfa_fcport_sm_event event);
194 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
195                                         enum bfa_fcport_sm_event event);
196 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
197                                         enum bfa_fcport_sm_event event);
198 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
199                                         enum bfa_fcport_sm_event event);
200
201 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
202                                         enum bfa_fcport_ln_sm_event event);
203 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
204                                         enum bfa_fcport_ln_sm_event event);
205 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
206                                         enum bfa_fcport_ln_sm_event event);
207 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
208                                         enum bfa_fcport_ln_sm_event event);
209 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
210                                         enum bfa_fcport_ln_sm_event event);
211 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
212                                         enum bfa_fcport_ln_sm_event event);
213 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
214                                         enum bfa_fcport_ln_sm_event event);
215
/*
 * Map each fcport state-machine handler to the externally reported port
 * state.  Note that both the iocdown and iocfail handlers report the
 * same BFA_PORT_ST_IOCDOWN state.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
230
231
232 /*
233  * forward declaration for RPORT related functions
234  */
235 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
236 static void             bfa_rport_free(struct bfa_rport_s *rport);
237 static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
238 static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
239 static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
240 static void             __bfa_cb_rport_online(void *cbarg,
241                                                 bfa_boolean_t complete);
242 static void             __bfa_cb_rport_offline(void *cbarg,
243                                                 bfa_boolean_t complete);
244
245 /*
246  * forward declaration for RPORT state machine
247  */
248 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
249                                         enum bfa_rport_event event);
250 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
251                                         enum bfa_rport_event event);
252 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
253                                         enum bfa_rport_event event);
254 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
255                                         enum bfa_rport_event event);
256 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
257                                         enum bfa_rport_event event);
258 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
259                                         enum bfa_rport_event event);
260 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
261                                         enum bfa_rport_event event);
262 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
263                                         enum bfa_rport_event event);
264 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
265                                         enum bfa_rport_event event);
266 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
267                                         enum bfa_rport_event event);
268 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
269                                         enum bfa_rport_event event);
270 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
271                                         enum bfa_rport_event event);
272 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
273                                         enum bfa_rport_event event);
274
275 /*
276  * PLOG related definitions
277  */
278 static int
279 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
280 {
281         if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
282                 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
283                 return 1;
284
285         if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
286                 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
287                 return 1;
288
289         return 0;
290 }
291
292 static u64
293 bfa_get_log_time(void)
294 {
295         u64 system_time = 0;
296         struct timeval tv;
297         do_gettimeofday(&tv);
298
299         /* We are interested in seconds only. */
300         system_time = tv.tv_sec;
301         return system_time;
302 }
303
304 static void
305 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
306 {
307         u16 tail;
308         struct bfa_plog_rec_s *pl_recp;
309
310         if (plog->plog_enabled == 0)
311                 return;
312
313         if (plkd_validate_logrec(pl_rec)) {
314                 WARN_ON(1);
315                 return;
316         }
317
318         tail = plog->tail;
319
320         pl_recp = &(plog->plog_recs[tail]);
321
322         memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
323
324         pl_recp->tv = bfa_get_log_time();
325         BFA_PL_LOG_REC_INCR(plog->tail);
326
327         if (plog->head == plog->tail)
328                 BFA_PL_LOG_REC_INCR(plog->head);
329 }
330
331 void
332 bfa_plog_init(struct bfa_plog_s *plog)
333 {
334         memset((char *)plog, 0, sizeof(struct bfa_plog_s));
335
336         memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
337         plog->head = plog->tail = 0;
338         plog->plog_enabled = 1;
339 }
340
341 void
342 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
343                 enum bfa_plog_eid event,
344                 u16 misc, char *log_str)
345 {
346         struct bfa_plog_rec_s  lp;
347
348         if (plog->plog_enabled) {
349                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
350                 lp.mid = mid;
351                 lp.eid = event;
352                 lp.log_type = BFA_PL_LOG_TYPE_STRING;
353                 lp.misc = misc;
354                 strncpy(lp.log_entry.string_log, log_str,
355                         BFA_PL_STRING_LOG_SZ - 1);
356                 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
357                 bfa_plog_add(plog, &lp);
358         }
359 }
360
361 void
362 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
363                 enum bfa_plog_eid event,
364                 u16 misc, u32 *intarr, u32 num_ints)
365 {
366         struct bfa_plog_rec_s  lp;
367         u32 i;
368
369         if (num_ints > BFA_PL_INT_LOG_SZ)
370                 num_ints = BFA_PL_INT_LOG_SZ;
371
372         if (plog->plog_enabled) {
373                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
374                 lp.mid = mid;
375                 lp.eid = event;
376                 lp.log_type = BFA_PL_LOG_TYPE_INT;
377                 lp.misc = misc;
378
379                 for (i = 0; i < num_ints; i++)
380                         lp.log_entry.int_log[i] = intarr[i];
381
382                 lp.log_num_ints = (u8) num_ints;
383
384                 bfa_plog_add(plog, &lp);
385         }
386 }
387
388 void
389 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
390                         enum bfa_plog_eid event,
391                         u16 misc, struct fchs_s *fchdr)
392 {
393         struct bfa_plog_rec_s  lp;
394         u32     *tmp_int = (u32 *) fchdr;
395         u32     ints[BFA_PL_INT_LOG_SZ];
396
397         if (plog->plog_enabled) {
398                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
399
400                 ints[0] = tmp_int[0];
401                 ints[1] = tmp_int[1];
402                 ints[2] = tmp_int[4];
403
404                 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
405         }
406 }
407
408 void
409 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
410                       enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
411                       u32 pld_w0)
412 {
413         struct bfa_plog_rec_s  lp;
414         u32     *tmp_int = (u32 *) fchdr;
415         u32     ints[BFA_PL_INT_LOG_SZ];
416
417         if (plog->plog_enabled) {
418                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
419
420                 ints[0] = tmp_int[0];
421                 ints[1] = tmp_int[1];
422                 ints[2] = tmp_int[4];
423                 ints[3] = pld_w0;
424
425                 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
426         }
427 }
428
429
430 /*
431  *  fcxp_pvt BFA FCXP private functions
432  */
433
434 static void
435 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
436 {
437         u16     i;
438         struct bfa_fcxp_s *fcxp;
439
440         fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
441         memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
442
443         INIT_LIST_HEAD(&mod->fcxp_req_free_q);
444         INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
445         INIT_LIST_HEAD(&mod->fcxp_active_q);
446         INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
447         INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
448
449         mod->fcxp_list = fcxp;
450
451         for (i = 0; i < mod->num_fcxps; i++) {
452                 fcxp->fcxp_mod = mod;
453                 fcxp->fcxp_tag = i;
454
455                 if (i < (mod->num_fcxps / 2)) {
456                         list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
457                         fcxp->req_rsp = BFA_TRUE;
458                 } else {
459                         list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
460                         fcxp->req_rsp = BFA_FALSE;
461                 }
462
463                 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
464                 fcxp->reqq_waiting = BFA_FALSE;
465
466                 fcxp = fcxp + 1;
467         }
468
469         bfa_mem_kva_curp(mod) = (void *)fcxp;
470 }
471
/*
 * Report the DMA and KVA memory the FCXP module needs for the given
 * configuration: per-fcxp payload buffers spread across DMA segments,
 * plus one struct bfa_fcxp_s of kernel-virtual memory per fcxp.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/* min_cfg uses two small buffers; otherwise small req + large rsp */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	/* Full segments first; the last segment takes the remainder. */
	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
509
510 static void
511 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
512                 struct bfa_pcidev_s *pcidev)
513 {
514         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
515
516         mod->bfa = bfa;
517         mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
518
519         /*
520          * Initialize FCXP request and response payload sizes.
521          */
522         mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
523         if (!cfg->drvcfg.min_cfg)
524                 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
525
526         INIT_LIST_HEAD(&mod->req_wait_q);
527         INIT_LIST_HEAD(&mod->rsp_wait_q);
528
529         claim_fcxps_mem(mod);
530 }
531
/* Intentionally empty: the FCXP module has no detach-time work. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
536
/* Intentionally empty: the FCXP module has no start-time work. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
541
/* Intentionally empty: the FCXP module has no stop-time work. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
546
547 static void
548 bfa_fcxp_iocdisable(struct bfa_s *bfa)
549 {
550         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
551         struct bfa_fcxp_s *fcxp;
552         struct list_head              *qe, *qen;
553
554         /* Enqueue unused fcxp resources to free_q */
555         list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
556         list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
557
558         list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
559                 fcxp = (struct bfa_fcxp_s *) qe;
560                 if (fcxp->caller == NULL) {
561                         fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
562                                         BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
563                         bfa_fcxp_free(fcxp);
564                 } else {
565                         fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
566                         bfa_cb_queue(bfa, &fcxp->hcb_qe,
567                                      __bfa_fcxp_send_cbfn, fcxp);
568                 }
569         }
570 }
571
/*
 * Allocate an fcxp from the request or response free pool (selected by
 * @req) and move it onto the active list.  Returns NULL when the chosen
 * pool is empty.
 */
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp;

	if (req)
		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
	else
		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}
587
588 static void
589 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
590                struct bfa_s *bfa,
591                u8 *use_ibuf,
592                u32 *nr_sgles,
593                bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
594                bfa_fcxp_get_sglen_t *r_sglen_cbfn,
595                struct list_head *r_sgpg_q,
596                int n_sgles,
597                bfa_fcxp_get_sgaddr_t sga_cbfn,
598                bfa_fcxp_get_sglen_t sglen_cbfn)
599 {
600
601         WARN_ON(bfa == NULL);
602
603         bfa_trc(bfa, fcxp->fcxp_tag);
604
605         if (n_sgles == 0) {
606                 *use_ibuf = 1;
607         } else {
608                 WARN_ON(*sga_cbfn == NULL);
609                 WARN_ON(*sglen_cbfn == NULL);
610
611                 *use_ibuf = 0;
612                 *r_sga_cbfn = sga_cbfn;
613                 *r_sglen_cbfn = sglen_cbfn;
614
615                 *nr_sgles = n_sgles;
616
617                 /*
618                  * alloc required sgpgs
619                  */
620                 if (n_sgles > BFI_SGE_INLINE)
621                         WARN_ON(1);
622         }
623
624 }
625
/*
 * Prepare an fcxp for use by a caller: record the caller context and set
 * up SG bookkeeping for both the request and the response direction via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
652
653 static void
654 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
655 {
656         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
657         struct bfa_fcxp_wqe_s *wqe;
658
659         if (fcxp->req_rsp)
660                 bfa_q_deq(&mod->req_wait_q, &wqe);
661         else
662                 bfa_q_deq(&mod->rsp_wait_q, &wqe);
663
664         if (wqe) {
665                 bfa_trc(mod->bfa, fcxp->fcxp_tag);
666
667                 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
668                         wqe->nrsp_sgles, wqe->req_sga_cbfn,
669                         wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
670                         wqe->rsp_sglen_cbfn);
671
672                 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
673                 return;
674         }
675
676         WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
677         list_del(&fcxp->qe);
678
679         if (fcxp->req_rsp)
680                 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
681         else
682                 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
683 }
684
/*
 * No-op completion callback installed for discarded fcxps; the response
 * is intentionally ignored.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
692
693 static void
694 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
695 {
696         struct bfa_fcxp_s *fcxp = cbarg;
697
698         if (complete) {
699                 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
700                                 fcxp->rsp_status, fcxp->rsp_len,
701                                 fcxp->residue_len, &fcxp->rsp_fchs);
702         } else {
703                 bfa_fcxp_free(fcxp);
704         }
705 }
706
/*
 * Firmware completion handler for an FCXP send.
 *
 * Converts the response fields from big-endian wire order, logs the
 * received frame and then either invokes the completion callback
 * synchronously (when there is no caller context; the fcxp is freed
 * right after) or stashes the response in the fcxp and defers the
 * callback through the hcb queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *       is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Save the response for the deferred callback. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
756
757 static void
758 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
759                  struct fchs_s *fchs)
760 {
761         /*
762          * TODO: TX ox_id
763          */
764         if (reqlen > 0) {
765                 if (fcxp->use_ireqbuf) {
766                         u32     pld_w0 =
767                                 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
768
769                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
770                                         BFA_PL_EID_TX,
771                                         reqlen + sizeof(struct fchs_s), fchs,
772                                         pld_w0);
773                 } else {
774                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
775                                         BFA_PL_EID_TX,
776                                         reqlen + sizeof(struct fchs_s),
777                                         fchs);
778                 }
779         } else {
780                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
781                                reqlen + sizeof(struct fchs_s), fchs);
782         }
783 }
784
785 static void
786 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
787                  struct bfi_fcxp_send_rsp_s *fcxp_rsp)
788 {
789         if (fcxp_rsp->rsp_len > 0) {
790                 if (fcxp->use_irspbuf) {
791                         u32     pld_w0 =
792                                 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
793
794                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
795                                               BFA_PL_EID_RX,
796                                               (u16) fcxp_rsp->rsp_len,
797                                               &fcxp_rsp->fchs, pld_w0);
798                 } else {
799                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
800                                        BFA_PL_EID_RX,
801                                        (u16) fcxp_rsp->rsp_len,
802                                        &fcxp_rsp->fchs);
803                 }
804         } else {
805                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
806                                (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
807         }
808 }
809
/*
 * Handler to resume sending fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	/* clear the wait flag set by bfa_fcxp_send() before re-queueing */
	fcxp->reqq_waiting = BFA_FALSE;
	/*
	 * NOTE(review): send_req is used without a NULL check; presumably
	 * this callback only runs once queue space exists — confirm against
	 * the reqq wait machinery.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
824
/*
 * Queue fcxp send request to firmware.
 */
828 static void
829 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
830 {
831         struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
832         struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
833         struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
834         struct bfa_rport_s              *rport = reqi->bfa_rport;
835
836         bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
837                     bfa_fn_lpu(bfa));
838
839         send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
840         if (rport) {
841                 send_req->rport_fw_hndl = rport->fw_handle;
842                 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
843                 if (send_req->max_frmsz == 0)
844                         send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
845         } else {
846                 send_req->rport_fw_hndl = 0;
847                 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
848         }
849
850         send_req->vf_id = cpu_to_be16(reqi->vf_id);
851         send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
852         send_req->class = reqi->class;
853         send_req->rsp_timeout = rspi->rsp_timeout;
854         send_req->cts = reqi->cts;
855         send_req->fchs = reqi->fchs;
856
857         send_req->req_len = cpu_to_be32(reqi->req_tot_len);
858         send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
859
860         /*
861          * setup req sgles
862          */
863         if (fcxp->use_ireqbuf == 1) {
864                 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
865                                         BFA_FCXP_REQ_PLD_PA(fcxp));
866         } else {
867                 if (fcxp->nreq_sgles > 0) {
868                         WARN_ON(fcxp->nreq_sgles != 1);
869                         bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
870                                 fcxp->req_sga_cbfn(fcxp->caller, 0));
871                 } else {
872                         WARN_ON(reqi->req_tot_len != 0);
873                         bfa_alen_set(&send_req->rsp_alen, 0, 0);
874                 }
875         }
876
877         /*
878          * setup rsp sgles
879          */
880         if (fcxp->use_irspbuf == 1) {
881                 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
882
883                 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
884                                         BFA_FCXP_RSP_PLD_PA(fcxp));
885         } else {
886                 if (fcxp->nrsp_sgles > 0) {
887                         WARN_ON(fcxp->nrsp_sgles != 1);
888                         bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
889                                 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
890
891                 } else {
892                         WARN_ON(rspi->rsp_maxlen != 0);
893                         bfa_alen_set(&send_req->rsp_alen, 0, 0);
894                 }
895         }
896
897         hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
898
899         bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
900
901         bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
902         bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
903 }
904
905 /*
906  * Allocate an FCXP instance to send a response or to send a request
907  * that has a response. Request/response buffers are allocated by caller.
908  *
909  * @param[in]   bfa             BFA bfa instance
910  * @param[in]   nreq_sgles      Number of SG elements required for request
911  *                              buffer. 0, if fcxp internal buffers are used.
912  *                              Use bfa_fcxp_get_reqbuf() to get the
913  *                              internal req buffer.
914  * @param[in]   req_sgles       SG elements describing request buffer. Will be
915  *                              copied in by BFA and hence can be freed on
916  *                              return from this function.
917  * @param[in]   get_req_sga     function ptr to be called to get a request SG
918  *                              Address (given the sge index).
919  * @param[in]   get_req_sglen   function ptr to be called to get a request SG
920  *                              len (given the sge index).
921  * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
922  *                              Address (given the sge index).
923  * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
924  *                              len (given the sge index).
925  * @param[in]   req             Allocated FCXP is used to send req or rsp?
926  *                              request - BFA_TRUE, response - BFA_FALSE
927  *
928  * @return FCXP instance. NULL on failure.
929  */
930 struct bfa_fcxp_s *
931 bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
932                 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
933                 bfa_fcxp_get_sglen_t req_sglen_cbfn,
934                 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
935                 bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
936 {
937         struct bfa_fcxp_s *fcxp = NULL;
938
939         WARN_ON(bfa == NULL);
940
941         fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
942         if (fcxp == NULL)
943                 return NULL;
944
945         bfa_trc(bfa, fcxp->fcxp_tag);
946
947         bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
948                         req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
949
950         return fcxp;
951 }
952
953 /*
954  * Get the internal request buffer pointer
955  *
956  * @param[in]   fcxp    BFA fcxp pointer
957  *
958  * @return              pointer to the internal request buffer
959  */
960 void *
961 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
962 {
963         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
964         void    *reqbuf;
965
966         WARN_ON(fcxp->use_ireqbuf != 1);
967         reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
968                                 mod->req_pld_sz + mod->rsp_pld_sz);
969         return reqbuf;
970 }
971
972 u32
973 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
974 {
975         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
976
977         return mod->req_pld_sz;
978 }
979
980 /*
981  * Get the internal response buffer pointer
982  *
983  * @param[in]   fcxp    BFA fcxp pointer
984  *
 * @return              pointer to the internal response buffer
986  */
987 void *
988 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
989 {
990         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
991         void    *fcxp_buf;
992
993         WARN_ON(fcxp->use_irspbuf != 1);
994
995         fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
996                                 mod->req_pld_sz + mod->rsp_pld_sz);
997
998         /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
999         return ((u8 *) fcxp_buf) + mod->req_pld_sz;
1000 }
1001
1002 /*
1003  * Free the BFA FCXP
1004  *
1005  * @param[in]   fcxp                    BFA fcxp pointer
1006  *
1007  * @return              void
1008  */
1009 void
1010 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1011 {
1012         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1013
1014         WARN_ON(fcxp == NULL);
1015         bfa_trc(mod->bfa, fcxp->fcxp_tag);
1016         bfa_fcxp_put(fcxp);
1017 }
1018
1019 /*
1020  * Send a FCXP request
1021  *
1022  * @param[in]   fcxp    BFA fcxp pointer
1023  * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
1024  * @param[in]   vf_id   virtual Fabric ID
1025  * @param[in]   lp_tag  lport tag
1026  * @param[in]   cts     use Continuous sequence
1027  * @param[in]   cos     fc Class of Service
1028  * @param[in]   reqlen  request length, does not include FCHS length
1029  * @param[in]   fchs    fc Header Pointer. The header content will be copied
1030  *                      in by BFA.
1031  *
1032  * @param[in]   cbfn    call back function to be called on receiving
1033  *                                                              the response
1034  * @param[in]   cbarg   arg for cbfn
1035  * @param[in]   rsp_timeout
1036  *                      response timeout
1037  *
1038  * @return              bfa_status_t
1039  */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;	/* header copied by value; caller may free fchs */
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* guarantee a completion callback so the ISR path never sees NULL */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 * (bfa_fcxp_qresume() will re-queue us when space frees up).
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1081
1082 /*
1083  * Abort a BFA FCXP
1084  *
1085  * @param[in]   fcxp    BFA fcxp pointer
1086  *
1087  * @return              void
1088  */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* abort is not implemented: warn loudly if anyone ever calls it */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1096
1097 void
1098 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1099                bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1100                void *caller, int nreq_sgles,
1101                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1102                bfa_fcxp_get_sglen_t req_sglen_cbfn,
1103                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1104                bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1105 {
1106         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1107
1108         if (req)
1109                 WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1110         else
1111                 WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1112
1113         wqe->alloc_cbfn = alloc_cbfn;
1114         wqe->alloc_cbarg = alloc_cbarg;
1115         wqe->caller = caller;
1116         wqe->bfa = bfa;
1117         wqe->nreq_sgles = nreq_sgles;
1118         wqe->nrsp_sgles = nrsp_sgles;
1119         wqe->req_sga_cbfn = req_sga_cbfn;
1120         wqe->req_sglen_cbfn = req_sglen_cbfn;
1121         wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1122         wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1123
1124         if (req)
1125                 list_add_tail(&wqe->qe, &mod->req_wait_q);
1126         else
1127                 list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1128 }
1129
1130 void
1131 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1132 {
1133         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1134
1135         WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1136                 !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1137         list_del(&wqe->qe);
1138 }
1139
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/*
	 * Already sent to firmware: neutralize the completion callback so
	 * the eventual response is dropped silently.
	 */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1156
1157 void
1158 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1159 {
1160         switch (msg->mhdr.msg_id) {
1161         case BFI_FCXP_I2H_SEND_RSP:
1162                 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1163                 break;
1164
1165         default:
1166                 bfa_trc(bfa, msg->mhdr.msg_id);
1167                 WARN_ON(1);
1168         }
1169 }
1170
1171 u32
1172 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1173 {
1174         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1175
1176         return mod->rsp_pld_sz;
1177 }
1178
void
bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct list_head	*qe;
	int	i;

	/*
	 * Firmware supports fewer fcxps than the driver allocated: park the
	 * excess on the unused lists, drawing the first half of the surplus
	 * from the request free pool and the rest from the response pool.
	 */
	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_req_unused_q);
		} else {
			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
		}
	}
}
1196
1197 /*
1198  *  BFA LPS state machine functions
1199  */
1200
1201 /*
1202  * Init state -- no login
1203  */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* no reqq space: park on wait queue, else send right away */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		/* port-log the request type: FDISC for vports, else FLOGI */
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* never logged in, so logout completes immediately */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* nothing to tear down while not logged in */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually send out the timeout.
		 * Just ignore.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1252
1253 /*
1254  * login is in progress -- awaiting response from firmware
1255  */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			/* login accepted: go online and port-log the accept */
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* login rejected or timed out: back to init */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* notify upper layer of the login outcome */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* N2N pid arrives while login outstanding: trace only */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1306
1307 /*
1308  * login pending - awaiting space in request queue
1309  */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space freed up: actually send the login now */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon the pending request and drop the reqq wait */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1340
1341 /*
1342  * login complete
1343  */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* no reqq space: park on wait queue, else send right away */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* fabric cleared the virtual link: drop back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* forward the N2N-assigned PID to firmware when possible */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1389
1390 /*
1391  * login complete
1392  */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space freed up: deliver the pending N2N PID */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/*
		 * Reuse the already-queued reqq wait: when space arrives the
		 * logowait state will issue the logout instead.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1431
1432 /*
1433  * logout in progress - awaiting firmware response
1434  */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* logout finished (or port went away): report completion */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* deleted mid-logout: no completion callback */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1456
1457 /*
1458  * logout pending -- awaiting space in request queue
1459  */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space freed up: actually send the logout now */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon the pending logout and drop the reqq wait */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1482
1483
1484
1485 /*
1486  *  lps_pvt BFA LPS private functions
1487  */
1488
1489 /*
1490  * return memory requirement
1491  */
1492 static void
1493 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1494                 struct bfa_s *bfa)
1495 {
1496         struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1497
1498         if (cfg->drvcfg.min_cfg)
1499                 bfa_mem_kva_setup(minfo, lps_kva,
1500                         sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1501         else
1502                 bfa_mem_kva_setup(minfo, lps_kva,
1503                         sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1504 }
1505
1506 /*
1507  * bfa module attach at initialization time
1508  */
1509 static void
1510 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1511         struct bfa_pcidev_s *pcidev)
1512 {
1513         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1514         struct bfa_lps_s        *lps;
1515         int                     i;
1516
1517         mod->num_lps = BFA_LPS_MAX_LPORTS;
1518         if (cfg->drvcfg.min_cfg)
1519                 mod->num_lps = BFA_LPS_MIN_LPORTS;
1520         else
1521                 mod->num_lps = BFA_LPS_MAX_LPORTS;
1522         mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1523
1524         bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1525
1526         INIT_LIST_HEAD(&mod->lps_free_q);
1527         INIT_LIST_HEAD(&mod->lps_active_q);
1528         INIT_LIST_HEAD(&mod->lps_login_q);
1529
1530         for (i = 0; i < mod->num_lps; i++, lps++) {
1531                 lps->bfa        = bfa;
1532                 lps->bfa_tag    = (u8) i;
1533                 lps->reqq       = BFA_REQQ_LPS;
1534                 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1535                 list_add_tail(&lps->qe, &mod->lps_free_q);
1536         }
1537 }
1538
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* intentionally empty: LPS has no per-module teardown */
}
1543
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* intentionally empty: LPS needs no start-time work */
}
1548
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* intentionally empty: LPS needs no stop-time work */
}
1553
1554 /*
1555  * IOC in disabled state -- consider all lps offline
1556  */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	/* send OFFLINE to every active lps (safe walk: handlers may unlink) */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* ... and to every lps with a login still in flight */
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* anything left on the login queue is now considered active */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1574
1575 /*
1576  * Firmware login response
1577  */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* login accepted: cache the fabric-negotiated parameters */
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;
		lps->pr_bbscn	= rsp->bb_scn;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* keep the LS_RJT reason/explanation for the upper layer */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		/* fail the trailing ext_status pending logins as well */
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* move from the login queue back to active, then run the SM */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1631
1632 static void
1633 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1634 {
1635         struct bfa_s            *bfa = first_lps->bfa;
1636         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1637         struct list_head        *qe, *qe_next;
1638         struct bfa_lps_s        *lps;
1639
1640         bfa_trc(bfa, count);
1641
1642         qe = bfa_q_next(first_lps);
1643
1644         while (count && qe) {
1645                 qe_next = bfa_q_next(qe);
1646                 lps = (struct bfa_lps_s *)qe;
1647                 bfa_trc(bfa, lps->bfa_tag);
1648                 lps->status = first_lps->status;
1649                 list_del(&lps->qe);
1650                 list_add_tail(&lps->qe, &mod->lps_active_q);
1651                 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1652                 qe = qe_next;
1653                 count--;
1654         }
1655 }
1656
1657 /*
1658  * Firmware logout response
1659  */
1660 static void
1661 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1662 {
1663         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1664         struct bfa_lps_s        *lps;
1665
1666         WARN_ON(rsp->bfa_tag >= mod->num_lps);
1667         lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1668
1669         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1670 }
1671
1672 /*
1673  * Firmware received a Clear virtual link request (for FCoE)
1674  */
1675 static void
1676 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1677 {
1678         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1679         struct bfa_lps_s        *lps;
1680
1681         lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1682
1683         bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1684 }
1685
1686 /*
1687  * Space is available in request queue, resume queueing request to firmware.
1688  */
1689 static void
1690 bfa_lps_reqq_resume(void *lps_arg)
1691 {
1692         struct bfa_lps_s        *lps = lps_arg;
1693
1694         bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1695 }
1696
1697 /*
1698  * lps is freed -- triggered by vport delete
1699  */
1700 static void
1701 bfa_lps_free(struct bfa_lps_s *lps)
1702 {
1703         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1704
1705         lps->lp_pid = 0;
1706         list_del(&lps->qe);
1707         list_add_tail(&lps->qe, &mod->lps_free_q);
1708 }
1709
1710 /*
1711  * send login request to firmware
1712  */
1713 static void
1714 bfa_lps_send_login(struct bfa_lps_s *lps)
1715 {
1716         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1717         struct bfi_lps_login_req_s      *m;
1718
1719         m = bfa_reqq_next(lps->bfa, lps->reqq);
1720         WARN_ON(!m);
1721
1722         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1723                 bfa_fn_lpu(lps->bfa));
1724
1725         m->bfa_tag      = lps->bfa_tag;
1726         m->alpa         = lps->alpa;
1727         m->pdu_size     = cpu_to_be16(lps->pdusz);
1728         m->pwwn         = lps->pwwn;
1729         m->nwwn         = lps->nwwn;
1730         m->fdisc        = lps->fdisc;
1731         m->auth_en      = lps->auth_en;
1732         m->bb_scn       = lps->bb_scn;
1733
1734         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1735         list_del(&lps->qe);
1736         list_add_tail(&lps->qe, &mod->lps_login_q);
1737 }
1738
1739 /*
1740  * send logout request to firmware
1741  */
1742 static void
1743 bfa_lps_send_logout(struct bfa_lps_s *lps)
1744 {
1745         struct bfi_lps_logout_req_s *m;
1746
1747         m = bfa_reqq_next(lps->bfa, lps->reqq);
1748         WARN_ON(!m);
1749
1750         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1751                 bfa_fn_lpu(lps->bfa));
1752
1753         m->fw_tag = lps->fw_tag;
1754         m->port_name = lps->pwwn;
1755         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1756 }
1757
1758 /*
1759  * send n2n pid set request to firmware
1760  */
1761 static void
1762 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1763 {
1764         struct bfi_lps_n2n_pid_req_s *m;
1765
1766         m = bfa_reqq_next(lps->bfa, lps->reqq);
1767         WARN_ON(!m);
1768
1769         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1770                 bfa_fn_lpu(lps->bfa));
1771
1772         m->fw_tag = lps->fw_tag;
1773         m->lp_pid = lps->lp_pid;
1774         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1775 }
1776
1777 /*
1778  * Indirect login completion handler for non-fcs
1779  */
1780 static void
1781 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1782 {
1783         struct bfa_lps_s *lps   = arg;
1784
1785         if (!complete)
1786                 return;
1787
1788         if (lps->fdisc)
1789                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1790         else
1791                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1792 }
1793
1794 /*
1795  * Login completion handler -- direct call for fcs, queue for others
1796  */
1797 static void
1798 bfa_lps_login_comp(struct bfa_lps_s *lps)
1799 {
1800         if (!lps->bfa->fcs) {
1801                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1802                         lps);
1803                 return;
1804         }
1805
1806         if (lps->fdisc)
1807                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1808         else
1809                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1810 }
1811
1812 /*
1813  * Indirect logout completion handler for non-fcs
1814  */
1815 static void
1816 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1817 {
1818         struct bfa_lps_s *lps   = arg;
1819
1820         if (!complete)
1821                 return;
1822
1823         if (lps->fdisc)
1824                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1825         else
1826                 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1827 }
1828
1829 /*
1830  * Logout completion handler -- direct call for fcs, queue for others
1831  */
1832 static void
1833 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1834 {
1835         if (!lps->bfa->fcs) {
1836                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1837                         lps);
1838                 return;
1839         }
1840         if (lps->fdisc)
1841                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1842 }
1843
1844 /*
1845  * Clear virtual link completion handler for non-fcs
1846  */
1847 static void
1848 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1849 {
1850         struct bfa_lps_s *lps   = arg;
1851
1852         if (!complete)
1853                 return;
1854
1855         /* Clear virtual link to base port will result in link down */
1856         if (lps->fdisc)
1857                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1858 }
1859
1860 /*
1861  * Received Clear virtual link event --direct call for fcs,
1862  * queue for others
1863  */
1864 static void
1865 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1866 {
1867         if (!lps->bfa->fcs) {
1868                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1869                         lps);
1870                 return;
1871         }
1872
1873         /* Clear virtual link to base port will result in link down */
1874         if (lps->fdisc)
1875                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1876 }
1877
1878
1879
1880 /*
1881  *  lps_public BFA LPS public functions
1882  */
1883
1884 u32
1885 bfa_lps_get_max_vport(struct bfa_s *bfa)
1886 {
1887         if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1888                 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1889         else
1890                 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1891 }
1892
1893 /*
1894  * Allocate a lport srvice tag.
1895  */
1896 struct bfa_lps_s  *
1897 bfa_lps_alloc(struct bfa_s *bfa)
1898 {
1899         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1900         struct bfa_lps_s        *lps = NULL;
1901
1902         bfa_q_deq(&mod->lps_free_q, &lps);
1903
1904         if (lps == NULL)
1905                 return NULL;
1906
1907         list_add_tail(&lps->qe, &mod->lps_active_q);
1908
1909         bfa_sm_set_state(lps, bfa_lps_sm_init);
1910         return lps;
1911 }
1912
1913 /*
1914  * Free lport service tag. This can be called anytime after an alloc.
1915  * No need to wait for any pending login/logout completions.
1916  */
1917 void
1918 bfa_lps_delete(struct bfa_lps_s *lps)
1919 {
1920         bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1921 }
1922
1923 /*
1924  * Initiate a lport login.
1925  */
1926 void
1927 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1928         wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
1929 {
1930         lps->uarg       = uarg;
1931         lps->alpa       = alpa;
1932         lps->pdusz      = pdusz;
1933         lps->pwwn       = pwwn;
1934         lps->nwwn       = nwwn;
1935         lps->fdisc      = BFA_FALSE;
1936         lps->auth_en    = auth_en;
1937         lps->bb_scn     = bb_scn;
1938         bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1939 }
1940
1941 /*
1942  * Initiate a lport fdisc login.
1943  */
1944 void
1945 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1946         wwn_t nwwn)
1947 {
1948         lps->uarg       = uarg;
1949         lps->alpa       = 0;
1950         lps->pdusz      = pdusz;
1951         lps->pwwn       = pwwn;
1952         lps->nwwn       = nwwn;
1953         lps->fdisc      = BFA_TRUE;
1954         lps->auth_en    = BFA_FALSE;
1955         bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1956 }
1957
1958
1959 /*
1960  * Initiate a lport FDSIC logout.
1961  */
1962 void
1963 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1964 {
1965         bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1966 }
1967
1968 u8
1969 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1970 {
1971         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1972
1973         return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1974 }
1975
1976 /*
1977  * Return lport services tag given the pid
1978  */
1979 u8
1980 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1981 {
1982         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1983         struct bfa_lps_s        *lps;
1984         int                     i;
1985
1986         for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1987                 if (lps->lp_pid == pid)
1988                         return lps->bfa_tag;
1989         }
1990
1991         /* Return base port tag anyway */
1992         return 0;
1993 }
1994
1995
1996 /*
1997  * return port id assigned to the base lport
1998  */
1999 u32
2000 bfa_lps_get_base_pid(struct bfa_s *bfa)
2001 {
2002         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
2003
2004         return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
2005 }
2006
2007 /*
2008  * Set PID in case of n2n (which is assigned during PLOGI)
2009  */
2010 void
2011 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
2012 {
2013         bfa_trc(lps->bfa, lps->bfa_tag);
2014         bfa_trc(lps->bfa, n2n_pid);
2015
2016         lps->lp_pid = n2n_pid;
2017         bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
2018 }
2019
2020 /*
2021  * LPS firmware message class handler.
2022  */
2023 void
2024 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2025 {
2026         union bfi_lps_i2h_msg_u msg;
2027
2028         bfa_trc(bfa, m->mhdr.msg_id);
2029         msg.msg = m;
2030
2031         switch (m->mhdr.msg_id) {
2032         case BFI_LPS_I2H_LOGIN_RSP:
2033                 bfa_lps_login_rsp(bfa, msg.login_rsp);
2034                 break;
2035
2036         case BFI_LPS_I2H_LOGOUT_RSP:
2037                 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2038                 break;
2039
2040         case BFI_LPS_I2H_CVL_EVENT:
2041                 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2042                 break;
2043
2044         default:
2045                 bfa_trc(bfa, m->mhdr.msg_id);
2046                 WARN_ON(1);
2047         }
2048 }
2049
/*
 * Post an asynchronous event notification (AEN) for a port event.
 *
 * Fills a port AEN entry with the IOC type and port WWN and forwards
 * it to the IM layer as a vendor event.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        struct bfa_aen_entry_s  *aen_entry;

        /*
         * NOTE(review): bfad_get_aen_entry appears to be a macro that
         * assigns aen_entry (it is not passed by address) -- confirm
         * against its definition in bfad_im.h.
         */
        bfad_get_aen_entry(bfad, aen_entry);
        if (!aen_entry)
                return; /* no free AEN entry; drop the notification */

        aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
        aen_entry->aen_data.port.pwwn = fcport->pwwn;

        /* Send the AEN notification */
        bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
                                  BFA_AEN_CAT_PORT, event);
}
2067
2068 /*
2069  * FC PORT state machine functions
2070  */
/*
 * Uninit state: port state machine has not been started yet.  Waits
 * for the START event (issued after IOC configuration) before sending
 * the enable request to firmware.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
                        enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /*
                 * Start event after IOC is configured and BFA is started.
                 */
                fcport->use_flash_cfg = BFA_TRUE;

                /* Transition depends on request-queue availability */
                if (bfa_fcport_send_enable(fcport)) {
                        bfa_trc(fcport->bfa, BFA_TRUE);
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                } else {
                        bfa_trc(fcport->bfa, BFA_FALSE);
                        bfa_sm_set_state(fcport,
                                        bfa_fcport_sm_enabling_qwait);
                }
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Port is persistently configured to be in enabled state. Do
                 * not change state. Port enabling is done when START event is
                 * received.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * If a port is persistently configured to be disabled, the
                 * first event will a port disable request.
                 */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2118
/*
 * Enabling-qwait state: an enable is wanted but the request queue was
 * full; waiting for queue space (QRESUME) to send the enable request.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_QRESUME:
                /* Queue space available -- send the pending enable */
                bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                bfa_fcport_send_enable(fcport);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_reqq_wcancel(&fcport->reqq_wait);
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already enable is in progress.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Just send disable request to firmware when room becomes
                 * available in request queue.
                 */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_reqq_wcancel(&fcport->reqq_wait);
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2176
/*
 * Enabling state: enable request has been sent to firmware; waiting
 * for the firmware response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_FWRSP:
        case BFA_FCPORT_SM_LINKDOWN:
                /* Enable acknowledged (or link still down) -- linkdown state */
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
                break;

        case BFA_FCPORT_SM_LINKUP:
                bfa_fcport_update_linkinfo(fcport);
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

                /* An event callback must have been registered by now */
                WARN_ON(!fcport->event_cbfn);
                bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already being enabled.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_disabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2232
/*
 * Linkdown state: port is enabled but the link is down; waiting for a
 * LINKUP event from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_LINKUP:
                bfa_fcport_update_linkinfo(fcport);
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
                WARN_ON(!fcport->event_cbfn);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
                /* FCoE mode: record FIP FCF discovery outcome in the plog */
                if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

                        bfa_trc(fcport->bfa,
                                pevent->link_state.vc_fcf.fcf.fipenabled);
                        bfa_trc(fcport->bfa,
                                pevent->link_state.vc_fcf.fcf.fipfailed);

                        if (pevent->link_state.vc_fcf.fcf.fipfailed)
                                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                        BFA_PL_EID_FIP_FCF_DISC, 0,
                                        "FIP FCF Discovery Failed");
                        else
                                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                        BFA_PL_EID_FIP_FCF_DISC, 0,
                                        "FIP FCF Discovered");
                }

                bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port online: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

                /* If QoS is enabled and it is not online, send AEN */
                if (fcport->cfg.qos_enabled &&
                    fcport->qos_attr.state != BFA_QOS_ONLINE)
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
                break;

        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link down event.
                 */
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already enabled.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_disabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2318
/*
 * Linkup state: port is enabled and the link is up.  Any transition
 * away from this state resets the link info and emits an SCN plus the
 * appropriate log/AEN notifications.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
        enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Already enabled.
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_disabling_qwait);

                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                wwn2str(pwwn_buf, fcport->pwwn);
                /* Disable implies offline: post both notifications */
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port offline: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port disabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;

        case BFA_FCPORT_SM_LINKDOWN:
                bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
                wwn2str(pwwn_buf, fcport->pwwn);
                /* Expected offline if disabled; otherwise fabric loss */
                if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
                } else {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
                }
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_fcport_reset_linkinfo(fcport);
                wwn2str(pwwn_buf, fcport->pwwn);
                if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
                } else {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
                }
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                wwn2str(pwwn_buf, fcport->pwwn);
                if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
                        BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                                "Base port offline: WWN = %s\n", pwwn_buf);
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
                } else {
                        BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                                "Base port (WWN = %s) "
                                "lost fabric connectivity\n", pwwn_buf);
                        bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
                }
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2411
/*
 * Disabling-qwait state: a disable is wanted but the request queue was
 * full; waiting for queue space (QRESUME) to send the disable request.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
                                 enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_QRESUME:
                /* Queue space available -- send the pending disable */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                bfa_fcport_send_disable(fcport);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Disable then enable back-to-back: toggle when space opens */
                bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already being disabled.
                 */
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2456
/*
 * Toggling-qwait state: a disable followed by an enable were requested
 * while waiting for request-queue space; on QRESUME both requests are
 * sent back-to-back (disable first, then enable).
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
                                 enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_QRESUME:
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                bfa_fcport_send_disable(fcport);
                /* Then queue the enable; state depends on queue space */
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Enable already pending */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /* Drop the pending enable; back to plain disable wait */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                bfa_reqq_wcancel(&fcport->reqq_wait);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2503
/*
 * Disabling state: disable request has been sent to firmware; waiting
 * for the firmware response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_FWRSP:
                /* Disable acknowledged by firmware */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already being disabled.
                 */
                break;

        case BFA_FCPORT_SM_ENABLE:
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
                /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2558
/*
 * Disabled state: port has been explicitly disabled; waiting for an
 * ENABLE request (or STOP/HWFAIL).
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore start event for a port that is disabled.
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_ENABLE:
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already disabled.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2607
2608 static void
2609 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2610                          enum bfa_fcport_sm_event event)
2611 {
2612         bfa_trc(fcport->bfa, event);
2613
2614         switch (event) {
2615         case BFA_FCPORT_SM_START:
2616                 if (bfa_fcport_send_enable(fcport))
2617                         bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2618                 else
2619                         bfa_sm_set_state(fcport,
2620                                          bfa_fcport_sm_enabling_qwait);
2621                 break;
2622
2623         default:
2624                 /*
2625                  * Ignore all other events.
2626                  */
2627                 ;
2628         }
2629 }
2630
2631 /*
2632  * Port is enabled. IOC is down/failed.
2633  */
2634 static void
2635 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2636                          enum bfa_fcport_sm_event event)
2637 {
2638         bfa_trc(fcport->bfa, event);
2639
2640         switch (event) {
2641         case BFA_FCPORT_SM_START:
2642                 if (bfa_fcport_send_enable(fcport))
2643                         bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2644                 else
2645                         bfa_sm_set_state(fcport,
2646                                          bfa_fcport_sm_enabling_qwait);
2647                 break;
2648
2649         default:
2650                 /*
2651                  * Ignore all events.
2652                  */
2653                 ;
2654         }
2655 }
2656
2657 /*
2658  * Port is disabled. IOC is down/failed.
2659  */
2660 static void
2661 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2662                          enum bfa_fcport_sm_event event)
2663 {
2664         bfa_trc(fcport->bfa, event);
2665
2666         switch (event) {
2667         case BFA_FCPORT_SM_START:
2668                 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2669                 break;
2670
2671         case BFA_FCPORT_SM_ENABLE:
2672                 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2673                 break;
2674
2675         default:
2676                 /*
2677                  * Ignore all events.
2678                  */
2679                 ;
2680         }
2681 }
2682
2683 /*
2684  * Link state is down
2685  */
2686 static void
2687 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2688                 enum bfa_fcport_ln_sm_event event)
2689 {
2690         bfa_trc(ln->fcport->bfa, event);
2691
2692         switch (event) {
2693         case BFA_FCPORT_LN_SM_LINKUP:
2694                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2695                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2696                 break;
2697
2698         default:
2699                 bfa_sm_fault(ln->fcport->bfa, event);
2700         }
2701 }
2702
2703 /*
2704  * Link state is waiting for down notification
2705  */
2706 static void
2707 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2708                 enum bfa_fcport_ln_sm_event event)
2709 {
2710         bfa_trc(ln->fcport->bfa, event);
2711
2712         switch (event) {
2713         case BFA_FCPORT_LN_SM_LINKUP:
2714                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2715                 break;
2716
2717         case BFA_FCPORT_LN_SM_NOTIFICATION:
2718                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2719                 break;
2720
2721         default:
2722                 bfa_sm_fault(ln->fcport->bfa, event);
2723         }
2724 }
2725
2726 /*
2727  * Link state is waiting for down notification and there is a pending up
2728  */
2729 static void
2730 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2731                 enum bfa_fcport_ln_sm_event event)
2732 {
2733         bfa_trc(ln->fcport->bfa, event);
2734
2735         switch (event) {
2736         case BFA_FCPORT_LN_SM_LINKDOWN:
2737                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2738                 break;
2739
2740         case BFA_FCPORT_LN_SM_NOTIFICATION:
2741                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2742                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2743                 break;
2744
2745         default:
2746                 bfa_sm_fault(ln->fcport->bfa, event);
2747         }
2748 }
2749
2750 /*
2751  * Link state is up
2752  */
2753 static void
2754 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2755                 enum bfa_fcport_ln_sm_event event)
2756 {
2757         bfa_trc(ln->fcport->bfa, event);
2758
2759         switch (event) {
2760         case BFA_FCPORT_LN_SM_LINKDOWN:
2761                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2762                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2763                 break;
2764
2765         default:
2766                 bfa_sm_fault(ln->fcport->bfa, event);
2767         }
2768 }
2769
2770 /*
2771  * Link state is waiting for up notification
2772  */
2773 static void
2774 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2775                 enum bfa_fcport_ln_sm_event event)
2776 {
2777         bfa_trc(ln->fcport->bfa, event);
2778
2779         switch (event) {
2780         case BFA_FCPORT_LN_SM_LINKDOWN:
2781                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2782                 break;
2783
2784         case BFA_FCPORT_LN_SM_NOTIFICATION:
2785                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2786                 break;
2787
2788         default:
2789                 bfa_sm_fault(ln->fcport->bfa, event);
2790         }
2791 }
2792
2793 /*
2794  * Link state is waiting for up notification and there is a pending down
2795  */
2796 static void
2797 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2798                 enum bfa_fcport_ln_sm_event event)
2799 {
2800         bfa_trc(ln->fcport->bfa, event);
2801
2802         switch (event) {
2803         case BFA_FCPORT_LN_SM_LINKUP:
2804                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2805                 break;
2806
2807         case BFA_FCPORT_LN_SM_NOTIFICATION:
2808                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2809                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2810                 break;
2811
2812         default:
2813                 bfa_sm_fault(ln->fcport->bfa, event);
2814         }
2815 }
2816
2817 /*
2818  * Link state is waiting for up notification and there are pending down and up
2819  */
2820 static void
2821 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2822                         enum bfa_fcport_ln_sm_event event)
2823 {
2824         bfa_trc(ln->fcport->bfa, event);
2825
2826         switch (event) {
2827         case BFA_FCPORT_LN_SM_LINKDOWN:
2828                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2829                 break;
2830
2831         case BFA_FCPORT_LN_SM_NOTIFICATION:
2832                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2833                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2834                 break;
2835
2836         default:
2837                 bfa_sm_fault(ln->fcport->bfa, event);
2838         }
2839 }
2840
2841 static void
2842 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2843 {
2844         struct bfa_fcport_ln_s *ln = cbarg;
2845
2846         if (complete)
2847                 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2848         else
2849                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2850 }
2851
2852 /*
2853  * Send SCN notification to upper layers.
2854  * trunk - false if caller is fcport to ignore fcport event in trunked mode
2855  */
2856 static void
2857 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2858         bfa_boolean_t trunk)
2859 {
2860         if (fcport->cfg.trunked && !trunk)
2861                 return;
2862
2863         switch (event) {
2864         case BFA_PORT_LINKUP:
2865                 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2866                 break;
2867         case BFA_PORT_LINKDOWN:
2868                 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2869                 break;
2870         default:
2871                 WARN_ON(1);
2872         }
2873 }
2874
2875 static void
2876 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2877 {
2878         struct bfa_fcport_s *fcport = ln->fcport;
2879
2880         if (fcport->bfa->fcs) {
2881                 fcport->event_cbfn(fcport->event_cbarg, event);
2882                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2883         } else {
2884                 ln->ln_event = event;
2885                 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2886                         __bfa_cb_fcport_event, ln);
2887         }
2888 }
2889
/* DMA memory needed for the fcport stats block, cache-line rounded. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
							BFA_CACHELINE_SZ))

/*
 * Report the fcport module's DMA memory requirement to the allocator.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		   struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
2901
2902 static void
2903 bfa_fcport_qresume(void *cbarg)
2904 {
2905         struct bfa_fcport_s *fcport = cbarg;
2906
2907         bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2908 }
2909
2910 static void
2911 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2912 {
2913         struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2914
2915         fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2916         fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
2917         fcport->stats = (union bfa_fcport_stats_u *)
2918                                 bfa_mem_dma_virt(fcport_dma);
2919 }
2920
/*
 * Memory initialization: wire up the fcport module, set initial state
 * machine states, and install default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	/* Claim the stats DMA area before anything can use it. */
	bfa_fcport_mem_claim(fcport);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;	/* 0 = take IOC default later */

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	/* Queues of callers waiting for stats get/clear completion. */
	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2962
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
	/* No detach-time cleanup is needed for the fcport module. */
}
2967
2968 /*
2969  * Called when IOC is ready.
2970  */
2971 static void
2972 bfa_fcport_start(struct bfa_s *bfa)
2973 {
2974         bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2975 }
2976
2977 /*
2978  * Called before IOC is stopped.
2979  */
2980 static void
2981 bfa_fcport_stop(struct bfa_s *bfa)
2982 {
2983         bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2984         bfa_trunk_iocdisable(bfa);
2985 }
2986
2987 /*
2988  * Called when IOC failure is detected.
2989  */
2990 static void
2991 bfa_fcport_iocdisable(struct bfa_s *bfa)
2992 {
2993         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2994
2995         bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2996         bfa_trunk_iocdisable(bfa);
2997 }
2998
/*
 * Cache link attributes from the firmware link-state event saved in
 * fcport->event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): loop topology resets myalpa to 0 here; the real
	 * ALPA presumably arrives elsewhere -- confirm against callers. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3027
3028 static void
3029 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3030 {
3031         fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3032         fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3033         fcport->bbsc_op_state = BFA_FALSE;
3034 }
3035
3036 /*
3037  * Send port enable message to firmware.
3038  */
3039 static bfa_boolean_t
3040 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3041 {
3042         struct bfi_fcport_enable_req_s *m;
3043
3044         /*
3045          * Increment message tag before queue check, so that responses to old
3046          * requests are discarded.
3047          */
3048         fcport->msgtag++;
3049
3050         /*
3051          * check for room in queue to send request now
3052          */
3053         m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3054         if (!m) {
3055                 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3056                                                         &fcport->reqq_wait);
3057                 return BFA_FALSE;
3058         }
3059
3060         bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3061                         bfa_fn_lpu(fcport->bfa));
3062         m->nwwn = fcport->nwwn;
3063         m->pwwn = fcport->pwwn;
3064         m->port_cfg = fcport->cfg;
3065         m->msgtag = fcport->msgtag;
3066         m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3067          m->use_flash_cfg = fcport->use_flash_cfg;
3068         bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3069         bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3070         bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3071
3072         /*
3073          * queue I/O message to firmware
3074          */
3075         bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3076         return BFA_TRUE;
3077 }
3078
3079 /*
3080  * Send port disable message to firmware.
3081  */
3082 static  bfa_boolean_t
3083 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3084 {
3085         struct bfi_fcport_req_s *m;
3086
3087         /*
3088          * Increment message tag before queue check, so that responses to old
3089          * requests are discarded.
3090          */
3091         fcport->msgtag++;
3092
3093         /*
3094          * check for room in queue to send request now
3095          */
3096         m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3097         if (!m) {
3098                 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3099                                                         &fcport->reqq_wait);
3100                 return BFA_FALSE;
3101         }
3102
3103         bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3104                         bfa_fn_lpu(fcport->bfa));
3105         m->msgtag = fcport->msgtag;
3106
3107         /*
3108          * queue I/O message to firmware
3109          */
3110         bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3111
3112         return BFA_TRUE;
3113 }
3114
3115 static void
3116 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3117 {
3118         fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3119         fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3120
3121         bfa_trc(fcport->bfa, fcport->pwwn);
3122         bfa_trc(fcport->bfa, fcport->nwwn);
3123 }
3124
3125 static void
3126 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3127         struct bfa_qos_stats_s *s)
3128 {
3129         u32     *dip = (u32 *) d;
3130         __be32  *sip = (__be32 *) s;
3131         int             i;
3132
3133         /* Now swap the 32 bit fields */
3134         for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3135                 dip[i] = be32_to_cpu(sip[i]);
3136 }
3137
/*
 * Convert an FCoE statistics block from firmware byte order to host
 * order.  Words are handled in pairs: each 32-bit half is converted
 * with be32_to_cpu(), and on little-endian hosts the two halves of a
 * pair are also exchanged (consistent with the counters being 64-bit
 * big-endian values split into two u32 halves -- TODO confirm against
 * the bfa_fcoe_stats_s definition).
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* also exchange the halves of each pair on little-endian */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3157
/*
 * Completion callback for a stats-get request.  On completion, the
 * DMA'ed stats are byte-swapped into each pending requester's buffer
 * and all pending queue entries are completed with the saved status;
 * otherwise the pending queue is simply reinitialized.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		struct timeval tv;
		/* tv is read below only on the BFA_STATUS_OK path, which
		 * is exactly when it is initialized here. */
		if (fcport->stats_status == BFA_STATUS_OK)
			do_gettimeofday(&tv);

		/* Drain every waiter queued for this stats snapshot. */
		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Not completing: drop all pending requests. */
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3196
3197 static void
3198 bfa_fcport_stats_get_timeout(void *cbarg)
3199 {
3200         struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3201
3202         bfa_trc(fcport->bfa, fcport->stats_qfull);
3203
3204         if (fcport->stats_qfull) {
3205                 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3206                 fcport->stats_qfull = BFA_FALSE;
3207         }
3208
3209         fcport->stats_status = BFA_STATUS_ETIMER;
3210         __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3211 }
3212
3213 static void
3214 bfa_fcport_send_stats_get(void *cbarg)
3215 {
3216         struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3217         struct bfi_fcport_req_s *msg;
3218
3219         msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3220
3221         if (!msg) {
3222                 fcport->stats_qfull = BFA_TRUE;
3223                 bfa_reqq_winit(&fcport->stats_reqq_wait,
3224                                 bfa_fcport_send_stats_get, fcport);
3225                 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3226                                 &fcport->stats_reqq_wait);
3227                 return;
3228         }
3229         fcport->stats_qfull = BFA_FALSE;
3230
3231         memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3232         bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3233                         bfa_fn_lpu(fcport->bfa));
3234         bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3235 }
3236
/*
 * Completion callback for a stats-clear request.  On completion, the
 * stats-reset timestamp is refreshed and all pending clear requests
 * are completed with the saved status; otherwise the pending queue is
 * simply reinitialized.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;
		/* Complete every waiter queued for the clear. */
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Not completing: drop all pending requests. */
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3264
3265 static void
3266 bfa_fcport_stats_clr_timeout(void *cbarg)
3267 {
3268         struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3269
3270         bfa_trc(fcport->bfa, fcport->stats_qfull);
3271
3272         if (fcport->stats_qfull) {
3273                 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3274                 fcport->stats_qfull = BFA_FALSE;
3275         }
3276
3277         fcport->stats_status = BFA_STATUS_ETIMER;
3278         __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3279 }
3280
/*
 * Issue a stats-clear request to firmware, re-arming itself as the
 * queue-resume callback if the request queue is currently full.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* Queue full: retry this function when space frees up. */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3304
/*
 * Handle trunk SCN event from firmware: cache the new trunk state and
 * per-link attributes, log which links are up, and notify upper layers
 * when the trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of links reported up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		   scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	/* Copy per-link attributes out of the SCN message. */
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl       = tlink->fctl;
		lattr->speed      = tlink->speed;
		lattr->deskew     = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed    = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Log which of the two trunk links are up (bit 0 and bit 1). */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3383
3384 static void
3385 bfa_trunk_iocdisable(struct bfa_s *bfa)
3386 {
3387         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3388         int i = 0;
3389
3390         /*
3391          * In trunked mode, notify upper layers that link is down
3392          */
3393         if (fcport->cfg.trunked) {
3394                 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3395                         bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3396
3397                 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3398                 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3399                 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3400                         fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3401                         fcport->trunk.attr.link_attr[i].fctl =
3402                                                 BFA_TRUNK_LINK_FCTL_NORMAL;
3403                         fcport->trunk.attr.link_attr[i].link_state =
3404                                                 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3405                         fcport->trunk.attr.link_attr[i].speed =
3406                                                 BFA_PORT_SPEED_UNKNOWN;
3407                         fcport->trunk.attr.link_attr[i].deskew = 0;
3408                 }
3409         }
3410 }
3411
/*
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* maxfrsize of 0 (the attach-time default) means: use IOC value. */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* Honor a port disable coming from pre-boot configuration. */
	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3436
3437 /*
3438  * Firmware message handler.
3439  */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
        union bfi_fcport_i2h_msg_u i2hmsg;

        /* Stash the raw message so state-machine handlers can reach it
         * through fcport->event_arg. */
        i2hmsg.msg = msg;
        fcport->event_arg.i2hmsg = i2hmsg;

        bfa_trc(bfa, msg->mhdr.msg_id);
        bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

        switch (msg->mhdr.msg_id) {
        case BFI_FCPORT_I2H_ENABLE_RSP:
                /* Discard stale responses: tag must match the last request. */
                if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

                        /* First enable after init: adopt the port config
                         * stored in flash. Multi-byte fields arrive in
                         * wire (big-endian) order and are converted here. */
                        if (fcport->use_flash_cfg) {
                                fcport->cfg = i2hmsg.penable_rsp->port_cfg;
                                fcport->cfg.maxfrsize =
                                        cpu_to_be16(fcport->cfg.maxfrsize);
                                fcport->cfg.path_tov =
                                        cpu_to_be16(fcport->cfg.path_tov);
                                fcport->cfg.q_depth =
                                        cpu_to_be16(fcport->cfg.q_depth);

                                if (fcport->cfg.trunked)
                                        fcport->trunk.attr.state =
                                                BFA_TRUNK_OFFLINE;
                                else
                                        fcport->trunk.attr.state =
                                                BFA_TRUNK_DISABLED;
                                /* Flash config applied once only. */
                                fcport->use_flash_cfg = BFA_FALSE;
                        }

                        if (fcport->cfg.qos_enabled)
                                fcport->qos_attr.state = BFA_QOS_OFFLINE;
                        else
                                fcport->qos_attr.state = BFA_QOS_DISABLED;

                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
                }
                break;

        case BFI_FCPORT_I2H_DISABLE_RSP:
                /* NOTE(review): reads the msgtag through the penable_rsp
                 * view of the union for a disable response — presumably the
                 * tag is at the same offset in both layouts; confirm against
                 * the bfi_fcport message definitions. */
                if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
                break;

        case BFI_FCPORT_I2H_EVENT:
                /* Any link state other than LINKUP is treated as down. */
                if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
                else
                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
                break;

        case BFI_FCPORT_I2H_TRUNK_SCN:
                bfa_trc_scn(fcport, i2hmsg.trunk_scn);
                break;

        case BFI_FCPORT_I2H_STATS_GET_RSP:
                /*
                 * check for timer pop before processing the rsp
                 */
                if (list_empty(&fcport->stats_pending_q) ||
                    (fcport->stats_status == BFA_STATUS_ETIMER))
                        break;

                bfa_timer_stop(&fcport->timer);
                fcport->stats_status = i2hmsg.pstatsget_rsp->status;
                __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
                break;

        case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
                /*
                 * check for timer pop before processing the rsp
                 */
                if (list_empty(&fcport->statsclr_pending_q) ||
                    (fcport->stats_status == BFA_STATUS_ETIMER))
                        break;

                bfa_timer_stop(&fcport->timer);
                fcport->stats_status = BFA_STATUS_OK;
                __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
                break;

        case BFI_FCPORT_I2H_ENABLE_AEN:
                bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
                break;

        case BFI_FCPORT_I2H_DISABLE_AEN:
                bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
                break;

        default:
                /* Unknown firmware message id — programming error. */
                WARN_ON(1);
        break;
        }
}
3538
3539 /*
3540  * Registered callback for port events.
3541  */
3542 void
3543 bfa_fcport_event_register(struct bfa_s *bfa,
3544                                 void (*cbfn) (void *cbarg,
3545                                 enum bfa_port_linkstate event),
3546                                 void *cbarg)
3547 {
3548         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3549
3550         fcport->event_cbfn = cbfn;
3551         fcport->event_cbarg = cbarg;
3552 }
3553
3554 bfa_status_t
3555 bfa_fcport_enable(struct bfa_s *bfa)
3556 {
3557         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3558
3559         if (bfa_fcport_is_pbcdisabled(bfa))
3560                 return BFA_STATUS_PBC;
3561
3562         if (bfa_ioc_is_disabled(&bfa->ioc))
3563                 return BFA_STATUS_IOC_DISABLED;
3564
3565         if (fcport->diag_busy)
3566                 return BFA_STATUS_DIAG_BUSY;
3567
3568         bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3569         return BFA_STATUS_OK;
3570 }
3571
3572 bfa_status_t
3573 bfa_fcport_disable(struct bfa_s *bfa)
3574 {
3575         if (bfa_fcport_is_pbcdisabled(bfa))
3576                 return BFA_STATUS_PBC;
3577
3578         if (bfa_ioc_is_disabled(&bfa->ioc))
3579                 return BFA_STATUS_IOC_DISABLED;
3580
3581         bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3582         return BFA_STATUS_OK;
3583 }
3584
3585 /* If PBC is disabled on port, return error */
3586 bfa_status_t
3587 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3588 {
3589         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3590         struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3591         struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3592
3593         if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3594                 bfa_trc(bfa, fcport->pwwn);
3595                 return BFA_STATUS_PBC;
3596         }
3597         return BFA_STATUS_OK;
3598 }
3599
3600 /*
3601  * Configure port speed.
3602  */
3603 bfa_status_t
3604 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3605 {
3606         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3607
3608         bfa_trc(bfa, speed);
3609
3610         if (fcport->cfg.trunked == BFA_TRUE)
3611                 return BFA_STATUS_TRUNK_ENABLED;
3612         if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3613                 bfa_trc(bfa, fcport->speed_sup);
3614                 return BFA_STATUS_UNSUPP_SPEED;
3615         }
3616
3617         /* Port speed entered needs to be checked */
3618         if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3619                 /* For CT2, 1G is not supported */
3620                 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3621                     (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3622                         return BFA_STATUS_UNSUPP_SPEED;
3623
3624                 /* Already checked for Auto Speed and Max Speed supp */
3625                 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3626                       speed == BFA_PORT_SPEED_2GBPS ||
3627                       speed == BFA_PORT_SPEED_4GBPS ||
3628                       speed == BFA_PORT_SPEED_8GBPS ||
3629                       speed == BFA_PORT_SPEED_16GBPS ||
3630                       speed == BFA_PORT_SPEED_AUTO))
3631                         return BFA_STATUS_UNSUPP_SPEED;
3632         } else {
3633                 if (speed != BFA_PORT_SPEED_10GBPS)
3634                         return BFA_STATUS_UNSUPP_SPEED;
3635         }
3636
3637         fcport->cfg.speed = speed;
3638
3639         return BFA_STATUS_OK;
3640 }
3641
3642 /*
3643  * Get current speed.
3644  */
3645 enum bfa_port_speed
3646 bfa_fcport_get_speed(struct bfa_s *bfa)
3647 {
3648         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3649
3650         return fcport->speed;
3651 }
3652
3653 /*
3654  * Configure port topology.
3655  */
3656 bfa_status_t
3657 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3658 {
3659         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3660
3661         bfa_trc(bfa, topology);
3662         bfa_trc(bfa, fcport->cfg.topology);
3663
3664         switch (topology) {
3665         case BFA_PORT_TOPOLOGY_P2P:
3666         case BFA_PORT_TOPOLOGY_LOOP:
3667         case BFA_PORT_TOPOLOGY_AUTO:
3668                 break;
3669
3670         default:
3671                 return BFA_STATUS_EINVAL;
3672         }
3673
3674         fcport->cfg.topology = topology;
3675         return BFA_STATUS_OK;
3676 }
3677
3678 /*
3679  * Get current topology.
3680  */
3681 enum bfa_port_topology
3682 bfa_fcport_get_topology(struct bfa_s *bfa)
3683 {
3684         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3685
3686         return fcport->topology;
3687 }
3688
3689 bfa_status_t
3690 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3691 {
3692         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3693
3694         bfa_trc(bfa, alpa);
3695         bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3696         bfa_trc(bfa, fcport->cfg.hardalpa);
3697
3698         fcport->cfg.cfg_hardalpa = BFA_TRUE;
3699         fcport->cfg.hardalpa = alpa;
3700
3701         return BFA_STATUS_OK;
3702 }
3703
3704 bfa_status_t
3705 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3706 {
3707         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3708
3709         bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3710         bfa_trc(bfa, fcport->cfg.hardalpa);
3711
3712         fcport->cfg.cfg_hardalpa = BFA_FALSE;
3713         return BFA_STATUS_OK;
3714 }
3715
3716 bfa_boolean_t
3717 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3718 {
3719         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3720
3721         *alpa = fcport->cfg.hardalpa;
3722         return fcport->cfg.cfg_hardalpa;
3723 }
3724
3725 u8
3726 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3727 {
3728         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3729
3730         return fcport->myalpa;
3731 }
3732
3733 bfa_status_t
3734 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3735 {
3736         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3737
3738         bfa_trc(bfa, maxfrsize);
3739         bfa_trc(bfa, fcport->cfg.maxfrsize);
3740
3741         /* with in range */
3742         if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3743                 return BFA_STATUS_INVLD_DFSZ;
3744
3745         /* power of 2, if not the max frame size of 2112 */
3746         if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3747                 return BFA_STATUS_INVLD_DFSZ;
3748
3749         fcport->cfg.maxfrsize = maxfrsize;
3750         return BFA_STATUS_OK;
3751 }
3752
3753 u16
3754 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3755 {
3756         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3757
3758         return fcport->cfg.maxfrsize;
3759 }
3760
3761 u8
3762 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3763 {
3764         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3765
3766         return fcport->cfg.rx_bbcredit;
3767 }
3768
3769 void
3770 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3771 {
3772         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3773
3774         fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3775         fcport->cfg.bb_scn = bb_scn;
3776         if (bb_scn)
3777                 fcport->bbsc_op_state = BFA_TRUE;
3778 }
3779
3780 /*
3781  * Get port attributes.
3782  */
3783
3784 wwn_t
3785 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3786 {
3787         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3788         if (node)
3789                 return fcport->nwwn;
3790         else
3791                 return fcport->pwwn;
3792 }
3793
/*
 * Fill *attr with a snapshot of the port's configured and operational
 * attributes (WWNs, speed, topology, beacon, and derived port state).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

        memset(attr, 0, sizeof(struct bfa_port_attr_s));

        attr->nwwn = fcport->nwwn;
        attr->pwwn = fcport->pwwn;

        /* Factory WWNs come from the IOC manufacturing block. */
        attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
        attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

        memcpy(&attr->pport_cfg, &fcport->cfg,
                sizeof(struct bfa_port_cfg_s));
        /* speed attributes */
        attr->pport_cfg.speed = fcport->cfg.speed;
        attr->speed_supported = fcport->speed_sup;
        attr->speed = fcport->speed;
        attr->cos_supported = FC_CLASS_3;

        /* topology attributes */
        attr->pport_cfg.topology = fcport->cfg.topology;
        attr->topology = fcport->topology;
        attr->pport_cfg.trunked = fcport->cfg.trunked;

        /* beacon attributes */
        attr->beacon = fcport->beacon;
        attr->link_e2e_beacon = fcport->link_e2e_beacon;

        /* path TOV and queue depth are owned by the FCP-IM module. */
        attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
        attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
        attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
        attr->bbsc_op_status =  fcport->bbsc_op_state;

        /* PBC Disabled State */
        /* Override the SM-derived port state for exceptional conditions:
         * pre-boot disable wins, then IOC disable / firmware mismatch. */
        if (bfa_fcport_is_pbcdisabled(bfa))
                attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
        else {
                if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
                        attr->port_state = BFA_PORT_ST_IOCDIS;
                else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
                        attr->port_state = BFA_PORT_ST_FWMISMATCH;
        }

        /* FCoE vlan */
        attr->fcoe_vlan = fcport->fcoe_vlan;
}
3842
3843 #define BFA_FCPORT_STATS_TOV    1000
3844
3845 /*
3846  * Fetch port statistics (FCQoS or FCoE).
3847  */
3848 bfa_status_t
3849 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3850 {
3851         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3852
3853         if (bfa_ioc_is_disabled(&bfa->ioc))
3854                 return BFA_STATUS_IOC_DISABLED;
3855
3856         if (!list_empty(&fcport->statsclr_pending_q))
3857                 return BFA_STATUS_DEVBUSY;
3858
3859         if (list_empty(&fcport->stats_pending_q)) {
3860                 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3861                 bfa_fcport_send_stats_get(fcport);
3862                 bfa_timer_start(bfa, &fcport->timer,
3863                                 bfa_fcport_stats_get_timeout,
3864                                 fcport, BFA_FCPORT_STATS_TOV);
3865         } else
3866                 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3867
3868         return BFA_STATUS_OK;
3869 }
3870
3871 /*
3872  * Reset port statistics (FCQoS or FCoE).
3873  */
3874 bfa_status_t
3875 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3876 {
3877         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3878
3879         if (!list_empty(&fcport->stats_pending_q))
3880                 return BFA_STATUS_DEVBUSY;
3881
3882         if (list_empty(&fcport->statsclr_pending_q)) {
3883                 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3884                 bfa_fcport_send_stats_clear(fcport);
3885                 bfa_timer_start(bfa, &fcport->timer,
3886                                 bfa_fcport_stats_clr_timeout,
3887                                 fcport, BFA_FCPORT_STATS_TOV);
3888         } else
3889                 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3890
3891         return BFA_STATUS_OK;
3892 }
3893
/*
 * Check whether the port state machine is in the disabled state.
 */
3897 bfa_boolean_t
3898 bfa_fcport_is_disabled(struct bfa_s *bfa)
3899 {
3900         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3901
3902         return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3903                 BFA_PORT_ST_DISABLED;
3904
3905 }
3906
3907 bfa_boolean_t
3908 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3909 {
3910         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3911
3912         return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3913
3914 }
3915
3916 /*
3917  *      Enable/Disable FAA feature in port config
3918  */
3919 void
3920 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3921 {
3922         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3923
3924         bfa_trc(bfa, state);
3925         fcport->cfg.faa_state = state;
3926 }
3927
3928 /*
3929  * Get default minimum ratelim speed
3930  */
3931 enum bfa_port_speed
3932 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3933 {
3934         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3935
3936         bfa_trc(bfa, fcport->cfg.trl_def_speed);
3937         return fcport->cfg.trl_def_speed;
3938
3939 }
3940
3941 void
3942 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
3943                   bfa_boolean_t link_e2e_beacon)
3944 {
3945         struct bfa_s *bfa = dev;
3946         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3947
3948         bfa_trc(bfa, beacon);
3949         bfa_trc(bfa, link_e2e_beacon);
3950         bfa_trc(bfa, fcport->beacon);
3951         bfa_trc(bfa, fcport->link_e2e_beacon);
3952
3953         fcport->beacon = beacon;
3954         fcport->link_e2e_beacon = link_e2e_beacon;
3955 }
3956
3957 bfa_boolean_t
3958 bfa_fcport_is_linkup(struct bfa_s *bfa)
3959 {
3960         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3961
3962         return  (!fcport->cfg.trunked &&
3963                  bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3964                 (fcport->cfg.trunked &&
3965                  fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3966 }
3967
3968 bfa_boolean_t
3969 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3970 {
3971         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3972
3973         return fcport->cfg.qos_enabled;
3974 }
3975
3976 bfa_boolean_t
3977 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3978 {
3979         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3980
3981         return fcport->cfg.trunked;
3982 }
3983
3984 /*
3985  * Rport State machine functions
3986  */
3987 /*
3988  * Beginning state, only online event expected.
3989  */
3990 static void
3991 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
3992 {
3993         bfa_trc(rp->bfa, rp->rport_tag);
3994         bfa_trc(rp->bfa, event);
3995
3996         switch (event) {
3997         case BFA_RPORT_SM_CREATE:
3998                 bfa_stats(rp, sm_un_cr);
3999                 bfa_sm_set_state(rp, bfa_rport_sm_created);
4000                 break;
4001
4002         default:
4003                 bfa_stats(rp, sm_un_unexp);
4004                 bfa_sm_fault(rp->bfa, event);
4005         }
4006 }
4007
4008 static void
4009 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4010 {
4011         bfa_trc(rp->bfa, rp->rport_tag);
4012         bfa_trc(rp->bfa, event);
4013
4014         switch (event) {
4015         case BFA_RPORT_SM_ONLINE:
4016                 bfa_stats(rp, sm_cr_on);
4017                 if (bfa_rport_send_fwcreate(rp))
4018                         bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4019                 else
4020                         bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4021                 break;
4022
4023         case BFA_RPORT_SM_DELETE:
4024                 bfa_stats(rp, sm_cr_del);
4025                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4026                 bfa_rport_free(rp);
4027                 break;
4028
4029         case BFA_RPORT_SM_HWFAIL:
4030                 bfa_stats(rp, sm_cr_hwf);
4031                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4032                 break;
4033
4034         default:
4035                 bfa_stats(rp, sm_cr_unexp);
4036                 bfa_sm_fault(rp->bfa, event);
4037         }
4038 }
4039
4040 /*
4041  * Waiting for rport create response from firmware.
4042  */
4043 static void
4044 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4045 {
4046         bfa_trc(rp->bfa, rp->rport_tag);
4047         bfa_trc(rp->bfa, event);
4048
4049         switch (event) {
4050         case BFA_RPORT_SM_FWRSP:
4051                 bfa_stats(rp, sm_fwc_rsp);
4052                 bfa_sm_set_state(rp, bfa_rport_sm_online);
4053                 bfa_rport_online_cb(rp);
4054                 break;
4055
4056         case BFA_RPORT_SM_DELETE:
4057                 bfa_stats(rp, sm_fwc_del);
4058                 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4059                 break;
4060
4061         case BFA_RPORT_SM_OFFLINE:
4062                 bfa_stats(rp, sm_fwc_off);
4063                 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4064                 break;
4065
4066         case BFA_RPORT_SM_HWFAIL:
4067                 bfa_stats(rp, sm_fwc_hwf);
4068                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4069                 break;
4070
4071         default:
4072                 bfa_stats(rp, sm_fwc_unexp);
4073                 bfa_sm_fault(rp->bfa, event);
4074         }
4075 }
4076
4077 /*
4078  * Request queue is full, awaiting queue resume to send create request.
4079  */
4080 static void
4081 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4082 {
4083         bfa_trc(rp->bfa, rp->rport_tag);
4084         bfa_trc(rp->bfa, event);
4085
4086         switch (event) {
4087         case BFA_RPORT_SM_QRESUME:
4088                 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4089                 bfa_rport_send_fwcreate(rp);
4090                 break;
4091
4092         case BFA_RPORT_SM_DELETE:
4093                 bfa_stats(rp, sm_fwc_del);
4094                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4095                 bfa_reqq_wcancel(&rp->reqq_wait);
4096                 bfa_rport_free(rp);
4097                 break;
4098
4099         case BFA_RPORT_SM_OFFLINE:
4100                 bfa_stats(rp, sm_fwc_off);
4101                 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4102                 bfa_reqq_wcancel(&rp->reqq_wait);
4103                 bfa_rport_offline_cb(rp);
4104                 break;
4105
4106         case BFA_RPORT_SM_HWFAIL:
4107                 bfa_stats(rp, sm_fwc_hwf);
4108                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4109                 bfa_reqq_wcancel(&rp->reqq_wait);
4110                 break;
4111
4112         default:
4113                 bfa_stats(rp, sm_fwc_unexp);
4114                 bfa_sm_fault(rp->bfa, event);
4115         }
4116 }
4117
4118 /*
4119  * Online state - normal parking state.
4120  */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        struct bfi_rport_qos_scn_s *qos_scn;

        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_OFFLINE:
                bfa_stats(rp, sm_on_off);
                /* Fall back to the qfull state if the request queue is full. */
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_on_del);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_on_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        case BFA_RPORT_SM_SET_SPEED:
                bfa_rport_send_fwspeed(rp);
                break;

        case BFA_RPORT_SM_QOS_SCN:
                /* Firmware QoS state change notification stashed by the ISR. */
                qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
                /* NOTE(review): qos_attr is copied before the flow_id
                 * byte-swap below, so rp->qos_attr.qos_flow_id keeps the
                 * wire byte order - confirm consumers expect that. */
                rp->qos_attr = qos_scn->new_qos_attr;
                bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
                bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

                qos_scn->old_qos_attr.qos_flow_id  =
                        be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
                qos_scn->new_qos_attr.qos_flow_id  =
                        be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

                /* Notify the driver only about the attributes that changed. */
                if (qos_scn->old_qos_attr.qos_flow_id !=
                        qos_scn->new_qos_attr.qos_flow_id)
                        bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
                                                    qos_scn->old_qos_attr,
                                                    qos_scn->new_qos_attr);
                if (qos_scn->old_qos_attr.qos_priority !=
                        qos_scn->new_qos_attr.qos_priority)
                        bfa_cb_rport_qos_scn_prio(rp->rport_drv,
                                                  qos_scn->old_qos_attr,
                                                  qos_scn->new_qos_attr);
                break;

        default:
                bfa_stats(rp, sm_on_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4185
4186 /*
4187  * Firmware rport is being deleted - awaiting f/w response.
4188  */
4189 static void
4190 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4191 {
4192         bfa_trc(rp->bfa, rp->rport_tag);
4193         bfa_trc(rp->bfa, event);
4194
4195         switch (event) {
4196         case BFA_RPORT_SM_FWRSP:
4197                 bfa_stats(rp, sm_fwd_rsp);
4198                 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4199                 bfa_rport_offline_cb(rp);
4200                 break;
4201
4202         case BFA_RPORT_SM_DELETE:
4203                 bfa_stats(rp, sm_fwd_del);
4204                 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4205                 break;
4206
4207         case BFA_RPORT_SM_HWFAIL:
4208                 bfa_stats(rp, sm_fwd_hwf);
4209                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4210                 bfa_rport_offline_cb(rp);
4211                 break;
4212
4213         default:
4214                 bfa_stats(rp, sm_fwd_unexp);
4215                 bfa_sm_fault(rp->bfa, event);
4216         }
4217 }
4218
4219 static void
4220 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4221 {
4222         bfa_trc(rp->bfa, rp->rport_tag);
4223         bfa_trc(rp->bfa, event);
4224
4225         switch (event) {
4226         case BFA_RPORT_SM_QRESUME:
4227                 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4228                 bfa_rport_send_fwdelete(rp);
4229                 break;
4230
4231         case BFA_RPORT_SM_DELETE:
4232                 bfa_stats(rp, sm_fwd_del);
4233                 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4234                 break;
4235
4236         case BFA_RPORT_SM_HWFAIL:
4237                 bfa_stats(rp, sm_fwd_hwf);
4238                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4239                 bfa_reqq_wcancel(&rp->reqq_wait);
4240                 bfa_rport_offline_cb(rp);
4241                 break;
4242
4243         default:
4244                 bfa_stats(rp, sm_fwd_unexp);
4245                 bfa_sm_fault(rp->bfa, event);
4246         }
4247 }
4248
4249 /*
4250  * Offline state.
4251  */
4252 static void
4253 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4254 {
4255         bfa_trc(rp->bfa, rp->rport_tag);
4256         bfa_trc(rp->bfa, event);
4257
4258         switch (event) {
4259         case BFA_RPORT_SM_DELETE:
4260                 bfa_stats(rp, sm_off_del);
4261                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4262                 bfa_rport_free(rp);
4263                 break;
4264
4265         case BFA_RPORT_SM_ONLINE:
4266                 bfa_stats(rp, sm_off_on);
4267                 if (bfa_rport_send_fwcreate(rp))
4268                         bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4269                 else
4270                         bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4271                 break;
4272
4273         case BFA_RPORT_SM_HWFAIL:
4274                 bfa_stats(rp, sm_off_hwf);
4275                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4276                 break;
4277
4278         case BFA_RPORT_SM_OFFLINE:
4279                 bfa_rport_offline_cb(rp);
4280                 break;
4281
4282         default:
4283                 bfa_stats(rp, sm_off_unexp);
4284                 bfa_sm_fault(rp->bfa, event);
4285         }
4286 }
4287
4288 /*
4289  * Rport is deleted, waiting for firmware response to delete.
4290  */
4291 static void
4292 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4293 {
4294         bfa_trc(rp->bfa, rp->rport_tag);
4295         bfa_trc(rp->bfa, event);
4296
4297         switch (event) {
4298         case BFA_RPORT_SM_FWRSP:
4299                 bfa_stats(rp, sm_del_fwrsp);
4300                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4301                 bfa_rport_free(rp);
4302                 break;
4303
4304         case BFA_RPORT_SM_HWFAIL:
4305                 bfa_stats(rp, sm_del_hwf);
4306                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4307                 bfa_rport_free(rp);
4308                 break;
4309
4310         default:
4311                 bfa_sm_fault(rp->bfa, event);
4312         }
4313 }
4314
4315 static void
4316 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4317 {
4318         bfa_trc(rp->bfa, rp->rport_tag);
4319         bfa_trc(rp->bfa, event);
4320
4321         switch (event) {
4322         case BFA_RPORT_SM_QRESUME:
4323                 bfa_stats(rp, sm_del_fwrsp);
4324                 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4325                 bfa_rport_send_fwdelete(rp);
4326                 break;
4327
4328         case BFA_RPORT_SM_HWFAIL:
4329                 bfa_stats(rp, sm_del_hwf);
4330                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4331                 bfa_reqq_wcancel(&rp->reqq_wait);
4332                 bfa_rport_free(rp);
4333                 break;
4334
4335         default:
4336                 bfa_sm_fault(rp->bfa, event);
4337         }
4338 }
4339
4340 /*
4341  * Waiting for rport create response from firmware. A delete is pending.
4342  */
4343 static void
4344 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4345                                 enum bfa_rport_event event)
4346 {
4347         bfa_trc(rp->bfa, rp->rport_tag);
4348         bfa_trc(rp->bfa, event);
4349
4350         switch (event) {
4351         case BFA_RPORT_SM_FWRSP:
4352                 bfa_stats(rp, sm_delp_fwrsp);
4353                 if (bfa_rport_send_fwdelete(rp))
4354                         bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4355                 else
4356                         bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4357                 break;
4358
4359         case BFA_RPORT_SM_HWFAIL:
4360                 bfa_stats(rp, sm_delp_hwf);
4361                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4362                 bfa_rport_free(rp);
4363                 break;
4364
4365         default:
4366                 bfa_stats(rp, sm_delp_unexp);
4367                 bfa_sm_fault(rp->bfa, event);
4368         }
4369 }
4370
4371 /*
4372  * Waiting for rport create response from firmware. Rport offline is pending.
4373  */
4374 static void
4375 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4376                                  enum bfa_rport_event event)
4377 {
4378         bfa_trc(rp->bfa, rp->rport_tag);
4379         bfa_trc(rp->bfa, event);
4380
4381         switch (event) {
4382         case BFA_RPORT_SM_FWRSP:
4383                 bfa_stats(rp, sm_offp_fwrsp);
4384                 if (bfa_rport_send_fwdelete(rp))
4385                         bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4386                 else
4387                         bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4388                 break;
4389
4390         case BFA_RPORT_SM_DELETE:
4391                 bfa_stats(rp, sm_offp_del);
4392                 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4393                 break;
4394
4395         case BFA_RPORT_SM_HWFAIL:
4396                 bfa_stats(rp, sm_offp_hwf);
4397                 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4398                 bfa_rport_offline_cb(rp);
4399                 break;
4400
4401         default:
4402                 bfa_stats(rp, sm_offp_unexp);
4403                 bfa_sm_fault(rp->bfa, event);
4404         }
4405 }
4406
4407 /*
4408  * IOC h/w failed.
4409  */
4410 static void
4411 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4412 {
4413         bfa_trc(rp->bfa, rp->rport_tag);
4414         bfa_trc(rp->bfa, event);
4415
4416         switch (event) {
4417         case BFA_RPORT_SM_OFFLINE:
4418                 bfa_stats(rp, sm_iocd_off);
4419                 bfa_rport_offline_cb(rp);
4420                 break;
4421
4422         case BFA_RPORT_SM_DELETE:
4423                 bfa_stats(rp, sm_iocd_del);
4424                 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4425                 bfa_rport_free(rp);
4426                 break;
4427
4428         case BFA_RPORT_SM_ONLINE:
4429                 bfa_stats(rp, sm_iocd_on);
4430                 if (bfa_rport_send_fwcreate(rp))
4431                         bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4432                 else
4433                         bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4434                 break;
4435
4436         case BFA_RPORT_SM_HWFAIL:
4437                 break;
4438
4439         default:
4440                 bfa_stats(rp, sm_iocd_unexp);
4441                 bfa_sm_fault(rp->bfa, event);
4442         }
4443 }
4444
4445
4446
4447 /*
4448  *  bfa_rport_private BFA rport private functions
4449  */
4450
4451 static void
4452 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4453 {
4454         struct bfa_rport_s *rp = cbarg;
4455
4456         if (complete)
4457                 bfa_cb_rport_online(rp->rport_drv);
4458 }
4459
4460 static void
4461 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4462 {
4463         struct bfa_rport_s *rp = cbarg;
4464
4465         if (complete)
4466                 bfa_cb_rport_offline(rp->rport_drv);
4467 }
4468
4469 static void
4470 bfa_rport_qresume(void *cbarg)
4471 {
4472         struct bfa_rport_s      *rp = cbarg;
4473
4474         bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4475 }
4476
/*
 * Compute the kva memory required by the rport module: one
 * struct bfa_rport_s per configured rport.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	/* Enforce the module minimum on the configured rport count. */
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4490
/*
 * Module attach: carve the rport array out of the pre-reserved kva
 * block, initialize each rport and place it on the free queue.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be non-zero and a power of two. */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * Rport with tag 0 is reserved (never allocated), so it is
		 * not placed on the free queue.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4530
/* Module detach hook; rport module has no detach-time work. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4535
/* Module start hook; rport module has no start-time work. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4540
/* Module stop hook; rport module has no stop-time work. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4545
/*
 * IOC disable: fail every active rport via its state machine and
 * return unused rport resources to the free queue.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* Safe iteration: HWFAIL may free the rport and unlink it. */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4561
/*
 * Allocate an rport from the free queue and move it to the active
 * queue. Returns NULL when the free queue is empty.
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	/* bfa_q_deq() sets rport to NULL when the queue is empty. */
	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
4573
/*
 * Return an rport to the free queue. The rport must currently be on
 * the active queue.
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	/* Catch double-free / free of an rport that was never allocated. */
	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
4583
/*
 * Build and queue a firmware RPORT create request.
 *
 * Returns BFA_TRUE when the request was queued; BFA_FALSE when the
 * request queue is full, in which case the rport is parked on the
 * queue-wait list and resumed later via bfa_rport_qresume().
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* Firmware expects big-endian max frame size. */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4616
/*
 * Build and queue a firmware RPORT delete request.
 *
 * Returns BFA_TRUE when the request was queued; BFA_FALSE when the
 * request queue is full (rport parked on the queue-wait list).
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	/* fw_handle was returned by firmware in the create response. */
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4641
/*
 * Build and queue a firmware set-speed request.
 *
 * Unlike create/delete, a full request queue is NOT retried: the
 * speed update is simply dropped (traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4667
4668
4669
4670 /*
4671  *  bfa_rport_public
4672  */
4673
4674 /*
4675  * Rport interrupt processing.
4676  */
4677 void
4678 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4679 {
4680         union bfi_rport_i2h_msg_u msg;
4681         struct bfa_rport_s *rp;
4682
4683         bfa_trc(bfa, m->mhdr.msg_id);
4684
4685         msg.msg = m;
4686
4687         switch (m->mhdr.msg_id) {
4688         case BFI_RPORT_I2H_CREATE_RSP:
4689                 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4690                 rp->fw_handle = msg.create_rsp->fw_handle;
4691                 rp->qos_attr = msg.create_rsp->qos_attr;
4692                 bfa_rport_set_lunmask(bfa, rp);
4693                 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4694                 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4695                 break;
4696
4697         case BFI_RPORT_I2H_DELETE_RSP:
4698                 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4699                 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4700                 bfa_rport_unset_lunmask(bfa, rp);
4701                 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4702                 break;
4703
4704         case BFI_RPORT_I2H_QOS_SCN:
4705                 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4706                 rp->event_arg.fw_msg = msg.qos_scn_evt;
4707                 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4708                 break;
4709
4710         default:
4711                 bfa_trc(bfa, m->mhdr.msg_id);
4712                 WARN_ON(1);
4713         }
4714 }
4715
/*
 * Resource reconfiguration: firmware supports fewer rports than the
 * driver pre-allocated, so park the surplus on the unused queue.
 */
void
bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
{
	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
		bfa_q_deq_tail(&mod->rp_free_q, &qe);
		list_add_tail(qe, &mod->rp_unused_q);
	}
}
4728
4729 /*
4730  *  bfa_rport_api
4731  */
4732
/*
 * Allocate an rport for the given driver rport context and kick its
 * state machine with a CREATE event. Returns NULL when no free
 * rport resources remain.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	/* A freshly allocated rport must be in the uninit state. */
	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
4752
/*
 * Bring an rport online with the supplied login parameters; the info
 * is copied into the rport before the ONLINE event is delivered.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* Warn, but still recover below rather than pass 0 to firmware. */
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4770
4771 void
4772 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4773 {
4774         WARN_ON(speed == 0);
4775         WARN_ON(speed == BFA_PORT_SPEED_AUTO);
4776
4777         if (rport) {
4778                 rport->rport_info.speed = speed;
4779                 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4780         }
4781 }
4782
/* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	/* Look up the remote and local port WWNs for the fcpim update. */
	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* Enable lunmasking on both the logical port and the rport. */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
4798
/* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* Disable lunmasking and invalidate the fcpim tags. */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
4814
4815 /*
4816  * SGPG related functions
4817  */
4818
4819 /*
4820  * Compute and return memory needed by FCP(im) module.
4821  */
4822 static void
4823 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4824                 struct bfa_s *bfa)
4825 {
4826         struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
4827         struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
4828         struct bfa_mem_dma_s *seg_ptr;
4829         u16     nsegs, idx, per_seg_sgpg, num_sgpg;
4830         u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
4831
4832         if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4833                 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4834         else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
4835                 cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
4836
4837         num_sgpg = cfg->drvcfg.num_sgpgs;
4838
4839         nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4840         per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
4841
4842         bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
4843                 if (num_sgpg >= per_seg_sgpg) {
4844                         num_sgpg -= per_seg_sgpg;
4845                         bfa_mem_dma_setup(minfo, seg_ptr,
4846                                         per_seg_sgpg * sgpg_sz);
4847                 } else
4848                         bfa_mem_dma_setup(minfo, seg_ptr,
4849                                         num_sgpg * sgpg_sz);
4850         }
4851
4852         /* kva memory */
4853         bfa_mem_kva_setup(minfo, sgpg_kva,
4854                 cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
4855 }
4856
/*
 * Module attach: claim the DMA segments reserved by bfa_sgpg_meminfo(),
 * pair each aligned bfi_sgpg_s DMA page with a bfa_sgpg_s kva
 * descriptor, and queue the descriptors on the free sgpg list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* Same storage viewed as raw physical address or BFI address. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* Round the segment start up to sgpg alignment. */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		/* Pages that actually fit after the alignment adjustment. */
		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* Store the physical address in hardware byte order. */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
4919
/* Module detach hook; sgpg module has no detach-time work. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4924
/* Module start hook; sgpg module has no start-time work. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4929
/* Module stop hook; sgpg module has no stop-time work. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4934
/* IOC disable hook; sgpg module has no IOC-failure cleanup. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4939
/*
 * Allocate nsgpgs sg pages onto the caller's sgpg_q. All-or-nothing:
 * returns BFA_STATUS_ENOMEM without allocating anything when fewer
 * than nsgpgs pages are free.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		/* free_sgpgs said there are enough, so deq cannot fail. */
		WARN_ON(!hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
4959
/*
 * Return nsgpg sg pages from sgpg_q to the free pool, then hand out
 * the newly freed pages to waiters in FIFO order, completing each
 * waiter whose full request has been satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* Give a partial allocation if the pool is short. */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* Request fully satisfied: dequeue and complete. */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4991
/*
 * Queue a wait element for nsgpg sg pages. Any pages currently free
 * are granted immediately; the remainder is delivered incrementally
 * by bfa_sgpg_mfree() and the wqe callback fires once complete.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* Caller should only wait after bfa_sgpg_malloc() failed. */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5017
/*
 * Cancel a pending sgpg wait and return any partially granted pages
 * back to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	/* total != remaining means some pages were already handed out. */
	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
5030
5031 void
5032 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5033                    void *cbarg)
5034 {
5035         INIT_LIST_HEAD(&wqe->sgpg_q);
5036         wqe->cbfn = cbfn;
5037         wqe->cbarg = cbarg;
5038 }
5039
5040 /*
5041  *  UF related functions
5042  */
5043 /*
5044  *****************************************************************************
5045  * Internal functions
5046  *****************************************************************************
5047  */
5048 static void
5049 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5050 {
5051         struct bfa_uf_s   *uf = cbarg;
5052         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5053
5054         if (complete)
5055                 ufm->ufrecv(ufm->cbarg, uf);
5056 }
5057
5058 static void
5059 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5060 {
5061         struct bfi_uf_buf_post_s *uf_bp_msg;
5062         u16 i;
5063         u16 buf_len;
5064
5065         ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5066         uf_bp_msg = ufm->uf_buf_posts;
5067
5068         for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5069              i++, uf_bp_msg++) {
5070                 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5071
5072                 uf_bp_msg->buf_tag = i;
5073                 buf_len = sizeof(struct bfa_uf_buf_s);
5074                 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5075                 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5076                             bfa_fn_lpu(ufm->bfa));
5077                 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5078         }
5079
5080         /*
5081          * advance pointer beyond consumed memory
5082          */
5083         bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5084 }
5085
/*
 * Claim kva memory for the UF descriptor array, initialize each UF
 * (tag, DMA buffer kva/pa) and queue it on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		/* Resolve this UF's slice of the DMA segment (kva + pa). */
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5115
/* Claim all UF module memory: descriptors first, then post messages. */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5122
/*
 * Compute DMA and kva memory needed by the UF module: one DMA buffer
 * per UF, plus a kva descriptor and a pre-built post message per UF.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* Full segments first; the final segment holds the remainder. */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5150
/*
 * Module attach: initialize the UF queues and claim the memory
 * reserved by bfa_uf_meminfo().
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
5165
/* Module detach hook; UF module has no detach-time work. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5170
/*
 * Dequeue a UF from the free queue; returns NULL when none are free
 * (bfa_q_deq() NULLs the output pointer on an empty queue).
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s   *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
5179
/* Return a UF to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5185
/*
 * Post a UF receive buffer to firmware by copying the pre-built post
 * message into the request queue. On success the UF moves to the
 * posted queue; returns BFA_STATUS_FAILED when the queue is full.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* Message was fully built at attach time by claim_uf_post_msgs(). */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5204
5205 static void
5206 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5207 {
5208         struct bfa_uf_s   *uf;
5209
5210         while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5211                 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5212                         break;
5213         }
5214 }
5215
/*
 * Handle a firmware frame-received event: locate the UF by buffer
 * tag, fix up byte order, log the frame, and deliver it to the
 * registered receive handler (directly in FCS context, otherwise via
 * the deferred callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;
	struct fchs_s *fchs;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* Lengths arrive big-endian from firmware. */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* A valid frame is at least an FC header. */
	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* Header-only frame: no payload word to log. */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
5257
/* Module stop hook; UF module has no stop-time work. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5262
/*
 * IOC disable: reclaim all posted UF buffers (firmware will never
 * complete them) and fold unused UFs back into the free queue.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	/* Safe iteration: each UF is unlinked inside the loop. */
	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
5279
/* Module start: hand all free UF receive buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5285
5286 /*
5287  * Register handler for all unsolicted receive frames.
5288  *
5289  * @param[in]   bfa             BFA instance
5290  * @param[in]   ufrecv  receive handler function
5291  * @param[in]   cbarg   receive handler arg
5292  */
5293 void
5294 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5295 {
5296         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5297
5298         ufm->ufrecv = ufrecv;
5299         ufm->cbarg = cbarg;
5300 }
5301
5302 /*
5303  *      Free an unsolicited frame back to BFA.
5304  *
5305  * @param[in]           uf              unsolicited frame to be freed
5306  *
5307  * @return None
5308  */
5309 void
5310 bfa_uf_free(struct bfa_uf_s *uf)
5311 {
5312         bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5313         bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5314 }
5315
5316
5317
5318 /*
5319  *  uf_pub BFA uf module public functions
5320  */
/*
 * UF interrupt processing: dispatch firmware-to-host UF messages;
 * only frame-received events are expected.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5336
/*
 * Resource reconfiguration: firmware supports fewer UF buffers than
 * pre-allocated, so park the surplus on the unused queue.
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}
5349
5350 /*
5351  *      BFA fcdiag module
5352  */
5353 #define BFA_DIAG_QTEST_TOV      1000    /* msec */
5354
5355 /*
5356  *      Set port status to busy
5357  */
5358 static void
5359 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5360 {
5361         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5362
5363         if (fcdiag->lb.lock)
5364                 fcport->diag_busy = BFA_TRUE;
5365         else
5366                 fcport->diag_busy = BFA_FALSE;
5367 }
5368
/* Meminfo hook; fcdiag claims no memory of its own (see attach). */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}
5374
/*
 * Module attach: wire up the bfa and trace-module back pointers.
 */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	fcdiag->bfa		= bfa;
	fcdiag->trcmod	= bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
}
5384
5385 static void
5386 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5387 {
5388         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5389         bfa_trc(fcdiag, fcdiag->lb.lock);
5390         if (fcdiag->lb.lock) {
5391                 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5392                 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5393                 fcdiag->lb.lock = 0;
5394                 bfa_fcdiag_set_busy_status(fcdiag);
5395         }
5396 }
5397
/*
 * Detach hook for the fcdiag module: nothing to tear down, since
 * attach claimed no private resources.
 */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
5402
/*
 * Start hook for the fcdiag module: intentionally a no-op.
 */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
5407
/*
 * Stop hook for the fcdiag module: intentionally a no-op.
 */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
5412
5413 static void
5414 bfa_fcdiag_queuetest_timeout(void *cbarg)
5415 {
5416         struct bfa_fcdiag_s       *fcdiag = cbarg;
5417         struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5418
5419         bfa_trc(fcdiag, fcdiag->qtest.all);
5420         bfa_trc(fcdiag, fcdiag->qtest.count);
5421
5422         fcdiag->qtest.timer_active = 0;
5423
5424         res->status = BFA_STATUS_ETIMER;
5425         res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5426         if (fcdiag->qtest.all)
5427                 res->queue  = fcdiag->qtest.all;
5428
5429         bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5430         fcdiag->qtest.status = BFA_STATUS_ETIMER;
5431         fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5432         fcdiag->qtest.lock = 0;
5433 }
5434
5435 static bfa_status_t
5436 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5437 {
5438         u32     i;
5439         struct bfi_diag_qtest_req_s *req;
5440
5441         req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5442         if (!req)
5443                 return BFA_STATUS_DEVBUSY;
5444
5445         /* build host command */
5446         bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5447                 bfa_fn_lpu(fcdiag->bfa));
5448
5449         for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5450                 req->data[i] = QTEST_PAT_DEFAULT;
5451
5452         bfa_trc(fcdiag, fcdiag->qtest.queue);
5453         /* ring door bell */
5454         bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5455         return BFA_STATUS_OK;
5456 }
5457
5458 static void
5459 bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5460                         bfi_diag_qtest_rsp_t *rsp)
5461 {
5462         struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5463         bfa_status_t status = BFA_STATUS_OK;
5464         int i;
5465
5466         /* Check timer, should still be active   */
5467         if (!fcdiag->qtest.timer_active) {
5468                 bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5469                 return;
5470         }
5471
5472         /* update count */
5473         fcdiag->qtest.count--;
5474
5475         /* Check result */
5476         for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5477                 if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5478                         res->status = BFA_STATUS_DATACORRUPTED;
5479                         break;
5480                 }
5481         }
5482
5483         if (res->status == BFA_STATUS_OK) {
5484                 if (fcdiag->qtest.count > 0) {
5485                         status = bfa_fcdiag_queuetest_send(fcdiag);
5486                         if (status == BFA_STATUS_OK)
5487                                 return;
5488                         else
5489                                 res->status = status;
5490                 } else if (fcdiag->qtest.all > 0 &&
5491                         fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5492                         fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5493                         fcdiag->qtest.queue++;
5494                         status = bfa_fcdiag_queuetest_send(fcdiag);
5495                         if (status == BFA_STATUS_OK)
5496                                 return;
5497                         else
5498                                 res->status = status;
5499                 }
5500         }
5501
5502         /* Stop timer when we comp all queue */
5503         if (fcdiag->qtest.timer_active) {
5504                 bfa_timer_stop(&fcdiag->qtest.timer);
5505                 fcdiag->qtest.timer_active = 0;
5506         }
5507         res->queue = fcdiag->qtest.queue;
5508         res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5509         bfa_trc(fcdiag, res->count);
5510         bfa_trc(fcdiag, res->status);
5511         fcdiag->qtest.status = res->status;
5512         fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5513         fcdiag->qtest.lock = 0;
5514 }
5515
5516 static void
5517 bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5518                         struct bfi_diag_lb_rsp_s *rsp)
5519 {
5520         struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5521
5522         res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
5523         res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
5524         res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
5525         res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
5526         res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
5527         res->status     = rsp->res.status;
5528         fcdiag->lb.status = rsp->res.status;
5529         bfa_trc(fcdiag, fcdiag->lb.status);
5530         fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5531         fcdiag->lb.lock = 0;
5532         bfa_fcdiag_set_busy_status(fcdiag);
5533 }
5534
5535 static bfa_status_t
5536 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5537                         struct bfa_diag_loopback_s *loopback)
5538 {
5539         struct bfi_diag_lb_req_s *lb_req;
5540
5541         lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5542         if (!lb_req)
5543                 return BFA_STATUS_DEVBUSY;
5544
5545         /* build host command */
5546         bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5547                 bfa_fn_lpu(fcdiag->bfa));
5548
5549         lb_req->lb_mode = loopback->lb_mode;
5550         lb_req->speed = loopback->speed;
5551         lb_req->loopcnt = loopback->loopcnt;
5552         lb_req->pattern = loopback->pattern;
5553
5554         /* ring door bell */
5555         bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5556
5557         bfa_trc(fcdiag, loopback->lb_mode);
5558         bfa_trc(fcdiag, loopback->speed);
5559         bfa_trc(fcdiag, loopback->loopcnt);
5560         bfa_trc(fcdiag, loopback->pattern);
5561         return BFA_STATUS_OK;
5562 }
5563
5564 /*
5565  *      cpe/rme intr handler
5566  */
5567 void
5568 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5569 {
5570         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5571
5572         switch (msg->mhdr.msg_id) {
5573         case BFI_DIAG_I2H_LOOPBACK:
5574                 bfa_fcdiag_loopback_comp(fcdiag,
5575                                 (struct bfi_diag_lb_rsp_s *) msg);
5576                 break;
5577         case BFI_DIAG_I2H_QTEST:
5578                 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5579                 break;
5580         default:
5581                 bfa_trc(fcdiag, msg->mhdr.msg_id);
5582                 WARN_ON(1);
5583         }
5584 }
5585
5586 /*
5587  *      Loopback test
5588  *
5589  *   @param[in] *bfa            - bfa data struct
5590  *   @param[in] opmode          - port operation mode
5591  *   @param[in] speed           - port speed
5592  *   @param[in] lpcnt           - loop count
5593  *   @param[in] pat                     - pattern to build packet
5594  *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5595  *   @param[in] cbfn            - callback function
5596  *   @param[in] cbarg           - callback functioin arg
5597  *
5598  *   @param[out]
5599  */
5600 bfa_status_t
5601 bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5602                 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5603                 struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5604                 void *cbarg)
5605 {
5606         struct  bfa_diag_loopback_s loopback;
5607         struct bfa_port_attr_s attr;
5608         bfa_status_t status;
5609         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5610
5611         if (!bfa_iocfc_is_operational(bfa))
5612                 return BFA_STATUS_IOC_NON_OP;
5613
5614         /* if port is PBC disabled, return error */
5615         if (bfa_fcport_is_pbcdisabled(bfa)) {
5616                 bfa_trc(fcdiag, BFA_STATUS_PBC);
5617                 return BFA_STATUS_PBC;
5618         }
5619
5620         if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5621                 bfa_trc(fcdiag, opmode);
5622                 return BFA_STATUS_PORT_NOT_DISABLED;
5623         }
5624
5625         /*
5626          * Check if input speed is supported by the port mode
5627          */
5628         if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5629                 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5630                       speed == BFA_PORT_SPEED_2GBPS ||
5631                       speed == BFA_PORT_SPEED_4GBPS ||
5632                       speed == BFA_PORT_SPEED_8GBPS ||
5633                       speed == BFA_PORT_SPEED_16GBPS ||
5634                       speed == BFA_PORT_SPEED_AUTO)) {
5635                         bfa_trc(fcdiag, speed);
5636                         return BFA_STATUS_UNSUPP_SPEED;
5637                 }
5638                 bfa_fcport_get_attr(bfa, &attr);
5639                 bfa_trc(fcdiag, attr.speed_supported);
5640                 if (speed > attr.speed_supported)
5641                         return BFA_STATUS_UNSUPP_SPEED;
5642         } else {
5643                 if (speed != BFA_PORT_SPEED_10GBPS) {
5644                         bfa_trc(fcdiag, speed);
5645                         return BFA_STATUS_UNSUPP_SPEED;
5646                 }
5647         }
5648
5649         /* For Mezz card, port speed entered needs to be checked */
5650         if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5651                 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5652                         if ((speed == BFA_PORT_SPEED_1GBPS) &&
5653                             (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5654                                 return BFA_STATUS_UNSUPP_SPEED;
5655                         if (!(speed == BFA_PORT_SPEED_1GBPS ||
5656                               speed == BFA_PORT_SPEED_2GBPS ||
5657                               speed == BFA_PORT_SPEED_4GBPS ||
5658                               speed == BFA_PORT_SPEED_8GBPS ||
5659                               speed == BFA_PORT_SPEED_16GBPS ||
5660                               speed == BFA_PORT_SPEED_AUTO))
5661                                 return BFA_STATUS_UNSUPP_SPEED;
5662                 } else {
5663                         if (speed != BFA_PORT_SPEED_10GBPS)
5664                                 return BFA_STATUS_UNSUPP_SPEED;
5665                 }
5666         }
5667
5668         /* check to see if there is another destructive diag cmd running */
5669         if (fcdiag->lb.lock) {
5670                 bfa_trc(fcdiag, fcdiag->lb.lock);
5671                 return BFA_STATUS_DEVBUSY;
5672         }
5673
5674         fcdiag->lb.lock = 1;
5675         loopback.lb_mode = opmode;
5676         loopback.speed = speed;
5677         loopback.loopcnt = lpcnt;
5678         loopback.pattern = pat;
5679         fcdiag->lb.result = result;
5680         fcdiag->lb.cbfn = cbfn;
5681         fcdiag->lb.cbarg = cbarg;
5682         memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5683         bfa_fcdiag_set_busy_status(fcdiag);
5684
5685         /* Send msg to fw */
5686         status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5687         return status;
5688 }
5689
5690 /*
5691  *      DIAG queue test command
5692  *
5693  *   @param[in] *bfa            - bfa data struct
5694  *   @param[in] force           - 1: don't do ioc op checking
5695  *   @param[in] queue           - queue no. to test
5696  *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
5697  *   @param[in] cbfn            - callback function
5698  *   @param[in] *cbarg          - callback functioin arg
5699  *
5700  *   @param[out]
5701  */
5702 bfa_status_t
5703 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
5704                 struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
5705                 void *cbarg)
5706 {
5707         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5708         bfa_status_t status;
5709         bfa_trc(fcdiag, force);
5710         bfa_trc(fcdiag, queue);
5711
5712         if (!force && !bfa_iocfc_is_operational(bfa))
5713                 return BFA_STATUS_IOC_NON_OP;
5714
5715         /* check to see if there is another destructive diag cmd running */
5716         if (fcdiag->qtest.lock) {
5717                 bfa_trc(fcdiag, fcdiag->qtest.lock);
5718                 return BFA_STATUS_DEVBUSY;
5719         }
5720
5721         /* Initialization */
5722         fcdiag->qtest.lock = 1;
5723         fcdiag->qtest.cbfn = cbfn;
5724         fcdiag->qtest.cbarg = cbarg;
5725         fcdiag->qtest.result = result;
5726         fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5727
5728         /* Init test results */
5729         fcdiag->qtest.result->status = BFA_STATUS_OK;
5730         fcdiag->qtest.result->count  = 0;
5731
5732         /* send */
5733         if (queue < BFI_IOC_MAX_CQS) {
5734                 fcdiag->qtest.result->queue  = (u8)queue;
5735                 fcdiag->qtest.queue = (u8)queue;
5736                 fcdiag->qtest.all   = 0;
5737         } else {
5738                 fcdiag->qtest.result->queue  = 0;
5739                 fcdiag->qtest.queue = 0;
5740                 fcdiag->qtest.all   = 1;
5741         }
5742         status = bfa_fcdiag_queuetest_send(fcdiag);
5743
5744         /* Start a timer */
5745         if (status == BFA_STATUS_OK) {
5746                 bfa_timer_start(bfa, &fcdiag->qtest.timer,
5747                                 bfa_fcdiag_queuetest_timeout, fcdiag,
5748                                 BFA_DIAG_QTEST_TOV);
5749                 fcdiag->qtest.timer_active = 1;
5750         }
5751         return status;
5752 }
5753
5754 /*
5755  * DIAG PLB is running
5756  *
5757  *   @param[in] *bfa    - bfa data struct
5758  *
5759  *   @param[out]
5760  */
5761 bfa_status_t
5762 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5763 {
5764         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5765         return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5766 }