/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);                        \
        list_del(&(__itnim)->qe);                                       \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);                   \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));                      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);                  \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));                     \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);                     \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));                       \
        }                                                               \
} while (0)

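/*
 * Per-IO unit attention (ua) flag values used by the LUN masking (lm)
 * logic (see bfa_ioim_lm_init() above).
 */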
enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);                                 \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);                    \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}

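/*
 * Attach the FCP(im) module: cache the configured parameters and attach
 * the itnim, tskim and ioim sub-modules.
 */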
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

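/*
 * IOC disable handling: reclaim unused task management resources and
 * notify every itnim of the h/w failure.
 */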
static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}

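/*
 * Accumulate one stats field of the right operand into the left.
 */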
#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

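/*
 * IO completion profiling - update the per-itnim latency histogram
 * bucket selected by the IO transfer length.
 */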
void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats of all itnims before enabling profiling */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

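/*
 * Waiting for request queue space to send the firmware create request.
 */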
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

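/*
 * Waiting for request queue space to send the firmware delete request.
 */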
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

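/*
 * IOC h/w failure state - parked until the itnim comes online again or
 * is deleted.
 */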
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

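/*
 * Itnim is deleted, waiting for request queue space to send the
 * firmware delete request.
 */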
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pickup this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

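/*
 * Claim itnim memory from the FCP module's KVA block and initialize
 * each itnim in the uninit state.
 */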
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

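/*
 * Send an itnim create request to firmware. Returns BFA_FALSE and waits
 * on the request queue if no message slot is available.
 */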
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

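/*
 * Fold the stats of an itnim being deleted into the module-wide
 * deleted-itnim counters.
 */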
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

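/*
 * IO latency samples are in jiffies; the mul/div pair below lets
 * consumers of the ioprofile convert raw values to milliseconds
 * (val * clock_res_mul / clock_res_div).
 */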
1463 #define bfa_io_lat_clock_res_div        HZ
1464 #define bfa_io_lat_clock_res_mul        1000
1465 bfa_status_t
1466 bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1467                         struct bfa_itnim_ioprofile_s *ioprofile)
1468 {
1469         struct bfa_fcpim_s *fcpim;
1470
1471         if (!itnim)
1472                 return BFA_STATUS_NO_FCPIM_NEXUS;
1473
1474         fcpim = BFA_FCPIM(itnim->bfa);
1475
1476         if (!fcpim->io_profile)
1477                 return BFA_STATUS_IOPROFILE_OFF;
1478
1479         itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1480         itnim->ioprofile.io_profile_start_time =
1481                                 bfa_io_profile_start_time(itnim->bfa);
1482         itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1483         itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1484         *ioprofile = itnim->ioprofile;
1485
1486         return BFA_STATUS_OK;
1487 }
1488
1489 void
1490 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1491 {
1492         int j;
1493
1494         if (!itnim)
1495                 return;
1496
1497         memset(&itnim->stats, 0, sizeof(itnim->stats));
1498         memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1499         for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1500                 itnim->ioprofile.io_latency.min[j] = ~0;
1501 }
1502
1503 /*
1504  *  BFA IO module state machine functions
1505  */
1506
1507 /*
1508  * IO is not started (unallocated).
1509  */
1510 static void
1511 bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1512 {
1513         switch (event) {
1514         case BFA_IOIM_SM_START:
1515                 if (!bfa_itnim_is_online(ioim->itnim)) {
1516                         if (!bfa_itnim_hold_io(ioim->itnim)) {
1517                                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1518                                 list_del(&ioim->qe);
1519                                 list_add_tail(&ioim->qe,
1520                                         &ioim->fcpim->ioim_comp_q);
1521                                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1522                                                 __bfa_cb_ioim_pathtov, ioim);
1523                         } else {
1524                                 list_del(&ioim->qe);
1525                                 list_add_tail(&ioim->qe,
1526                                         &ioim->itnim->pending_q);
1527                         }
1528                         break;
1529                 }
1530
1531                 if (ioim->nsges > BFI_SGE_INLINE) {
1532                         if (!bfa_ioim_sgpg_alloc(ioim)) {
1533                                 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
1534                                 return;
1535                         }
1536                 }
1537
1538                 if (!bfa_ioim_send_ioreq(ioim)) {
1539                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1540                         break;
1541                 }
1542
1543                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1544                 break;
1545
1546         case BFA_IOIM_SM_IOTOV:
1547                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1548                 bfa_ioim_move_to_comp_q(ioim);
1549                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1550                                 __bfa_cb_ioim_pathtov, ioim);
1551                 break;
1552
1553         case BFA_IOIM_SM_ABORT:
1554                 /*
1555                  * IO in pending queue can get abort requests. Complete abort
1556                  * requests immediately.
1557                  */
1558                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1559                 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1560                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1561                         __bfa_cb_ioim_abort, ioim);
1562                 break;
1563
1564         default:
1565                 bfa_sm_fault(ioim->bfa, event);
1566         }
1567 }
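
/*
 * Aside (assumption about the bfa_cs.h helpers): each bfa_ioim_sm_*
 * handler above is one state of a function-pointer state machine.
 * Roughly:
 *
 *      bfa_sm_set_state(ioim, state)  ==>  ioim->sm = state;
 *      bfa_sm_send_event(ioim, event) ==>  ioim->sm(ioim, event);
 *
 * so a state transition is just storing a new handler, and an event is
 * a direct call into the current handler.
 */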
1568
1569 /*
1570  * IO is waiting for SG pages.
1571  */
1572 static void
1573 bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1574 {
1575         bfa_trc(ioim->bfa, ioim->iotag);
1576         bfa_trc(ioim->bfa, event);
1577
1578         switch (event) {
1579         case BFA_IOIM_SM_SGALLOCED:
1580                 if (!bfa_ioim_send_ioreq(ioim)) {
1581                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1582                         break;
1583                 }
1584                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1585                 break;
1586
1587         case BFA_IOIM_SM_CLEANUP:
1588                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1589                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1590                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1591                               ioim);
1592                 bfa_ioim_notify_cleanup(ioim);
1593                 break;
1594
1595         case BFA_IOIM_SM_ABORT:
1596                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1597                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1598                 bfa_ioim_move_to_comp_q(ioim);
1599                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1600                               ioim);
1601                 break;
1602
1603         case BFA_IOIM_SM_HWFAIL:
1604                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1605                 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1606                 bfa_ioim_move_to_comp_q(ioim);
1607                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1608                               ioim);
1609                 break;
1610
1611         default:
1612                 bfa_sm_fault(ioim->bfa, event);
1613         }
1614 }
1615
1616 /*
1617  * IO is active.
1618  */
1619 static void
1620 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1621 {
1622         switch (event) {
1623         case BFA_IOIM_SM_COMP_GOOD:
1624                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1625                 bfa_ioim_move_to_comp_q(ioim);
1626                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1627                               __bfa_cb_ioim_good_comp, ioim);
1628                 break;
1629
1630         case BFA_IOIM_SM_COMP:
1631                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1632                 bfa_ioim_move_to_comp_q(ioim);
1633                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1634                               ioim);
1635                 break;
1636
1637         case BFA_IOIM_SM_DONE:
1638                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1639                 bfa_ioim_move_to_comp_q(ioim);
1640                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1641                               ioim);
1642                 break;
1643
1644         case BFA_IOIM_SM_ABORT:
1645                 ioim->iosp->abort_explicit = BFA_TRUE;
1646                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1647
1648                 if (bfa_ioim_send_abort(ioim))
1649                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1650                 else {
1651                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1652                         bfa_stats(ioim->itnim, qwait);
1653                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1654                                           &ioim->iosp->reqq_wait);
1655                 }
1656                 break;
1657
1658         case BFA_IOIM_SM_CLEANUP:
1659                 ioim->iosp->abort_explicit = BFA_FALSE;
1660                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1661
1662                 if (bfa_ioim_send_abort(ioim))
1663                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1664                 else {
1665                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1666                         bfa_stats(ioim->itnim, qwait);
1667                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1668                                           &ioim->iosp->reqq_wait);
1669                 }
1670                 break;
1671
1672         case BFA_IOIM_SM_HWFAIL:
1673                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1674                 bfa_ioim_move_to_comp_q(ioim);
1675                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1676                               ioim);
1677                 break;
1678
1679         case BFA_IOIM_SM_SQRETRY:
1680                 if (bfa_ioim_maxretry_reached(ioim)) {
1681                         /* max retry reached, free IO */
1682                         bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1683                         bfa_ioim_move_to_comp_q(ioim);
1684                         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1685                                         __bfa_cb_ioim_failed, ioim);
1686                         break;
1687                 }
1688                 /* waiting for IO tag resource free */
1689                 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1690                 break;
1691
1692         default:
1693                 bfa_sm_fault(ioim->bfa, event);
1694         }
1695 }
1696
1697 /*
1698  * IO is retried with new tag.
1699  */
1700 static void
1701 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1702 {
1703         switch (event) {
1704         case BFA_IOIM_SM_FREE:
1705                 /* abts and rrq done. Now retry the IO with new tag */
1706                 bfa_ioim_update_iotag(ioim);
1707                 if (!bfa_ioim_send_ioreq(ioim)) {
1708                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1709                         break;
1710                 }
1711                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1712                 break;
1713
1714         case BFA_IOIM_SM_CLEANUP:
1715                 ioim->iosp->abort_explicit = BFA_FALSE;
1716                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1717
1718                 if (bfa_ioim_send_abort(ioim))
1719                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1720                 else {
1721                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1722                         bfa_stats(ioim->itnim, qwait);
1723                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1724                                           &ioim->iosp->reqq_wait);
1725                 }
1726                 break;
1727
1728         case BFA_IOIM_SM_HWFAIL:
1729                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1730                 bfa_ioim_move_to_comp_q(ioim);
1731                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1732                          __bfa_cb_ioim_failed, ioim);
1733                 break;
1734
1735         case BFA_IOIM_SM_ABORT:
1736                 /* In this state the IO abort is done.
1737                  * Waiting for IO tag resource free.
1738                  */
1739                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1740                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1741                               ioim);
1742                 break;
1743
1744         default:
1745                 bfa_sm_fault(ioim->bfa, event);
1746         }
1747 }
1748
1749 /*
1750  * IO is being aborted, waiting for completion from firmware.
1751  */
1752 static void
1753 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1754 {
1755         bfa_trc(ioim->bfa, ioim->iotag);
1756         bfa_trc(ioim->bfa, event);
1757
1758         switch (event) {
1759         case BFA_IOIM_SM_COMP_GOOD:
1760         case BFA_IOIM_SM_COMP:
1761         case BFA_IOIM_SM_DONE:
1762         case BFA_IOIM_SM_FREE:
1763                 break;
1764
1765         case BFA_IOIM_SM_ABORT_DONE:
1766                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1767                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1768                               ioim);
1769                 break;
1770
1771         case BFA_IOIM_SM_ABORT_COMP:
1772                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1773                 bfa_ioim_move_to_comp_q(ioim);
1774                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1775                               ioim);
1776                 break;
1777
1778         case BFA_IOIM_SM_COMP_UTAG:
1779                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1780                 bfa_ioim_move_to_comp_q(ioim);
1781                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1782                               ioim);
1783                 break;
1784
1785         case BFA_IOIM_SM_CLEANUP:
1786                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1787                 ioim->iosp->abort_explicit = BFA_FALSE;
1788
1789                 if (bfa_ioim_send_abort(ioim))
1790                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1791                 else {
1792                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1793                         bfa_stats(ioim->itnim, qwait);
1794                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1795                                           &ioim->iosp->reqq_wait);
1796                 }
1797                 break;
1798
1799         case BFA_IOIM_SM_HWFAIL:
1800                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1801                 bfa_ioim_move_to_comp_q(ioim);
1802                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1803                               ioim);
1804                 break;
1805
1806         default:
1807                 bfa_sm_fault(ioim->bfa, event);
1808         }
1809 }
1810
1811 /*
1812  * IO is being cleaned up (implicit abort), waiting for completion from
1813  * firmware.
1814  */
1815 static void
1816 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1817 {
1818         bfa_trc(ioim->bfa, ioim->iotag);
1819         bfa_trc(ioim->bfa, event);
1820
1821         switch (event) {
1822         case BFA_IOIM_SM_COMP_GOOD:
1823         case BFA_IOIM_SM_COMP:
1824         case BFA_IOIM_SM_DONE:
1825         case BFA_IOIM_SM_FREE:
1826                 break;
1827
1828         case BFA_IOIM_SM_ABORT:
1829                 /*
1830                  * IO is already being aborted implicitly
1831                  */
1832                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1833                 break;
1834
1835         case BFA_IOIM_SM_ABORT_DONE:
1836                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1837                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1838                 bfa_ioim_notify_cleanup(ioim);
1839                 break;
1840
1841         case BFA_IOIM_SM_ABORT_COMP:
1842                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1843                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1844                 bfa_ioim_notify_cleanup(ioim);
1845                 break;
1846
1847         case BFA_IOIM_SM_COMP_UTAG:
1848                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1849                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1850                 bfa_ioim_notify_cleanup(ioim);
1851                 break;
1852
1853         case BFA_IOIM_SM_HWFAIL:
1854                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1855                 bfa_ioim_move_to_comp_q(ioim);
1856                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1857                               ioim);
1858                 break;
1859
1860         case BFA_IOIM_SM_CLEANUP:
1861                 /*
1862                  * IO can be in cleanup state already due to TM command.
1863                  * 2nd cleanup request comes from ITN offline event.
1864                  */
1865                 break;
1866
1867         default:
1868                 bfa_sm_fault(ioim->bfa, event);
1869         }
1870 }
1871
1872 /*
1873  * IO is waiting for room in request CQ
1874  */
1875 static void
1876 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1877 {
1878         bfa_trc(ioim->bfa, ioim->iotag);
1879         bfa_trc(ioim->bfa, event);
1880
1881         switch (event) {
1882         case BFA_IOIM_SM_QRESUME:
1883                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1884                 bfa_ioim_send_ioreq(ioim);
1885                 break;
1886
1887         case BFA_IOIM_SM_ABORT:
1888                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1889                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1890                 bfa_ioim_move_to_comp_q(ioim);
1891                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1892                               ioim);
1893                 break;
1894
1895         case BFA_IOIM_SM_CLEANUP:
1896                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1897                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1898                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1899                               ioim);
1900                 bfa_ioim_notify_cleanup(ioim);
1901                 break;
1902
1903         case BFA_IOIM_SM_HWFAIL:
1904                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1905                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1906                 bfa_ioim_move_to_comp_q(ioim);
1907                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1908                               ioim);
1909                 break;
1910
1911         default:
1912                 bfa_sm_fault(ioim->bfa, event);
1913         }
1914 }
1915
1916 /*
1917  * Active IO is being aborted, waiting for room in request CQ.
1918  */
1919 static void
1920 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1921 {
1922         bfa_trc(ioim->bfa, ioim->iotag);
1923         bfa_trc(ioim->bfa, event);
1924
1925         switch (event) {
1926         case BFA_IOIM_SM_QRESUME:
1927                 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1928                 bfa_ioim_send_abort(ioim);
1929                 break;
1930
1931         case BFA_IOIM_SM_CLEANUP:
1932                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1933                 ioim->iosp->abort_explicit = BFA_FALSE;
1934                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1935                 break;
1936
1937         case BFA_IOIM_SM_COMP_GOOD:
1938         case BFA_IOIM_SM_COMP:
1939                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1940                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1941                 bfa_ioim_move_to_comp_q(ioim);
1942                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1943                               ioim);
1944                 break;
1945
1946         case BFA_IOIM_SM_DONE:
1947                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1948                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1949                 bfa_ioim_move_to_comp_q(ioim);
1950                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1951                               ioim);
1952                 break;
1953
1954         case BFA_IOIM_SM_HWFAIL:
1955                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1956                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1957                 bfa_ioim_move_to_comp_q(ioim);
1958                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1959                               ioim);
1960                 break;
1961
1962         default:
1963                 bfa_sm_fault(ioim->bfa, event);
1964         }
1965 }
1966
1967 /*
1968  * Active IO is being cleaned up, waiting for room in request CQ.
1969  */
1970 static void
1971 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1972 {
1973         bfa_trc(ioim->bfa, ioim->iotag);
1974         bfa_trc(ioim->bfa, event);
1975
1976         switch (event) {
1977         case BFA_IOIM_SM_QRESUME:
1978                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1979                 bfa_ioim_send_abort(ioim);
1980                 break;
1981
1982         case BFA_IOIM_SM_ABORT:
1983                 /*
1984                  * IO is already being cleaned up implicitly
1985                  */
1986                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1987                 break;
1988
1989         case BFA_IOIM_SM_COMP_GOOD:
1990         case BFA_IOIM_SM_COMP:
1991                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1992                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1993                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1994                 bfa_ioim_notify_cleanup(ioim);
1995                 break;
1996
1997         case BFA_IOIM_SM_DONE:
1998                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1999                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2000                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2001                 bfa_ioim_notify_cleanup(ioim);
2002                 break;
2003
2004         case BFA_IOIM_SM_HWFAIL:
2005                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2006                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2007                 bfa_ioim_move_to_comp_q(ioim);
2008                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2009                               ioim);
2010                 break;
2011
2012         default:
2013                 bfa_sm_fault(ioim->bfa, event);
2014         }
2015 }
2016
2017 /*
2018  * IO bfa callback is pending.
2019  */
2020 static void
2021 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2022 {
2023         switch (event) {
2024         case BFA_IOIM_SM_HCB:
2025                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2026                 bfa_ioim_free(ioim);
2027                 break;
2028
2029         case BFA_IOIM_SM_CLEANUP:
2030                 bfa_ioim_notify_cleanup(ioim);
2031                 break;
2032
2033         case BFA_IOIM_SM_HWFAIL:
2034                 break;
2035
2036         default:
2037                 bfa_sm_fault(ioim->bfa, event);
2038         }
2039 }
2040
2041 /*
2042  * IO bfa callback is pending. IO resource cannot be freed.
2043  */
2044 static void
2045 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2046 {
2047         bfa_trc(ioim->bfa, ioim->iotag);
2048         bfa_trc(ioim->bfa, event);
2049
2050         switch (event) {
2051         case BFA_IOIM_SM_HCB:
2052                 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2053                 list_del(&ioim->qe);
2054                 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2055                 break;
2056
2057         case BFA_IOIM_SM_FREE:
2058                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2059                 break;
2060
2061         case BFA_IOIM_SM_CLEANUP:
2062                 bfa_ioim_notify_cleanup(ioim);
2063                 break;
2064
2065         case BFA_IOIM_SM_HWFAIL:
2066                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2067                 break;
2068
2069         default:
2070                 bfa_sm_fault(ioim->bfa, event);
2071         }
2072 }
2073
2074 /*
2075  * IO is completed, waiting for the IO resource to be freed by firmware.
2076  */
2077 static void
2078 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2079 {
2080         bfa_trc(ioim->bfa, ioim->iotag);
2081         bfa_trc(ioim->bfa, event);
2082
2083         switch (event) {
2084         case BFA_IOIM_SM_FREE:
2085                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2086                 bfa_ioim_free(ioim);
2087                 break;
2088
2089         case BFA_IOIM_SM_CLEANUP:
2090                 bfa_ioim_notify_cleanup(ioim);
2091                 break;
2092
2093         case BFA_IOIM_SM_HWFAIL:
2094                 break;
2095
2096         default:
2097                 bfa_sm_fault(ioim->bfa, event);
2098         }
2099 }
2100
2101 /*
2102  * This is called from bfa_fcpim_start after bfa_init(), once the driver
2103  * has completed the flash read. Invalidate the stale lun mask content,
2104  * i.e. unit attention, rp tag and lp tag.
2105  */
2106 static void
2107 bfa_ioim_lm_init(struct bfa_s *bfa)
2108 {
2109         struct bfa_lun_mask_s *lunm_list;
2110         int     i;
2111
2112         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2113                 return;
2114
2115         lunm_list = bfa_get_lun_mask_list(bfa);
2116         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2117                 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2118                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2119                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2120         }
2121 }
2122
2123 static void
2124 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2125 {
2126         struct bfa_ioim_s *ioim = cbarg;
2127
2128         if (!complete) {
2129                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2130                 return;
2131         }
2132
2133         bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2134 }
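
/*
 * Note on the 'complete' flag (pattern visible in all __bfa_cb_ioim_*
 * callbacks here): when the callback queue is flushed, the callback runs
 * with complete == BFA_FALSE and only advances the state machine
 * (BFA_IOIM_SM_HCB) so resources can be freed; with BFA_TRUE it delivers
 * the result to the driver. This summary is an inference from the code
 * above, not authoritative documentation.
 */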
2135
2136 static void
2137 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2138 {
2139         struct bfa_ioim_s       *ioim = cbarg;
2140         struct bfi_ioim_rsp_s *m;
2141         u8      *snsinfo = NULL;
2142         u8      sns_len = 0;
2143         s32     residue = 0;
2144
2145         if (!complete) {
2146                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2147                 return;
2148         }
2149
2150         m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2151         if (m->io_status == BFI_IOIM_STS_OK) {
2152                 /*
2153                  * setup sense information, if present
2154                  */
2155                 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2156                                         m->sns_len) {
2157                         sns_len = m->sns_len;
2158                         snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2159                                                 ioim->iotag);
2160                 }
2161
2162                 /*
2163                  * setup residue value correctly for normal completions
2164                  */
2165                 if (m->resid_flags == FCP_RESID_UNDER) {
2166                         residue = be32_to_cpu(m->residue);
2167                         bfa_stats(ioim->itnim, iocomp_underrun);
2168                 }
2169                 if (m->resid_flags == FCP_RESID_OVER) {
2170                         residue = be32_to_cpu(m->residue);
2171                         residue = -residue;
2172                         bfa_stats(ioim->itnim, iocomp_overrun);
2173                 }
2174         }
2175
2176         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2177                           m->scsi_status, sns_len, snsinfo, residue);
2178 }
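
/*
 * Illustrative sketch, not part of the driver: the signed residue
 * computed above is positive for an FCP underrun and negative for an
 * overrun, so a hypothetical midlayer shim could report it as:
 */
static inline void
example_report_resid(struct scsi_cmnd *cmnd, s32 residue)
{
        if (residue >= 0)
                scsi_set_resid(cmnd, residue);  /* bytes not transferred */
        else
                scsi_set_resid(cmnd, 0);        /* overrun: nothing left over */
}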
2179
2180 void
2181 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2182                         u16 rp_tag, u8 lp_tag)
2183 {
2184         struct bfa_lun_mask_s *lun_list;
2185         u8      i;
2186
2187         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2188                 return;
2189
2190         lun_list = bfa_get_lun_mask_list(bfa);
2191         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2192                 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2193                         if ((lun_list[i].lp_wwn == lp_wwn) &&
2194                             (lun_list[i].rp_wwn == rp_wwn)) {
2195                                 lun_list[i].rp_tag = rp_tag;
2196                                 lun_list[i].lp_tag = lp_tag;
2197                         }
2198                 }
2199         }
2200 }
2201
2202 /*
2203  * set UA for all active luns in LM DB
2204  */
2205 static void
2206 bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2207 {
2208         struct bfa_lun_mask_s   *lunm_list;
2209         int     i;
2210
2211         lunm_list = bfa_get_lun_mask_list(bfa);
2212         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2213                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2214                         continue;
2215                 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2216         }
2217 }
2218
2219 bfa_status_t
2220 bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2221 {
2222         struct bfa_lunmask_cfg_s        *lun_mask;
2223
2224         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2225         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2226                 return BFA_STATUS_FAILED;
2227
2228         if (bfa_get_lun_mask_status(bfa) == update)
2229                 return BFA_STATUS_NO_CHANGE;
2230
2231         lun_mask = bfa_get_lun_mask(bfa);
2232         lun_mask->status = update;
2233
2234         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2235                 bfa_ioim_lm_set_ua(bfa);
2236
2237         return  bfa_dconf_update(bfa);
2238 }
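
/*
 * Usage sketch (assumption): a management path toggling LUN masking.
 * Persistence via bfa_dconf_update() happens inside
 * bfa_fcpim_lunmask_update(); the wrapper below is hypothetical.
 */
static bfa_status_t
example_lunmask_enable(struct bfa_s *bfa)
{
        bfa_status_t rc = bfa_fcpim_lunmask_update(bfa, BFA_LUNMASK_ENABLED);

        if (rc == BFA_STATUS_NO_CHANGE)         /* already enabled */
                rc = BFA_STATUS_OK;
        return rc;
}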
2239
2240 bfa_status_t
2241 bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2242 {
2243         int i;
2244         struct bfa_lun_mask_s   *lunm_list;
2245
2246         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2247         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2248                 return BFA_STATUS_FAILED;
2249
2250         lunm_list = bfa_get_lun_mask_list(bfa);
2251         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2252                 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2253                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2254                                 bfa_rport_unset_lunmask(bfa,
2255                                   BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2256                 }
2257         }
2258
2259         memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2260         return bfa_dconf_update(bfa);
2261 }
2262
2263 bfa_status_t
2264 bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2265 {
2266         struct bfa_lunmask_cfg_s *lun_mask;
2267
2268         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2269         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2270                 return BFA_STATUS_FAILED;
2271
2272         lun_mask = bfa_get_lun_mask(bfa);
2273         memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2274         return BFA_STATUS_OK;
2275 }
2276
2277 bfa_status_t
2278 bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2279                       wwn_t rpwwn, struct scsi_lun lun)
2280 {
2281         struct bfa_lun_mask_s *lunm_list;
2282         struct bfa_rport_s *rp = NULL;
2283         int i, free_index = MAX_LUN_MASK_CFG + 1;
2284         struct bfa_fcs_lport_s *port = NULL;
2285         struct bfa_fcs_rport_s *rp_fcs;
2286
2287         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2288         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2289                 return BFA_STATUS_FAILED;
2290
2291         port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2292                                    vf_id, *pwwn);
2293         if (port) {
2294                 *pwwn = port->port_cfg.pwwn;
2295                 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2296                 if (rp_fcs)
2297                         rp = rp_fcs->bfa_rport;
2298         }
2299
2300         lunm_list = bfa_get_lun_mask_list(bfa);
2301         /* scan for a free slot; bail out if the entry already exists */
2302         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2303                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2304                         free_index = i;
2305                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2306                     (lunm_list[i].rp_wwn == rpwwn) &&
2307                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2308                      scsilun_to_int((struct scsi_lun *)&lun)))
2309                         return  BFA_STATUS_ENTRY_EXISTS;
2310         }
2311
2312         if (free_index > MAX_LUN_MASK_CFG)
2313                 return BFA_STATUS_MAX_ENTRY_REACHED;
2314
2315         if (rp) {
2316                 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2317                                                    rp->rport_info.local_pid);
2318                 lunm_list[free_index].rp_tag = rp->rport_tag;
2319         } else {
2320                 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2321                 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2322         }
2323
2324         lunm_list[free_index].lp_wwn = *pwwn;
2325         lunm_list[free_index].rp_wwn = rpwwn;
2326         lunm_list[free_index].lun = lun;
2327         lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2328
2329         /* set for all luns in this rp */
2330         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2331                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2332                     (lunm_list[i].rp_wwn == rpwwn))
2333                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2334         }
2335
2336         return bfa_dconf_update(bfa);
2337 }
2338
2339 bfa_status_t
2340 bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2341                          wwn_t rpwwn, struct scsi_lun lun)
2342 {
2343         struct bfa_lun_mask_s   *lunm_list;
2344         struct bfa_rport_s      *rp = NULL;
2345         struct bfa_fcs_lport_s *port = NULL;
2346         struct bfa_fcs_rport_s *rp_fcs;
2347         int     i;
2348
2349         /* In min cfg, lunm_list could be NULL but no commands should run. */
2350         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2351                 return BFA_STATUS_FAILED;
2352
2353         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2354         bfa_trc(bfa, *pwwn);
2355         bfa_trc(bfa, rpwwn);
2356         bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2357
2358         if (*pwwn == 0) {
2359                 port = bfa_fcs_lookup_port(
2360                                 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2361                                 vf_id, *pwwn);
2362                 if (port) {
2363                         *pwwn = port->port_cfg.pwwn;
2364                         rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2365                         if (rp_fcs)
2366                                 rp = rp_fcs->bfa_rport;
2367                 }
2368         }
2369
2370         lunm_list = bfa_get_lun_mask_list(bfa);
2371         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2372                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2373                     (lunm_list[i].rp_wwn == rpwwn) &&
2374                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2375                      scsilun_to_int((struct scsi_lun *)&lun))) {
2376                         lunm_list[i].lp_wwn = 0;
2377                         lunm_list[i].rp_wwn = 0;
2378                         int_to_scsilun(0, &lunm_list[i].lun);
2379                         lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2380                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2381                                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2382                                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2383                         }
2384                         return bfa_dconf_update(bfa);
2385                 }
2386         }
2387
2388         /* set for all luns in this rp */
2389         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2390                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2391                     (lunm_list[i].rp_wwn == rpwwn))
2392                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2393         }
2394
2395         return BFA_STATUS_ENTRY_NOT_EXISTS;
2396 }
2397
2398 static void
2399 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2400 {
2401         struct bfa_ioim_s *ioim = cbarg;
2402
2403         if (!complete) {
2404                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2405                 return;
2406         }
2407
2408         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2409                           0, 0, NULL, 0);
2410 }
2411
2412 static void
2413 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2414 {
2415         struct bfa_ioim_s *ioim = cbarg;
2416
2417         bfa_stats(ioim->itnim, path_tov_expired);
2418         if (!complete) {
2419                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2420                 return;
2421         }
2422
2423         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2424                           0, 0, NULL, 0);
2425 }
2426
2427 static void
2428 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2429 {
2430         struct bfa_ioim_s *ioim = cbarg;
2431
2432         if (!complete) {
2433                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2434                 return;
2435         }
2436
2437         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2438 }
2439
2440 static void
2441 bfa_ioim_sgpg_alloced(void *cbarg)
2442 {
2443         struct bfa_ioim_s *ioim = cbarg;
2444
2445         ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2446         list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2447         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2448         bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2449 }
2450
2451 /*
2452  * Send I/O request to firmware.
2453  */
2454 static  bfa_boolean_t
2455 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2456 {
2457         struct bfa_itnim_s *itnim = ioim->itnim;
2458         struct bfi_ioim_req_s *m;
2459         static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2460         struct bfi_sge_s *sge, *sgpge;
2461         u32     pgdlen = 0;
2462         u32     fcp_dl;
2463         u64 addr;
2464         struct scatterlist *sg;
2465         struct bfa_sgpg_s *sgpg;
2466         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2467         u32 i, sge_id, pgcumsz;
2468         enum dma_data_direction dmadir;
2469
2470         /*
2471          * check for room in queue to send request now
2472          */
2473         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2474         if (!m) {
2475                 bfa_stats(ioim->itnim, qwait);
2476                 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2477                                   &ioim->iosp->reqq_wait);
2478                 return BFA_FALSE;
2479         }
2480
2481         /*
2482          * build i/o request message next
2483          */
2484         m->io_tag = cpu_to_be16(ioim->iotag);
2485         m->rport_hdl = ioim->itnim->rport->fw_handle;
2486         m->io_timeout = 0;
2487
2488         sge = &m->sges[0];
2489         sgpg = ioim->sgpg;
2490         sge_id = 0;
2491         sgpge = NULL;
2492         pgcumsz = 0;
2493         scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2494                 if (i == 0) {
2495                         /* build inline IO SG element */
2496                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2497                         sge->sga = *(union bfi_addr_u *) &addr;
2498                         pgdlen = sg_dma_len(sg);
2499                         sge->sg_len = pgdlen;
2500                         sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2501                                         BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2502                         bfa_sge_to_be(sge);
2503                         sge++;
2504                 } else {
2505                         if (sge_id == 0)
2506                                 sgpge = sgpg->sgpg->sges;
2507
2508                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2509                         sgpge->sga = *(union bfi_addr_u *) &addr;
2510                         sgpge->sg_len = sg_dma_len(sg);
2511                         pgcumsz += sgpge->sg_len;
2512
2513                         /* set flags */
2514                         if (i < (ioim->nsges - 1) &&
2515                                         sge_id < (BFI_SGPG_DATA_SGES - 1))
2516                                 sgpge->flags = BFI_SGE_DATA;
2517                         else if (i < (ioim->nsges - 1))
2518                                 sgpge->flags = BFI_SGE_DATA_CPL;
2519                         else
2520                                 sgpge->flags = BFI_SGE_DATA_LAST;
2521
2522                         bfa_sge_to_le(sgpge);
2523
2524                         sgpge++;
2525                         if (i == (ioim->nsges - 1)) {
2526                                 sgpge->flags = BFI_SGE_PGDLEN;
2527                                 sgpge->sga.a32.addr_lo = 0;
2528                                 sgpge->sga.a32.addr_hi = 0;
2529                                 sgpge->sg_len = pgcumsz;
2530                                 bfa_sge_to_le(sgpge);
2531                         } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2532                                 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2533                                 sgpge->flags = BFI_SGE_LINK;
2534                                 sgpge->sga = sgpg->sgpg_pa;
2535                                 sgpge->sg_len = pgcumsz;
2536                                 bfa_sge_to_le(sgpge);
2537                                 sge_id = 0;
2538                                 pgcumsz = 0;
2539                         }
2540                 }
2541         }
2542
2543         if (ioim->nsges > BFI_SGE_INLINE) {
2544                 sge->sga = ioim->sgpg->sgpg_pa;
2545         } else {
2546                 sge->sga.a32.addr_lo = 0;
2547                 sge->sga.a32.addr_hi = 0;
2548         }
2549         sge->sg_len = pgdlen;
2550         sge->flags = BFI_SGE_PGDLEN;
2551         bfa_sge_to_be(sge);
2552
2553         /*
2554          * set up I/O command parameters
2555          */
2556         m->cmnd = cmnd_z0;
2557         int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2558         dmadir = cmnd->sc_data_direction;
2559         if (dmadir == DMA_TO_DEVICE)
2560                 m->cmnd.iodir = FCP_IODIR_WRITE;
2561         else if (dmadir == DMA_FROM_DEVICE)
2562                 m->cmnd.iodir = FCP_IODIR_READ;
2563         else
2564                 m->cmnd.iodir = FCP_IODIR_NONE;
2565
2566         m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2567         fcp_dl = scsi_bufflen(cmnd);
2568         m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2569
2570         /*
2571          * set up I/O message header
2572          */
2573         switch (m->cmnd.iodir) {
2574         case FCP_IODIR_READ:
2575                 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2576                 bfa_stats(itnim, input_reqs);
2577                 ioim->itnim->stats.rd_throughput += fcp_dl;
2578                 break;
2579         case FCP_IODIR_WRITE:
2580                 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2581                 bfa_stats(itnim, output_reqs);
2582                 ioim->itnim->stats.wr_throughput += fcp_dl;
2583                 break;
2584         case FCP_IODIR_RW:
2585                 bfa_stats(itnim, input_reqs);
2586                 bfa_stats(itnim, output_reqs);  /* fall through */
2587         default:
2588                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2589         }
2590         if (itnim->seq_rec ||
2591             (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2592                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2593
2594         /*
2595          * queue I/O message to firmware
2596          */
2597         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2598         return BFA_TRUE;
2599 }
2600
2601 /*
2602  * Set up any additional SG pages needed. The inline SG element is set
2603  * up at queuing time.
2604  */
2605 static bfa_boolean_t
2606 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2607 {
2608         u16     nsgpgs;
2609
2610         WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2611
2612         /*
2613          * allocate SG pages needed
2614          */
2615         nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2616         if (!nsgpgs)
2617                 return BFA_TRUE;
2618
2619         if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2620             != BFA_STATUS_OK) {
2621                 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2622                 return BFA_FALSE;
2623         }
2624
2625         ioim->nsgpgs = nsgpgs;
2626         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2627
2628         return BFA_TRUE;
2629 }
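
/*
 * Worked example (assumption about the exact macro definitions): the
 * first BFI_SGE_INLINE scatter-gather entries ride inline in the I/O
 * request itself; the remainder are packed BFI_SGPG_DATA_SGES per SG
 * page, with the last slot of a full page reused as a BFI_SGE_LINK to
 * the next page (see bfa_ioim_send_ioreq() above). So the page count
 * BFA_SGPG_NPAGE(nsges) is on the order of:
 *
 *      DIV_ROUND_UP(nsges - BFI_SGE_INLINE, BFI_SGPG_DATA_SGES)
 */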
2630
2631 /*
2632  * Send I/O abort request to firmware.
2633  */
2634 static  bfa_boolean_t
2635 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2636 {
2637         struct bfi_ioim_abort_req_s *m;
2638         enum bfi_ioim_h2i       msgop;
2639
2640         /*
2641          * check for room in queue to send request now
2642          */
2643         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2644         if (!m)
2645                 return BFA_FALSE;
2646
2647         /*
2648          * build i/o request message next
2649          */
2650         if (ioim->iosp->abort_explicit)
2651                 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2652         else
2653                 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2654
2655         bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2656         m->io_tag    = cpu_to_be16(ioim->iotag);
2657         m->abort_tag = ++ioim->abort_tag;
2658
2659         /*
2660          * queue I/O message to firmware
2661          */
2662         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2663         return BFA_TRUE;
2664 }
2665
2666 /*
2667  * Call to resume any I/O requests waiting for room in request queue.
2668  */
2669 static void
2670 bfa_ioim_qresume(void *cbarg)
2671 {
2672         struct bfa_ioim_s *ioim = cbarg;
2673
2674         bfa_stats(ioim->itnim, qresumes);
2675         bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2676 }
2677
2678
2679 static void
2680 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2681 {
2682         /*
2683          * Move IO from itnim queue to fcpim global queue since itnim will be
2684          * freed.
2685          */
2686         list_del(&ioim->qe);
2687         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2688
2689         if (!ioim->iosp->tskim) {
2690                 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2691                         bfa_cb_dequeue(&ioim->hcb_qe);
2692                         list_del(&ioim->qe);
2693                         list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2694                 }
2695                 bfa_itnim_iodone(ioim->itnim);
2696         } else
2697                 bfa_wc_down(&ioim->iosp->tskim->wc);
2698 }
2699
2700 static bfa_boolean_t
2701 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2702 {
2703         if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2704             (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2705             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2706             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2707             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2708             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2709             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2710                 return BFA_FALSE;
2711
2712         return BFA_TRUE;
2713 }
2714
2715 void
2716 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2717 {
2718         /*
2719          * If the path tov timer expired, fail back with PATHTOV status -
2720          * these IO requests are not normally retried by the IO stack.
2721          *
2722          * Otherwise the device came back online; fail with normal failed
2723          * status so that the IO stack retries these IO requests.
2724          */
2725         if (iotov)
2726                 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2727         else {
2728                 ioim->io_cbfn = __bfa_cb_ioim_failed;
2729                 bfa_stats(ioim->itnim, iocom_nexus_abort);
2730         }
2731         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2732
2733         /*
2734          * Move IO to fcpim global queue since itnim will be
2735          * freed.
2736          */
2737         list_del(&ioim->qe);
2738         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2739 }
2740
2741
2742 /*
2743  * Memory allocation and initialization.
2744  */
2745 void
2746 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2747 {
2748         struct bfa_ioim_s               *ioim;
2749         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2750         struct bfa_ioim_sp_s    *iosp;
2751         u16             i;
2752
2753         /*
2754          * claim memory first
2755          */
2756         ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2757         fcpim->ioim_arr = ioim;
2758         bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2759
2760         iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2761         fcpim->ioim_sp_arr = iosp;
2762         bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2763
2764         /*
2765          * Initialize ioim free queues
2766          */
2767         INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2768         INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2769
2770         for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2771              i++, ioim++, iosp++) {
2772                 /*
2773                  * initialize IOIM
2774                  */
2775                 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2776                 ioim->iotag   = i;
2777                 ioim->bfa     = fcpim->bfa;
2778                 ioim->fcpim   = fcpim;
2779                 ioim->iosp    = iosp;
2780                 INIT_LIST_HEAD(&ioim->sgpg_q);
2781                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2782                                    bfa_ioim_qresume, ioim);
2783                 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2784                                    bfa_ioim_sgpg_alloced, ioim);
2785                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2786         }
2787 }
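
/*
 * Aside (inference from the claim code above): bfa_mem_kva_curp() acts
 * as a bump-pointer allocator over one preallocated KVA region. The
 * generic pattern, with hypothetical names:
 *
 *      arr = (struct foo_s *) bfa_mem_kva_curp(mod);
 *      bfa_mem_kva_curp(mod) = (u8 *) (arr + nitems);
 *
 * Each module claims its arrays in sequence during attach.
 */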
2788
2789 void
2790 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2791 {
2792         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2793         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2794         struct bfa_ioim_s *ioim;
2795         u16     iotag;
2796         enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2797
2798         iotag = be16_to_cpu(rsp->io_tag);
2799
2800         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2801         WARN_ON(ioim->iotag != iotag);
2802
2803         bfa_trc(ioim->bfa, ioim->iotag);
2804         bfa_trc(ioim->bfa, rsp->io_status);
2805         bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2806
2807         if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2808                 ioim->iosp->comp_rspmsg = *m;
2809
2810         switch (rsp->io_status) {
2811         case BFI_IOIM_STS_OK:
2812                 bfa_stats(ioim->itnim, iocomp_ok);
2813                 if (rsp->reuse_io_tag == 0)
2814                         evt = BFA_IOIM_SM_DONE;
2815                 else
2816                         evt = BFA_IOIM_SM_COMP;
2817                 break;
2818
2819         case BFI_IOIM_STS_TIMEDOUT:
2820                 bfa_stats(ioim->itnim, iocomp_timedout); /* fall through */
2821         case BFI_IOIM_STS_ABORTED:
2822                 rsp->io_status = BFI_IOIM_STS_ABORTED;
2823                 bfa_stats(ioim->itnim, iocomp_aborted);
2824                 if (rsp->reuse_io_tag == 0)
2825                         evt = BFA_IOIM_SM_DONE;
2826                 else
2827                         evt = BFA_IOIM_SM_COMP;
2828                 break;
2829
2830         case BFI_IOIM_STS_PROTO_ERR:
2831                 bfa_stats(ioim->itnim, iocom_proto_err);
2832                 WARN_ON(!rsp->reuse_io_tag);
2833                 evt = BFA_IOIM_SM_COMP;
2834                 break;
2835
2836         case BFI_IOIM_STS_SQER_NEEDED:
2837                 bfa_stats(ioim->itnim, iocom_sqer_needed);
2838                 WARN_ON(rsp->reuse_io_tag != 0);
2839                 evt = BFA_IOIM_SM_SQRETRY;
2840                 break;
2841
2842         case BFI_IOIM_STS_RES_FREE:
2843                 bfa_stats(ioim->itnim, iocom_res_free);
2844                 evt = BFA_IOIM_SM_FREE;
2845                 break;
2846
2847         case BFI_IOIM_STS_HOST_ABORTED:
2848                 bfa_stats(ioim->itnim, iocom_hostabrts);
2849                 if (rsp->abort_tag != ioim->abort_tag) {
2850                         bfa_trc(ioim->bfa, rsp->abort_tag);
2851                         bfa_trc(ioim->bfa, ioim->abort_tag);
2852                         return;
2853                 }
2854
2855                 if (rsp->reuse_io_tag)
2856                         evt = BFA_IOIM_SM_ABORT_COMP;
2857                 else
2858                         evt = BFA_IOIM_SM_ABORT_DONE;
2859                 break;
2860
2861         case BFI_IOIM_STS_UTAG:
2862                 bfa_stats(ioim->itnim, iocom_utags);
2863                 evt = BFA_IOIM_SM_COMP_UTAG;
2864                 break;
2865
2866         default:
2867                 WARN_ON(1);
2868         }
2869
2870         bfa_sm_send_event(ioim, evt);
2871 }
2872
2873 void
2874 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2875 {
2876         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2877         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2878         struct bfa_ioim_s *ioim;
2879         u16     iotag;
2880
2881         iotag = be16_to_cpu(rsp->io_tag);
2882
2883         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2884         WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2885
2886         bfa_ioim_cb_profile_comp(fcpim, ioim);
2887
2888         bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2889 }
2890
2891 /*
2892  * Called by itnim to clean up IO while going offline.
2893  */
2894 void
2895 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2896 {
2897         bfa_trc(ioim->bfa, ioim->iotag);
2898         bfa_stats(ioim->itnim, io_cleanups);
2899
2900         ioim->iosp->tskim = NULL;
2901         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2902 }
2903
2904 void
2905 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2906 {
2907         bfa_trc(ioim->bfa, ioim->iotag);
2908         bfa_stats(ioim->itnim, io_tmaborts);
2909
2910         ioim->iosp->tskim = tskim;
2911         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2912 }
2913
2914 /*
2915  * IOC failure handling.
2916  */
2917 void
2918 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2919 {
2920         bfa_trc(ioim->bfa, ioim->iotag);
2921         bfa_stats(ioim->itnim, io_iocdowns);
2922         bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2923 }
2924
2925 /*
2926  * IO offline TOV popped. Fail the pending IO.
2927  */
2928 void
2929 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2930 {
2931         bfa_trc(ioim->bfa, ioim->iotag);
2932         bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2933 }
2934
2935
2936 /*
2937  * Allocate IOIM resource for initiator mode I/O request.
2938  */
2939 struct bfa_ioim_s *
2940 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2941                 struct bfa_itnim_s *itnim, u16 nsges)
2942 {
2943         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2944         struct bfa_ioim_s *ioim;
2945         struct bfa_iotag_s *iotag = NULL;
2946
2947         /*
2948          * allocate IOIM resource
2949          */
2950         bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2951         if (!iotag) {
2952                 bfa_stats(itnim, no_iotags);
2953                 return NULL;
2954         }
2955
2956         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2957
2958         ioim->dio = dio;
2959         ioim->itnim = itnim;
2960         ioim->nsges = nsges;
2961         ioim->nsgpgs = 0;
2962
2963         bfa_stats(itnim, total_ios);
2964         fcpim->ios_active++;
2965
2966         list_add_tail(&ioim->qe, &itnim->io_q);
2967
2968         return ioim;
2969 }
2970
2971 void
2972 bfa_ioim_free(struct bfa_ioim_s *ioim)
2973 {
2974         struct bfa_fcpim_s *fcpim = ioim->fcpim;
2975         struct bfa_iotag_s *iotag;
2976
2977         if (ioim->nsgpgs > 0)
2978                 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2979
2980         bfa_stats(ioim->itnim, io_comps);
2981         fcpim->ios_active--;
2982
2983         ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2984
2985         WARN_ON(!(ioim->iotag <
2986                 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2987         iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2988
2989         if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2990                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2991         else
2992                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2993
2994         list_del(&ioim->qe);
2995 }
2996
2997 void
2998 bfa_ioim_start(struct bfa_ioim_s *ioim)
2999 {
3000         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
3001
3002         /*
3003          * Obtain the queue over which this request has to be issued
3004          */
3005         ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
3006                         BFA_FALSE : bfa_itnim_get_reqq(ioim);
3007
3008         bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3009 }
3010
3011 /*
3012  * Driver I/O abort request.
3013  */
3014 bfa_status_t
3015 bfa_ioim_abort(struct bfa_ioim_s *ioim)
3016 {
3017
3018         bfa_trc(ioim->bfa, ioim->iotag);
3019
3020         if (!bfa_ioim_is_abortable(ioim))
3021                 return BFA_STATUS_FAILED;
3022
3023         bfa_stats(ioim->itnim, io_aborts);
3024         bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3025
3026         return BFA_STATUS_OK;
3027 }
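
/*
 * Usage sketch, not driver code: the initiator-mode IOIM lifecycle as
 * seen from the entry points above. Error handling and locking are
 * elided; 'example_issue_io' is hypothetical.
 */
static void
example_issue_io(struct bfa_s *bfa, struct bfad_ioim_s *dio,
                 struct bfa_itnim_s *itnim, u16 nsges)
{
        struct bfa_ioim_s *ioim;

        ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
        if (!ioim)
                return;                 /* out of IO tags; retry later */

        bfa_ioim_start(ioim);           /* fires BFA_IOIM_SM_START */
        /*
         * Completion arrives via bfa_ioim_isr() or
         * bfa_ioim_good_comp_isr(); bfa_ioim_free() runs from the
         * state machine once the host callback has been delivered.
         */
}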
3028
3029 /*
3030  *  BFA TSKIM state machine functions
3031  */
3032
3033 /*
3034  * Task management command beginning state.
3035  */
3036 static void
3037 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3038 {
3039         bfa_trc(tskim->bfa, event);
3040
3041         switch (event) {
3042         case BFA_TSKIM_SM_START:
3043                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3044                 bfa_tskim_gather_ios(tskim);
3045
3046                 /*
3047                  * If the device is offline, do not send the TM on the wire.
3048                  * Just clean up any pending IO requests and complete the TM.
3049                  */
3050                 if (!bfa_itnim_is_online(tskim->itnim)) {
3051                         bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3052                         tskim->tsk_status = BFI_TSKIM_STS_OK;
3053                         bfa_tskim_cleanup_ios(tskim);
3054                         return;
3055                 }
3056
3057                 if (!bfa_tskim_send(tskim)) {
3058                         bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3059                         bfa_stats(tskim->itnim, tm_qwait);
3060                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3061                                           &tskim->reqq_wait);
3062                 }
3063                 break;
3064
3065         default:
3066                 bfa_sm_fault(tskim->bfa, event);
3067         }
3068 }
3069
3070 /*
3071  * TM command is active, awaiting completion from firmware to
3072  * clean up IO requests in the TM scope.
3073  */
3074 static void
3075 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3076 {
3077         bfa_trc(tskim->bfa, event);
3078
3079         switch (event) {
3080         case BFA_TSKIM_SM_DONE:
3081                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3082                 bfa_tskim_cleanup_ios(tskim);
3083                 break;
3084
3085         case BFA_TSKIM_SM_CLEANUP:
3086                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3087                 if (!bfa_tskim_send_abort(tskim)) {
3088                         bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3089                         bfa_stats(tskim->itnim, tm_qwait);
3090                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3091                                 &tskim->reqq_wait);
3092                 }
3093                 break;
3094
3095         case BFA_TSKIM_SM_HWFAIL:
3096                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3097                 bfa_tskim_iocdisable_ios(tskim);
3098                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3099                 break;
3100
3101         default:
3102                 bfa_sm_fault(tskim->bfa, event);
3103         }
3104 }
3105
3106 /*
3107  * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3108  * completion event from firmware.
3109  */
3110 static void
3111 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3112 {
3113         bfa_trc(tskim->bfa, event);
3114
3115         switch (event) {
3116         case BFA_TSKIM_SM_DONE:
3117                 /*
3118                  * Ignore and wait for ABORT completion from firmware.
3119                  */
3120                 break;
3121
3122         case BFA_TSKIM_SM_CLEANUP_DONE:
3123                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3124                 bfa_tskim_cleanup_ios(tskim);
3125                 break;
3126
3127         case BFA_TSKIM_SM_HWFAIL:
3128                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3129                 bfa_tskim_iocdisable_ios(tskim);
3130                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3131                 break;
3132
3133         default:
3134                 bfa_sm_fault(tskim->bfa, event);
3135         }
3136 }
3137
3138 static void
3139 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3140 {
3141         bfa_trc(tskim->bfa, event);
3142
3143         switch (event) {
3144         case BFA_TSKIM_SM_IOS_DONE:
3145                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3146                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3147                 break;
3148
3149         case BFA_TSKIM_SM_CLEANUP:
3150                 /*
3151                  * Ignore, TM command completed on wire.
3152                  * Notify TM completion on IO cleanup completion.
3153                  */
3154                 break;
3155
3156         case BFA_TSKIM_SM_HWFAIL:
3157                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3158                 bfa_tskim_iocdisable_ios(tskim);
3159                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3160                 break;
3161
3162         default:
3163                 bfa_sm_fault(tskim->bfa, event);
3164         }
3165 }
3166
3167 /*
3168  * Task management command is waiting for room in request CQ
3169  */
3170 static void
3171 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3172 {
3173         bfa_trc(tskim->bfa, event);
3174
3175         switch (event) {
3176         case BFA_TSKIM_SM_QRESUME:
3177                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3178                 bfa_tskim_send(tskim);
3179                 break;
3180
3181         case BFA_TSKIM_SM_CLEANUP:
3182                 /*
3183                  * No need to send TM on wire since ITN is offline.
3184                  */
3185                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3186                 bfa_reqq_wcancel(&tskim->reqq_wait);
3187                 bfa_tskim_cleanup_ios(tskim);
3188                 break;
3189
3190         case BFA_TSKIM_SM_HWFAIL:
3191                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3192                 bfa_reqq_wcancel(&tskim->reqq_wait);
3193                 bfa_tskim_iocdisable_ios(tskim);
3194                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3195                 break;
3196
3197         default:
3198                 bfa_sm_fault(tskim->bfa, event);
3199         }
3200 }
3201
3202 /*
3203  * Task management command is active, awaiting room in the request CQ
3204  * to send the cleanup request.
3205  */
3206 static void
3207 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3208                 enum bfa_tskim_event event)
3209 {
3210         bfa_trc(tskim->bfa, event);
3211
3212         switch (event) {
3213         case BFA_TSKIM_SM_DONE:
3214                 bfa_reqq_wcancel(&tskim->reqq_wait);
3215                 /*
3216                  * Fall through !!!
3217                  */
3218         case BFA_TSKIM_SM_QRESUME:
3219                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3220                 bfa_tskim_send_abort(tskim);
3221                 break;
3222
3223         case BFA_TSKIM_SM_HWFAIL:
3224                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3225                 bfa_reqq_wcancel(&tskim->reqq_wait);
3226                 bfa_tskim_iocdisable_ios(tskim);
3227                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3228                 break;
3229
3230         default:
3231                 bfa_sm_fault(tskim->bfa, event);
3232         }
3233 }
3234
3235 /*
3236  * BFA callback is pending
3237  */
3238 static void
3239 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3240 {
3241         bfa_trc(tskim->bfa, event);
3242
3243         switch (event) {
3244         case BFA_TSKIM_SM_HCB:
3245                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3246                 bfa_tskim_free(tskim);
3247                 break;
3248
3249         case BFA_TSKIM_SM_CLEANUP:
3250                 bfa_tskim_notify_comp(tskim);
3251                 break;
3252
3253         case BFA_TSKIM_SM_HWFAIL:
3254                 break;
3255
3256         default:
3257                 bfa_sm_fault(tskim->bfa, event);
3258         }
3259 }
3260
3261 static void
3262 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3263 {
3264         struct bfa_tskim_s *tskim = cbarg;
3265
3266         if (!complete) {
3267                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3268                 return;
3269         }
3270
3271         bfa_stats(tskim->itnim, tm_success);
3272         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3273 }
3274
3275 static void
3276 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3277 {
3278         struct bfa_tskim_s *tskim = cbarg;
3279
3280         if (!complete) {
3281                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3282                 return;
3283         }
3284
3285         bfa_stats(tskim->itnim, tm_failures);
3286         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3287                                 BFI_TSKIM_STS_FAILED);
3288 }
3289
3290 static bfa_boolean_t
3291 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3292 {
3293         switch (tskim->tm_cmnd) {
3294         case FCP_TM_TARGET_RESET:
3295                 return BFA_TRUE;
3296
3297         case FCP_TM_ABORT_TASK_SET:
3298         case FCP_TM_CLEAR_TASK_SET:
3299         case FCP_TM_LUN_RESET:
3300         case FCP_TM_CLEAR_ACA:
3301                 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3302
3303         default:
3304                 WARN_ON(1);
3305         }
3306
3307         return BFA_FALSE;
3308 }
3309
3310 /*
3311  * Gather affected IO requests under the scope of a TM command.
3312  */
3313 static void
3314 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3315 {
3316         struct bfa_itnim_s *itnim = tskim->itnim;
3317         struct bfa_ioim_s *ioim;
3318         struct list_head *qe, *qen;
3319         struct scsi_cmnd *cmnd;
3320         struct scsi_lun scsilun;
3321
3322         INIT_LIST_HEAD(&tskim->io_q);
3323
3324         /*
3325          * Gather any active IO requests first.
3326          */
3327         list_for_each_safe(qe, qen, &itnim->io_q) {
3328                 ioim = (struct bfa_ioim_s *) qe;
3329                 cmnd = (struct scsi_cmnd *) ioim->dio;
3330                 int_to_scsilun(cmnd->device->lun, &scsilun);
3331                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3332                         list_del(&ioim->qe);
3333                         list_add_tail(&ioim->qe, &tskim->io_q);
3334                 }
3335         }
3336
3337         /*
3338          * Fail back any pending IO requests immediately.
3339          */
3340         list_for_each_safe(qe, qen, &itnim->pending_q) {
3341                 ioim = (struct bfa_ioim_s *) qe;
3342                 cmnd = (struct scsi_cmnd *) ioim->dio;
3343                 int_to_scsilun(cmnd->device->lun, &scsilun);
3344                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3345                         list_del(&ioim->qe);
3346                         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3347                         bfa_ioim_tov(ioim);
3348                 }
3349         }
3350 }
3351
3352 /*
3353  * IO cleanup completion
3354  */
3355 static void
3356 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3357 {
3358         struct bfa_tskim_s *tskim = tskim_cbarg;
3359
3360         bfa_stats(tskim->itnim, tm_io_comps);
3361         bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3362 }
3363
3364 /*
3365  * Clean up all IO requests gathered in the TM command's scope.
3366  */
3367 static void
3368 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3369 {
3370         struct bfa_ioim_s *ioim;
3371         struct list_head        *qe, *qen;
3372
3373         bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3374
3375         list_for_each_safe(qe, qen, &tskim->io_q) {
3376                 ioim = (struct bfa_ioim_s *) qe;
3377                 bfa_wc_up(&tskim->wc);
3378                 bfa_ioim_cleanup_tm(ioim, tskim);
3379         }
3380
3381         bfa_wc_wait(&tskim->wc);
3382 }
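/*
 * Note on the wait-counter (bfa_wc) idiom used above: bfa_wc_init()
 * arms bfa_tskim_cleanp_comp as the resume callback and takes an
 * initial reference, each gathered IO adds one via bfa_wc_up(), and
 * bfa_tskim_iodone() drops one as each cleanup finishes. A minimal
 * sketch of the idiom (illustrative only; all_done_cb and
 * start_async_cleanup are placeholders):
 *
 *      bfa_wc_init(&wc, all_done_cb, cbarg);
 *      for each outstanding unit:
 *              bfa_wc_up(&wc);
 *              start_async_cleanup(unit);
 *      bfa_wc_wait(&wc);
 */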
3383
3384 /*
3385  * Send task management request to firmware.
3386  */
3387 static bfa_boolean_t
3388 bfa_tskim_send(struct bfa_tskim_s *tskim)
3389 {
3390         struct bfa_itnim_s *itnim = tskim->itnim;
3391         struct bfi_tskim_req_s *m;
3392
3393         /*
3394          * check for room in queue to send request now
3395          */
3396         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3397         if (!m)
3398                 return BFA_FALSE;
3399
3400         /*
3401          * build i/o request message next
3402          */
3403         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3404                         bfa_fn_lpu(tskim->bfa));
3405
3406         m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3407         m->itn_fhdl = tskim->itnim->rport->fw_handle;
3408         m->t_secs = tskim->tsecs;
3409         m->lun = tskim->lun;
3410         m->tm_flags = tskim->tm_cmnd;
3411
3412         /*
3413          * queue I/O message to firmware
3414          */
3415         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3416         return BFA_TRUE;
3417 }
3418
3419 /*
3420  * Send abort request to cleanup an active TM to firmware.
3421  */
3422 static bfa_boolean_t
3423 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3424 {
3425         struct bfa_itnim_s      *itnim = tskim->itnim;
3426         struct bfi_tskim_abortreq_s     *m;
3427
3428         /*
3429          * check for room in queue to send request now
3430          */
3431         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3432         if (!m)
3433                 return BFA_FALSE;
3434
3435         /*
3436          * build i/o request message next
3437          */
3438         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3439                         bfa_fn_lpu(tskim->bfa));
3440
3441         m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3442
3443         /*
3444          * queue I/O message to firmware
3445          */
3446         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3447         return BFA_TRUE;
3448 }
3449
3450 /*
3451  * Called to resume a task management command waiting for room in the request queue.
3452  */
3453 static void
3454 bfa_tskim_qresume(void *cbarg)
3455 {
3456         struct bfa_tskim_s *tskim = cbarg;
3457
3458         bfa_stats(tskim->itnim, tm_qresumes);
3459         bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3460 }
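/*
 * Request-queue backpressure, as used by the TM send paths above: when
 * bfa_reqq_next() returns NULL the CQ is full, the send routine returns
 * BFA_FALSE, and the state machine parks on bfa_reqq_wait() so that
 * bfa_tskim_qresume() is invoked once room opens up. Sketch of the
 * producer half (reqq is a placeholder):
 *
 *      m = bfa_reqq_next(bfa, reqq);
 *      if (!m)
 *              return BFA_FALSE;
 *      ... build the BFI message in *m ...
 *      bfa_reqq_produce(bfa, reqq, m->mh);
 *      return BFA_TRUE;
 */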
3461
3462 /*
3463  * Cleanup IOs associated with a task management command on IOC failures.
3464  */
3465 static void
3466 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3467 {
3468         struct bfa_ioim_s *ioim;
3469         struct list_head        *qe, *qen;
3470
3471         list_for_each_safe(qe, qen, &tskim->io_q) {
3472                 ioim = (struct bfa_ioim_s *) qe;
3473                 bfa_ioim_iocdisable(ioim);
3474         }
3475 }
3476
3477 /*
3478  * Notification on completions from related ioim.
3479  */
3480 void
3481 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3482 {
3483         bfa_wc_down(&tskim->wc);
3484 }
3485
3486 /*
3487  * Handle IOC h/w failure notification from itnim.
3488  */
3489 void
3490 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3491 {
3492         tskim->notify = BFA_FALSE;
3493         bfa_stats(tskim->itnim, tm_iocdowns);
3494         bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3495 }
3496
3497 /*
3498  * Cleanup TM command and associated IOs as part of ITNIM offline.
3499  */
3500 void
3501 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3502 {
3503         tskim->notify = BFA_TRUE;
3504         bfa_stats(tskim->itnim, tm_cleanups);
3505         bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3506 }
3507
3508 /*
3509  * Memory allocation and initialization.
3510  */
3511 void
3512 bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3513 {
3514         struct bfa_tskim_s *tskim;
3515         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
3516         u16     i;
3517
3518         INIT_LIST_HEAD(&fcpim->tskim_free_q);
3519         INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3520
3521         tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3522         fcpim->tskim_arr = tskim;
3523
3524         for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3525                 /*
3526                  * initialize TSKIM
3527                  */
3528                 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3529                 tskim->tsk_tag = i;
3530                 tskim->bfa      = fcpim->bfa;
3531                 tskim->fcpim    = fcpim;
3532                 tskim->notify  = BFA_FALSE;
3533                 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3534                                         tskim);
3535                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3536
3537                 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3538         }
3539
3540         bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3541 }
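/*
 * The kva carve-out idiom used above (and again in bfa_iotag_attach()
 * below): a module claims a typed array at the current kva cursor,
 * initializes its entries, then advances the cursor past the array.
 * Sketch (struct foo_s and n are placeholders):
 *
 *      arr = (struct foo_s *) bfa_mem_kva_curp(fcp);
 *      ... initialize n entries ...
 *      bfa_mem_kva_curp(fcp) = (u8 *) (arr + n);
 */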
3542
3543 void
3544 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3545 {
3546         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3547         struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3548         struct bfa_tskim_s *tskim;
3549         u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
3550
3551         tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3552         WARN_ON(tskim->tsk_tag != tsk_tag);
3553
3554         tskim->tsk_status = rsp->tsk_status;
3555
3556         /*
3557          * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3558          * requests. All other statuses are for normal completions.
3559          */
3560         if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3561                 bfa_stats(tskim->itnim, tm_cleanup_comps);
3562                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3563         } else {
3564                 bfa_stats(tskim->itnim, tm_fw_rsps);
3565                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3566         }
3567 }
3568
3569
3570 struct bfa_tskim_s *
3571 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3572 {
3573         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3574         struct bfa_tskim_s *tskim;
3575
3576         bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3577
3578         if (tskim)
3579                 tskim->dtsk = dtsk;
3580
3581         return tskim;
3582 }
3583
3584 void
3585 bfa_tskim_free(struct bfa_tskim_s *tskim)
3586 {
3587         WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3588         list_del(&tskim->qe);
3589         list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3590 }
3591
3592 /*
3593  * Start a task management command.
3594  *
3595  * @param[in]   tskim   BFA task management command instance
3596  * @param[in]   itnim   i-t nexus for the task management command
3597  * @param[in]   lun     LUN, if applicable
3598  * @param[in]   tm_cmnd Task management command code.
3599  * @param[in]   t_secs  Timeout in seconds
3600  *
3601  * @return None.
3602  */
3603 void
3604 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3605                         struct scsi_lun lun,
3606                         enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3607 {
3608         tskim->itnim    = itnim;
3609         tskim->lun      = lun;
3610         tskim->tm_cmnd = tm_cmnd;
3611         tskim->tsecs    = tsecs;
3612         tskim->notify  = BFA_FALSE;
3613         bfa_stats(itnim, tm_cmnds);
3614
3615         list_add_tail(&tskim->qe, &itnim->tsk_q);
3616         bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3617 }
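/*
 * Usage sketch (hypothetical caller, modeled on the bfad LUN-reset
 * path; the timeout constant is a placeholder):
 *
 *      tskim = bfa_tskim_alloc(&bfad->bfa, dtsk);
 *      if (!tskim)
 *              return BFA_STATUS_FAILED;
 *      int_to_scsilun(cmnd->device->lun, &scsilun);
 *      bfa_tskim_start(tskim, itnim, scsilun, FCP_TM_LUN_RESET,
 *                      BFAD_LUN_RESET_TMO);
 */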
3618
3619 void
3620 bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3621 {
3622         struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
3623         struct list_head        *qe;
3624         int     i;
3625
3626         for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3627                 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3628                 list_add_tail(qe, &fcpim->tskim_unused_q);
3629         }
3630 }
3631
3632 /* BFA FCP module - parent module for fcpim */
3633
3634 BFA_MODULE(fcp);
3635
3636 static void
3637 bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3638                 struct bfa_s *bfa)
3639 {
3640         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3641         struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3642         struct bfa_mem_dma_s *seg_ptr;
3643         u16     nsegs, idx, per_seg_ios, num_io_req;
3644         u32     km_len = 0;
3645
3646         /*
3647          * Zero is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
3648          * If the values are non-zero, clamp them to the supported limits.
3649          */
3650         if (cfg->fwcfg.num_ioim_reqs &&
3651             cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3652                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3653         else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3654                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3655
3656         if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3657                 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3658
3659         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3660         if (num_io_req > BFA_IO_MAX) {
3661                 if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3662                         cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3663                         cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3664                 } else if (cfg->fwcfg.num_fwtio_reqs)
3665                         cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3666                 else
3667                         cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3668         }
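        /*
         * Worked example (illustrative): if both pools are configured and
         * their sum exceeds BFA_IO_MAX, each is capped at BFA_IO_MAX/2; if
         * only one pool is in use, that pool alone is reset to its own
         * maximum (BFA_FWTIO_MAX or BFA_IOIM_MAX).
         */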
3669
3670         bfa_fcpim_meminfo(cfg, &km_len);
3671
3672         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3673         km_len += num_io_req * sizeof(struct bfa_iotag_s);
3674         km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3675
3676         /* dma memory */
3677         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3678         per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3679
3680         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3681                 if (num_io_req >= per_seg_ios) {
3682                         num_io_req -= per_seg_ios;
3683                         bfa_mem_dma_setup(minfo, seg_ptr,
3684                                 per_seg_ios * BFI_IOIM_SNSLEN);
3685                 } else
3686                         bfa_mem_dma_setup(minfo, seg_ptr,
3687                                 num_io_req * BFI_IOIM_SNSLEN);
3688         }
3689
3690         /* kva memory */
3691         bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3692 }
3693
3694 static void
3695 bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3696                 struct bfa_pcidev_s *pcidev)
3697 {
3698         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3699         struct bfa_mem_dma_s *seg_ptr;
3700         u16     idx, nsegs, num_io_req;
3701
3702         fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3703         fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
3704         fcp->num_itns   = cfg->fwcfg.num_rports;
3705         fcp->bfa = bfa;
3706
3707         /*
3708          * Set up the pool of snsbase addresses that is passed to fw as
3709          * part of bfi_iocfc_cfg_s.
3710          */
3711         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3712         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3713
3714         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3715
3716                 if (!bfa_mem_dma_virt(seg_ptr))
3717                         break;
3718
3719                 fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3720                 fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3721                 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3722         }
3723
3724         bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3725
3726         bfa_iotag_attach(fcp);
3727
3728         fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3729         bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3730                         (fcp->num_itns * sizeof(struct bfa_itn_s));
3731         memset(fcp->itn_arr, 0,
3732                         (fcp->num_itns * sizeof(struct bfa_itn_s)));
3733 }
3734
3735 static void
3736 bfa_fcp_detach(struct bfa_s *bfa)
3737 {
3738 }
3739
3740 static void
3741 bfa_fcp_start(struct bfa_s *bfa)
3742 {
3743         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3744
3745         /*
3746          * bfa_init() with flash read is complete. Now invalidate the stale
3747          * contents of the LUN mask, such as unit attention, rp tag and lp tag.
3748          */
3749         bfa_ioim_lm_init(fcp->bfa);
3750 }
3751
3752 static void
3753 bfa_fcp_stop(struct bfa_s *bfa)
3754 {
3755 }
3756
3757 static void
3758 bfa_fcp_iocdisable(struct bfa_s *bfa)
3759 {
3760         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3761
3762         /* Enqueue unused ioim resources to free_q */
3763         list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
3764
3765         bfa_fcpim_iocdisable(fcp);
3766 }
3767
3768 void
3769 bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
3770 {
3771         struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
3772         struct list_head        *qe;
3773         int     i;
3774
3775         for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3776                 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3777                 list_add_tail(qe, &mod->iotag_unused_q);
3778         }
3779 }
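/*
 * Note: bfa_fcp_iocdisable() above splices iotag_unused_q back into
 * iotag_ioim_free_q, so a later IOC enable starts from the full iotag
 * pool before this routine trims it down to what firmware supports.
 */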
3780
3781 void
3782 bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3783                 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3784 {
3785         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3786         struct bfa_itn_s *itn;
3787
3788         itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3789         itn->isr = isr;
3790 }
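/*
 * Usage sketch (hypothetical): the module that owns an rport registers
 * its handler once at rport-create time, and bfa_itn_isr() below then
 * dispatches firmware ITN messages to it by bfa_handle tag:
 *
 *      static void my_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
 *      ...
 *      bfa_itn_create(bfa, rport, my_itn_isr);
 */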
3791
3792 /*
3793  * Itn interrupt processing.
3794  */
3795 void
3796 bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3797 {
3798         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3799         union bfi_itn_i2h_msg_u msg;
3800         struct bfa_itn_s *itn;
3801
3802         msg.msg = m;
3803         itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3804
3805         if (itn->isr)
3806                 itn->isr(bfa, m);
3807         else
3808                 WARN_ON(1);
3809 }
3810
3811 void
3812 bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3813 {
3814         struct bfa_iotag_s *iotag;
3815         u16     num_io_req, i;
3816
3817         iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3818         fcp->iotag_arr = iotag;
3819
3820         INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3821         INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3822         INIT_LIST_HEAD(&fcp->iotag_unused_q);
3823
3824         num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3825         for (i = 0; i < num_io_req; i++, iotag++) {
3826                 memset(iotag, 0, sizeof(struct bfa_iotag_s));
3827                 iotag->tag = i;
3828                 if (i < fcp->num_ioim_reqs)
3829                         list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3830                 else
3831                         list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3832         }
3833
3834         bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3835 }