/*
 * Source: drivers/scsi/lpfc/lpfc_hbadisc.c
 * Commit subject: [SCSI] lpfc 8.2.2 : Error messages and debugfs updates
 */
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_hw.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_scsi.h"
36 #include "lpfc.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
41
/* AlpaArray for assignment of scsid for scan-down and bind_method */
/*
 * Table of the valid FC-AL arbitrated-loop physical addresses (ALPAs)
 * in descending loop-priority order.  A node's position in this table
 * gives the SCSI ID used for scan-down / ALPA-based target binding.
 */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
58
59 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
60
61 void
62 lpfc_terminate_rport_io(struct fc_rport *rport)
63 {
64         struct lpfc_rport_data *rdata;
65         struct lpfc_nodelist * ndlp;
66         struct lpfc_hba *phba;
67
68         rdata = rport->dd_data;
69         ndlp = rdata->pnode;
70
71         if (!ndlp) {
72                 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
73                         printk(KERN_ERR "Cannot find remote node"
74                         " to terminate I/O Data x%x\n",
75                         rport->port_id);
76                 return;
77         }
78
79         phba  = ndlp->vport->phba;
80
81         lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82                 "rport terminate: sid:x%x did:x%x flg:x%x",
83                 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
84
85         if (ndlp->nlp_sid != NLP_NO_SID) {
86                 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
87                         ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
88         }
89
90         return;
91 }
92
93 /*
94  * This function will be called when dev_loss_tmo fire.
95  */
96 void
97 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
98 {
99         struct lpfc_rport_data *rdata;
100         struct lpfc_nodelist * ndlp;
101         struct lpfc_vport *vport;
102         struct lpfc_hba   *phba;
103         struct completion devloss_compl;
104         struct lpfc_work_evt *evtp;
105
106         rdata = rport->dd_data;
107         ndlp = rdata->pnode;
108
109         if (!ndlp) {
110                 if (rport->scsi_target_id != -1) {
111                         printk(KERN_ERR "Cannot find remote node"
112                                 " for rport in dev_loss_tmo_callbk x%x\n",
113                                 rport->port_id);
114                 }
115                 return;
116         }
117
118         vport = ndlp->vport;
119         phba  = vport->phba;
120
121         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
122                 "rport devlosscb: sid:x%x did:x%x flg:x%x",
123                 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
124
125         init_completion(&devloss_compl);
126         evtp = &ndlp->dev_loss_evt;
127
128         if (!list_empty(&evtp->evt_listp))
129                 return;
130
131         spin_lock_irq(&phba->hbalock);
132         evtp->evt_arg1  = ndlp;
133         evtp->evt_arg2  = &devloss_compl;
134         evtp->evt       = LPFC_EVT_DEV_LOSS;
135         list_add_tail(&evtp->evt_listp, &phba->work_list);
136         if (phba->work_wait)
137                 wake_up(phba->work_wait);
138
139         spin_unlock_irq(&phba->hbalock);
140
141         wait_for_completion(&devloss_compl);
142
143         return;
144 }
145
/*
 * Worker-thread handler for an expired dev_loss_tmo (queued as
 * LPFC_EVT_DEV_LOSS by lpfc_dev_loss_tmo_callbk).  Flushes outstanding
 * I/O for the node, logs the timeout, and either removes the node via
 * the discovery state machine or just breaks the node<->rport
 * cross references, dropping the corresponding references.
 */
void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	/* WWPN bytes, printed in the log messages below */
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Node is mapped again and we are not unloading: nothing to do */
	if (!(vport->load_flag & FC_UNLOADING) &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		int  put_node;
		int  put_rport;

		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	/* Timeouts during driver unload are expected: log at INFO only */
	if (vport->load_flag & FC_UNLOADING)
		warn_on = 0;

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d (%d):0203 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no, vport->vpi,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d (%d):0204 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no, vport->vpi,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	/* Remove the node through the state machine unless it is still
	 * in use (unloading, delayed ELS retry, pending (re)discovery, or
	 * already unmapped); otherwise just drop the node<->rport links.
	 */
	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
	else {
		int  put_node;
		int  put_rport;

		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
	}
}
244
245
246 void
247 lpfc_worker_wake_up(struct lpfc_hba *phba)
248 {
249         wake_up(phba->work_wait);
250         return;
251 }
252
/*
 * Drain phba->work_list, dispatching each queued lpfc_work_evt to its
 * handler.  Runs in the worker thread.  hbalock is held only while
 * manipulating the list and is dropped across every handler call.
 */
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	struct lpfc_vport     *vport;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		/* Events allocated by lpfc_workq_post_event are freed
		 * below; events embedded in an ndlp clear free_evt.
		 */
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_DEV_LOSS_DELAY:
			free_evt = 0; /* evt is part of ndlp */
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			vport = ndlp->vport;
			if (!vport)
				break;

			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
				"rport devlossdly:did:x%x flg:x%x",
				ndlp->nlp_DID, ndlp->nlp_flag, 0);

			/* Only remove the node if nothing else still
			 * needs it: not unloading, no delayed ELS retry,
			 * no pending (re)discovery.
			 */
			if (!(vport->load_flag & FC_UNLOADING) &&
			    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
			    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
				lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
			}
			break;
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			/* Hold a node reference across the handler */
			lpfc_nlp_get(ndlp);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* evt_arg2 is the completion that
			 * lpfc_dev_loss_tmo_callbk is waiting on
			 */
			complete((struct completion *)(evtp->evt_arg2));
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			/* Result is returned to the poster via evt_arg1 */
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
346
/*
 * Worker-thread dispatcher: services latched host-attention bits
 * (error / mailbox / link attention), per-vport timer and queue events,
 * the ELS slow ring (including deferred events), and finally the
 * queued work_list events.
 */
void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport *vport;

	/* Snapshot and clear the latched host attention bits */
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

		/* Skip hosts that are already going away */
		if (!scsi_host_get(shost)) {
			continue;
		}
		/* Drop hbalock while running the handlers; the shost
		 * reference keeps the vport alive in the meantime.
		 */
		spin_unlock_irq(&phba->hbalock);
		work_port_events = vport->work_port_events;

		if (work_port_events & WORKER_DISC_TMO)
			lpfc_disc_timeout_handler(vport);

		if (work_port_events & WORKER_ELS_TMO)
			lpfc_els_timeout_handler(vport);

		if (work_port_events & WORKER_HB_TMO)
			lpfc_hb_timeout_handler(phba);

		if (work_port_events & WORKER_MBOX_TMO)
			lpfc_mbox_timeout_handler(phba);

		if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
			lpfc_unblock_fabric_iocbs(phba);

		if (work_port_events & WORKER_FDMI_TMO)
			lpfc_fdmi_timeout_handler(vport);

		if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
			lpfc_ramp_down_queue_handler(phba);

		if (work_port_events & WORKER_RAMP_UP_QUEUE)
			lpfc_ramp_up_queue_handler(phba);

		/* Clear only the event bits that were just serviced */
		spin_lock_irq(&vport->work_port_lock);
		vport->work_port_events &= ~work_port_events;
		spin_unlock_irq(&vport->work_port_lock);
		scsi_host_put(shost);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Service the ELS (slow) ring; HA_RXMASK selects this ring's
	 * receive-attention bits out of ha_copy.
	 */
	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_MASK) {
			/* Ring is stopped: remember the event for later */
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
		} else {
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
		}
		/*
		 * Turn on Ring interrupts
		 */
		spin_lock_irq(&phba->hbalock);
		control = readl(phba->HCregaddr);
		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Enable ring: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);

			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		}
		else {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Ring ok:     cntl:x%x hacopy:x%x",
				control, ha_copy, 0);
		}
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_work_list_done(phba);
}
446
447 static int
448 check_work_wait_done(struct lpfc_hba *phba)
449 {
450         struct lpfc_vport *vport;
451         struct lpfc_sli_ring *pring;
452         int rc = 0;
453
454         spin_lock_irq(&phba->hbalock);
455         list_for_each_entry(vport, &phba->port_list, listentry) {
456                 if (vport->work_port_events) {
457                         rc = 1;
458                         goto exit;
459                 }
460         }
461
462         if (phba->work_ha || (!list_empty(&phba->work_list)) ||
463             kthread_should_stop()) {
464                 rc = 1;
465                 goto exit;
466         }
467
468         pring = &phba->sli.ring[LPFC_ELS_RING];
469         if (pring->flag & LPFC_DEFERRED_RING_EVENT)
470                 rc = 1;
471 exit:
472         if (rc)
473                 phba->work_found++;
474         else
475                 phba->work_found = 0;
476
477         spin_unlock_irq(&phba->hbalock);
478         return rc;
479 }
480
481
/*
 * Main loop of the lpfc worker kthread.  Sleeps until
 * check_work_wait_done() reports pending work, then runs
 * lpfc_work_done().  Exits when kthread_stop() is requested.
 */
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	/* Publish the wait queue so other contexts can wake this thread;
	 * cleared again below when the thread exits.
	 */
	phba->work_wait = &work_waitq;
	phba->work_found = 0;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));

		/* kthreads ignore signals, so the wait can only finish
		 * with the condition true (rc == 0)
		 */
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

		/* If there is alot of slow ring work, like during link up
		 * check_work_wait_done() may cause this thread to not give
		 * up the CPU for very long periods of time. This may cause
		 * soft lockups or other problems. To avoid these situations
		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
		 * consecutive iterations.
		 */
		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
			phba->work_found = 0;
			schedule();
		}
	}
	phba->work_wait = NULL;
	return 0;
}
520
521 /*
522  * This is only called to handle FC worker events. Since this a rare
523  * occurance, we allocate a struct lpfc_work_evt structure here instead of
524  * embedding it in the IOCB.
525  */
526 int
527 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
528                       uint32_t evt)
529 {
530         struct lpfc_work_evt  *evtp;
531         unsigned long flags;
532
533         /*
534          * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
535          * be queued to worker thread for processing
536          */
537         evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
538         if (!evtp)
539                 return 0;
540
541         evtp->evt_arg1  = arg1;
542         evtp->evt_arg2  = arg2;
543         evtp->evt       = evt;
544
545         spin_lock_irqsave(&phba->hbalock, flags);
546         list_add_tail(&evtp->evt_listp, &phba->work_list);
547         if (phba->work_wait)
548                 lpfc_worker_wake_up(phba);
549         spin_unlock_irqrestore(&phba->hbalock, flags);
550
551         return 1;
552 }
553
554 void
555 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
556 {
557         struct lpfc_hba  *phba = vport->phba;
558         struct lpfc_nodelist *ndlp, *next_ndlp;
559         int  rc;
560
561         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
562                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
563                         continue;
564
565                 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
566                         lpfc_unreg_rpi(vport, ndlp);
567
568                 /* Leave Fabric nodes alone on link down */
569                 if (!remove && ndlp->nlp_type & NLP_FABRIC)
570                         continue;
571                 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
572                                              remove
573                                              ? NLP_EVT_DEVICE_RM
574                                              : NLP_EVT_DEVICE_RECOVERY);
575         }
576         if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
577                 lpfc_mbx_unreg_vpi(vport);
578                 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
579         }
580 }
581
582 static void
583 lpfc_linkdown_port(struct lpfc_vport *vport)
584 {
585         struct lpfc_nodelist *ndlp, *next_ndlp;
586         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
587
588         fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
589
590         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
591                 "Link Down:       state:x%x rtry:x%x flg:x%x",
592                 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
593
594         /* Cleanup any outstanding RSCN activity */
595         lpfc_els_flush_rscn(vport);
596
597         /* Cleanup any outstanding ELS commands */
598         lpfc_els_flush_cmd(vport);
599
600         lpfc_cleanup_rpis(vport, 0);
601
602         /* free any ndlp's on unused list */
603         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
604                                 /* free any ndlp's in unused state */
605                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
606                         lpfc_drop_node(vport, ndlp);
607
608         /* Turn off discovery timer if its running */
609         lpfc_can_disctmo(vport);
610 }
611
/*
 * HBA-wide link-down processing: move the link state to LPFC_LINK_DOWN,
 * run per-port link-down cleanup, unregister firmware default RPIs,
 * and reset point-to-point state.  Always returns 0.
 */
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport *port_iterator;
	LPFC_MBOXQ_t          *mb;

	/* Already down: nothing to do */
	if (phba->link_state == LPFC_LINK_DOWN) {
		return 0;
	}
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry(port_iterator, &phba->port_list, listentry) {

				/* Issue a LINK DOWN event to all nodes */
		lpfc_linkdown_port(port_iterator);
	}

	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		/* On issue failure the mailbox must be freed here; on
		 * success the completion handler owns it.
		 */
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb,
						(MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
669
670 static void
671 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
672 {
673         struct lpfc_nodelist *ndlp;
674
675         list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
676                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
677                         continue;
678
679                 if (ndlp->nlp_type & NLP_FABRIC) {
680                                 /* On Linkup its safe to clean up the ndlp
681                                  * from Fabric connections.
682                                  */
683                         if (ndlp->nlp_DID != Fabric_DID)
684                                 lpfc_unreg_rpi(vport, ndlp);
685                         lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
686                 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
687                                 /* Fail outstanding IO now since device is
688                                  * marked for PLOGI.
689                                  */
690                         lpfc_unreg_rpi(vport, ndlp);
691                 }
692         }
693 }
694
/*
 * Per-port link-up processing: post the transport LINKUP event, reset
 * the port's discovery-related flags, and clean up stale nodes.
 * No-ops while the port is unloading; without NPIV only the physical
 * port is brought up.
 */
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	/* Reset discovery state for this port under the host lock */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	/* Clean up stale node state when FC_LBIT is set */
	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

				/* free any ndlp's in unused state */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
				 nlp_listp)
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(vport, ndlp);
}
732
733 static int
734 lpfc_linkup(struct lpfc_hba *phba)
735 {
736         struct lpfc_vport *vport;
737
738         phba->link_state = LPFC_LINK_UP;
739
740         /* Unblock fabric iocbs if they are blocked */
741         clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
742         del_timer_sync(&phba->fabric_block_timer);
743
744         list_for_each_entry(vport, &phba->port_list, listentry) {
745                 lpfc_linkup_port(vport);
746         }
747         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
748                 lpfc_issue_clear_la(phba, phba->pport);
749
750         return 0;
751 }
752
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error (0x1601 is tolerated as non-fatal) */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, vport->vpi, mb->mbxStatus,
				vport->port_state);

		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	/* Success path: re-enable link attention interrupts and return */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	return;

	/* NOTE(review): everything from here down to the "out" label is
	 * unreachable -- the success path returns above and the error
	 * path jumps straight to "out".  This looks like the disabled
	 * discovery-restart path referenced by the "we don't do discovery
	 * right now" comment at the top; confirm intent before deleting
	 * or re-enabling it.
	 */
	vport->num_disc_nodes = 0;
	/* go thru NPR nodes and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
	}

	vport->port_state = LPFC_VPORT_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d (%d):0225 Device Discovery completes\n",
			phba->brd_no, vport->vpi);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
	spin_unlock_irq(shost->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
839
840
841 static void
842 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
843 {
844         struct lpfc_vport *vport = pmb->vport;
845
846         if (pmb->mb.mbxStatus)
847                 goto out;
848
849         mempool_free(pmb, phba->mbox_mem_pool);
850
851         if (phba->fc_topology == TOPOLOGY_LOOP &&
852             vport->fc_flag & FC_PUBLIC_LOOP &&
853             !(vport->fc_flag & FC_LBIT)) {
854                         /* Need to wait for FAN - use discovery timer
855                          * for timeout.  port_state is identically
856                          * LPFC_LOCAL_CFG_LINK while waiting for FAN
857                          */
858                         lpfc_set_disctmo(vport);
859                         return;
860         }
861
862         /* Start discovery by sending a FLOGI. port_state is identically
863          * LPFC_FLOGI while waiting for FLOGI cmpl
864          */
865         if (vport->port_state != LPFC_FLOGI) {
866                 vport->port_state = LPFC_FLOGI;
867                 lpfc_set_disctmo(vport);
868                 lpfc_initial_flogi(vport);
869         }
870         return;
871
872 out:
873         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
874                         "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
875                         "HBA state x%x\n",
876                         phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
877                         vport->port_state);
878
879         mempool_free(pmb, phba->mbox_mem_pool);
880
881         lpfc_linkdown(phba);
882
883         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
884                         "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
885                         phba->brd_no, vport->vpi, vport->port_state);
886
887         lpfc_issue_clear_la(phba, vport);
888         return;
889 }
890
891 static void
892 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
893 {
894         MAILBOX_t *mb = &pmb->mb;
895         struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
896         struct lpfc_vport  *vport = pmb->vport;
897
898
899         /* Check for error */
900         if (mb->mbxStatus) {
901                 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
902                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
903                                 "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
904                                 "hba state x%x>\n",
905                                 phba->brd_no, vport->vpi, mb->mbxStatus,
906                                 vport->port_state);
907
908                 lpfc_linkdown(phba);
909                 goto out;
910         }
911
912         memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
913                sizeof (struct serv_parm));
914         if (phba->cfg_soft_wwnn)
915                 u64_to_wwn(phba->cfg_soft_wwnn,
916                            vport->fc_sparam.nodeName.u.wwn);
917         if (phba->cfg_soft_wwpn)
918                 u64_to_wwn(phba->cfg_soft_wwpn,
919                            vport->fc_sparam.portName.u.wwn);
920         memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
921                sizeof(vport->fc_nodename));
922         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
923                sizeof(vport->fc_portname));
924         if (vport->port_type == LPFC_PHYSICAL_PORT) {
925                 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
926                 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
927         }
928
929         lpfc_mbuf_free(phba, mp->virt, mp->phys);
930         kfree(mp);
931         mempool_free(pmb, phba->mbox_mem_pool);
932         return;
933
934 out:
935         pmb->context1 = NULL;
936         lpfc_mbuf_free(phba, mp->virt, mp->phys);
937         kfree(mp);
938         lpfc_issue_clear_la(phba, vport);
939         mempool_free(pmb, phba->mbox_mem_pool);
940         return;
941 }
942
/* Process an AT_LINK_UP attention event: record the link speed and
 * topology, establish the local DID (granted AL_PA on loop, preferred
 * DID otherwise), then bring the link up and issue READ_SPARAM and
 * CONFIG_LINK mailbox commands to continue discovery.
 */
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
        int i;
        struct lpfc_dmabuf *mp;
        int rc;

        /* Allocate both mailboxes up front; NULL results are tolerated
         * and checked before each is used below.
         * NOTE(review): GFP_KERNEL assumes a sleepable context -- confirm
         * for this link-attention processing path.
         */
        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

        spin_lock_irq(&phba->hbalock);
        /* Latch the reported link speed; unrecognized values are
         * recorded as unknown.
         */
        switch (la->UlnkSpeed) {
        case LA_1GHZ_LINK:
                phba->fc_linkspeed = LA_1GHZ_LINK;
                break;
        case LA_2GHZ_LINK:
                phba->fc_linkspeed = LA_2GHZ_LINK;
                break;
        case LA_4GHZ_LINK:
                phba->fc_linkspeed = LA_4GHZ_LINK;
                break;
        case LA_8GHZ_LINK:
                phba->fc_linkspeed = LA_8GHZ_LINK;
                break;
        default:
                phba->fc_linkspeed = LA_UNKNW_LINK;
                break;
        }

        phba->fc_topology = la->topology;
        /* Fabric NPIV support is re-evaluated on every link up. */
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

        if (phba->fc_topology == TOPOLOGY_LOOP) {
                /* NPIV is not used on a loop topology. */
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

                                /* Get Loop Map information */
                if (la->il)
                        vport->fc_flag |= FC_LBIT;

                vport->fc_myDID = la->granted_AL_PA;
                i = la->un.lilpBde64.tus.f.bdeSize;

                if (i == 0) {
                        phba->alpa_map[0] = 0;
                } else {
                        if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
                                int numalpa, j, k;
                                /* Overlay lets 16 map bytes be logged as
                                 * four 32-bit words per line.
                                 */
                                union {
                                        uint8_t pamap[16];
                                        struct {
                                                uint32_t wd1;
                                                uint32_t wd2;
                                                uint32_t wd3;
                                                uint32_t wd4;
                                        } pa;
                                } un;
                                numalpa = phba->alpa_map[0];
                                j = 0;
                                while (j < numalpa) {
                                        memset(un.pamap, 0, 16);
                                        /* Gather up to 16 ALPA entries
                                         * (map data starts at index 1).
                                         */
                                        for (k = 1; j < numalpa; k++) {
                                                un.pamap[k - 1] =
                                                        phba->alpa_map[j + 1];
                                                j++;
                                                if (k == 16)
                                                        break;
                                        }
                                        /* Link Up Event ALPA map */
                                        lpfc_printf_log(phba,
                                                        KERN_WARNING,
                                                        LOG_LINK_EVENT,
                                                        "%d:1304 Link Up Event "
                                                        "ALPA map Data: x%x "
                                                        "x%x x%x x%x\n",
                                                        phba->brd_no,
                                                        un.pa.wd1, un.pa.wd2,
                                                        un.pa.wd3, un.pa.wd4);
                                }
                        }
                }
        } else {
                /* Point-to-point/fabric: enable NPIV when the config and
                 * hardware (SLI-3, vpi resources) allow it.
                 */
                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
                        if (phba->max_vpi && phba->cfg_npiv_enable &&
                           (phba->sli_rev == 3))
                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
                }
                vport->fc_myDID = phba->fc_pref_DID;
                vport->fc_flag |= FC_LBIT;
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_linkup(phba);
        if (sparam_mbox) {
                /* Read service parameters; completion continues in
                 * lpfc_mbx_cmpl_read_sparam.
                 */
                lpfc_read_sparam(phba, sparam_mbox, 0);
                sparam_mbox->vport = vport;
                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
                rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
                                    (MBX_NOWAIT | MBX_STOP_IOCB));
                if (rc == MBX_NOT_FINISHED) {
                        /* Issue failed: free the dma buffer attached by
                         * lpfc_read_sparam plus both mailboxes.
                         */
                        mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
                        if (cfglink_mbox)
                                mempool_free(cfglink_mbox, phba->mbox_mem_pool);
                        goto out;
                }
        }

        if (cfglink_mbox) {
                /* Issue CONFIG_LINK; completion continues in
                 * lpfc_mbx_cmpl_local_config_link.
                 */
                vport->port_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
                cfglink_mbox->vport = vport;
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
                                    (MBX_NOWAIT | MBX_STOP_IOCB));
                if (rc != MBX_NOT_FINISHED)
                        return;
                mempool_free(cfglink_mbox, phba->mbox_mem_pool);
        }
out:
        /* Allocation or mailbox issue failed: discovery cannot proceed,
         * so fail the vport and fall back to CLEAR_LA.
         */
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
                phba->brd_no, vport->vpi,
                vport->port_state, sparam_mbox, cfglink_mbox);

        lpfc_issue_clear_la(phba, vport);
        return;
}
1075
1076 static void
1077 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1078 {
1079         uint32_t control;
1080         struct lpfc_sli *psli = &phba->sli;
1081
1082         lpfc_linkdown(phba);
1083
1084         /* turn on Link Attention interrupts - no CLEAR_LA needed */
1085         spin_lock_irq(&phba->hbalock);
1086         psli->sli_flag |= LPFC_PROCESS_LA;
1087         control = readl(phba->HCregaddr);
1088         control |= HC_LAINT_ENA;
1089         writel(control, phba->HCregaddr);
1090         readl(phba->HCregaddr); /* flush */
1091         spin_unlock_irq(&phba->hbalock);
1092 }
1093
1094 /*
1095  * This routine handles processing a READ_LA mailbox
1096  * command upon completion. It is setup in the LPFC_MBOXQ
1097  * as the completion routine when the command is
1098  * handed off to the SLI layer.
1099  */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        READ_LA_VAR *la;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "%d:1307 READ_LA mbox error x%x state x%x\n",
                                phba->brd_no, mb->mbxStatus, vport->port_state);
                /* Treat a READ_LA failure as a link-down event and mark
                 * the HBA errored; buffers are still released below.
                 */
                lpfc_mbx_issue_link_down(phba);
                phba->link_state = LPFC_HBA_ERROR;
                goto lpfc_mbx_cmpl_read_la_free_mbuf;
        }

        la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

        /* Snapshot the ALPA map delivered in the mailbox dma buffer. */
        memcpy(&phba->alpa_map[0], mp->virt, 128);

        /* Track bypass mode as reported by this attention event. */
        spin_lock_irq(shost->host_lock);
        if (la->pb)
                vport->fc_flag |= FC_BYPASSED_MODE;
        else
                vport->fc_flag &= ~FC_BYPASSED_MODE;
        spin_unlock_irq(shost->host_lock);

        /* A skipped or repeated event tag means attention events were
         * coalesced/lost; count it and, for a link-up, force a
         * link-down first so the up is processed from a clean state.
         */
        if (((phba->fc_eventTag + 1) < la->eventTag) ||
            (phba->fc_eventTag == la->eventTag)) {
                phba->fc_stat.LinkMultiEvent++;
                if (la->attType == AT_LINK_UP)
                        if (phba->fc_eventTag != 0)
                                lpfc_linkdown(phba);
        }

        phba->fc_eventTag = la->eventTag;

        if (la->attType == AT_LINK_UP) {
                phba->fc_stat.LinkUp++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "%d:1306 Link Up Event in loop back mode "
                                "x%x received Data: x%x x%x x%x x%x\n",
                                phba->brd_no, la->eventTag, phba->fc_eventTag,
                                la->granted_AL_PA, la->UlnkSpeed,
                                phba->alpa_map[0]);
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "%d:1303 Link Up Event x%x received "
                                "Data: x%x x%x x%x x%x\n",
                                phba->brd_no, la->eventTag, phba->fc_eventTag,
                                la->granted_AL_PA, la->UlnkSpeed,
                                phba->alpa_map[0]);
                }
                lpfc_mbx_process_link_up(phba, la);
        } else {
                phba->fc_stat.LinkDown++;
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "%d:1305 Link Down Event x%x received "
                                "Data: x%x x%x x%x\n",
                                phba->brd_no, la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
                lpfc_mbx_issue_link_down(phba);
        }

lpfc_mbx_cmpl_read_la_free_mbuf:
        /* Release the mailbox dma buffer and the mailbox itself. */
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
1174
1175 /*
1176  * This routine handles processing a REG_LOGIN mailbox
1177  * command upon completion. It is setup in the LPFC_MBOXQ
1178  * as the completion routine when the command is
1179  * handed off to the SLI layer.
1180  */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport  *vport = pmb->vport;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

        /* Detach the dma buffer from the mailbox; it is freed below. */
        pmb->context1 = NULL;

        /* Good status, call state machine */
        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        /* Drop the ndlp reference held by this mailbox (matches the put
         * pattern in the other reg_login completion handlers).
         */
        lpfc_nlp_put(ndlp);

        return;
}
1199
1200 static void
1201 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1202 {
1203         MAILBOX_t *mb = &pmb->mb;
1204         struct lpfc_vport *vport = pmb->vport;
1205         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1206
1207         switch (mb->mbxStatus) {
1208         case 0x0011:
1209         case 0x0020:
1210         case 0x9700:
1211                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1212                                 "%d (%d):0911 cmpl_unreg_vpi, "
1213                                 "mb status = 0x%x\n",
1214                                 phba->brd_no, vport->vpi, mb->mbxStatus);
1215                 break;
1216         }
1217         vport->unreg_vpi_cmpl = VPORT_OK;
1218         mempool_free(pmb, phba->mbox_mem_pool);
1219         /*
1220          * This shost reference might have been taken at the beginning of
1221          * lpfc_vport_delete()
1222          */
1223         if (vport->load_flag & FC_UNLOADING)
1224                 scsi_host_put(shost);
1225 }
1226
1227 void
1228 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1229 {
1230         struct lpfc_hba  *phba = vport->phba;
1231         LPFC_MBOXQ_t *mbox;
1232         int rc;
1233
1234         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1235         if (!mbox)
1236                 return;
1237
1238         lpfc_unreg_vpi(phba, vport->vpi, mbox);
1239         mbox->vport = vport;
1240         mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1241         rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1242         if (rc == MBX_NOT_FINISHED) {
1243                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1244                                 "%d (%d):1800 Could not issue unreg_vpi\n",
1245                                 phba->brd_no, vport->vpi);
1246                 mempool_free(mbox, phba->mbox_mem_pool);
1247                 vport->unreg_vpi_cmpl = VPORT_ERROR;
1248         }
1249 }
1250
1251 static void
1252 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1253 {
1254         struct lpfc_vport *vport = pmb->vport;
1255         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1256         MAILBOX_t *mb = &pmb->mb;
1257
1258         switch (mb->mbxStatus) {
1259         case 0x0011:
1260         case 0x9601:
1261         case 0x9602:
1262                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1263                                 "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
1264                                 phba->brd_no, vport->vpi, mb->mbxStatus);
1265                 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1266                 spin_lock_irq(shost->host_lock);
1267                 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1268                 spin_unlock_irq(shost->host_lock);
1269                 vport->fc_myDID = 0;
1270                 goto out;
1271         }
1272
1273         vport->num_disc_nodes = 0;
1274         /* go thru NPR list and issue ELS PLOGIs */
1275         if (vport->fc_npr_cnt)
1276                 lpfc_els_disc_plogi(vport);
1277
1278         if (!vport->num_disc_nodes) {
1279                 spin_lock_irq(shost->host_lock);
1280                 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1281                 spin_unlock_irq(shost->host_lock);
1282                 lpfc_can_disctmo(vport);
1283         }
1284         vport->port_state = LPFC_VPORT_READY;
1285
1286 out:
1287         mempool_free(pmb, phba->mbox_mem_pool);
1288         return;
1289 }
1290
1291 /*
1292  * This routine handles processing a Fabric REG_LOGIN mailbox
1293  * command upon completion. It is setup in the LPFC_MBOXQ
1294  * as the completion routine when the command is
1295  * handed off to the SLI layer.
1296  */
1297 void
1298 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1299 {
1300         struct lpfc_vport *vport = pmb->vport;
1301         struct lpfc_vport *next_vport;
1302         MAILBOX_t *mb = &pmb->mb;
1303         struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1304         struct lpfc_nodelist *ndlp;
1305         ndlp = (struct lpfc_nodelist *) pmb->context2;
1306
1307         pmb->context1 = NULL;
1308         pmb->context2 = NULL;
1309
1310         if (mb->mbxStatus) {
1311                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1312                 kfree(mp);
1313                 mempool_free(pmb, phba->mbox_mem_pool);
1314                 lpfc_nlp_put(ndlp);
1315
1316                 if (phba->fc_topology == TOPOLOGY_LOOP) {
1317                         /* FLOGI failed, use loop map to make discovery list */
1318                         lpfc_disc_list_loopmap(vport);
1319
1320                         /* Start discovery */
1321                         lpfc_disc_start(vport);
1322                         return;
1323                 }
1324
1325                 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1326                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1327                         "%d (%d):0258 Register Fabric login error: 0x%x\n",
1328                         phba->brd_no, vport->vpi, mb->mbxStatus);
1329
1330                 return;
1331         }
1332
1333         ndlp->nlp_rpi = mb->un.varWords[0];
1334         ndlp->nlp_type |= NLP_FABRIC;
1335         lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1336
1337         lpfc_nlp_put(ndlp);     /* Drop the reference from the mbox */
1338
1339         if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1340                 list_for_each_entry(next_vport, &phba->port_list, listentry) {
1341                         if (next_vport->port_type == LPFC_PHYSICAL_PORT)
1342                                 continue;
1343
1344                         if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1345                                 lpfc_initial_fdisc(next_vport);
1346                         else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1347                                 lpfc_vport_set_state(vport,
1348                                                      FC_VPORT_NO_FABRIC_SUPP);
1349                                 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1350                                                 "%d (%d):0259 No NPIV Fabric "
1351                                                 "support\n",
1352                                                 phba->brd_no, vport->vpi);
1353                         }
1354                 }
1355                 lpfc_do_scr_ns_plogi(phba, vport);
1356         }
1357
1358         lpfc_mbuf_free(phba, mp->virt, mp->phys);
1359         kfree(mp);
1360         mempool_free(pmb, phba->mbox_mem_pool);
1361         return;
1362 }
1363
1364 /*
1365  * This routine handles processing a NameServer REG_LOGIN mailbox
1366  * command upon completion. It is setup in the LPFC_MBOXQ
1367  * as the completion routine when the command is
1368  * handed off to the SLI layer.
1369  */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport *vport = pmb->vport;

        if (mb->mbxStatus) {
/* NOTE(review): this label sits inside the if-block and is also entered
 * by the goto below when the GID_FT query cannot be issued, so this
 * cleanup sequence serves both failure paths.
 */
out:
                /* Release the mailbox's node reference, its dma buffer
                 * and the mailbox itself, then drop the node.
                 */
                lpfc_nlp_put(ndlp);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_drop_node(vport, ndlp);

                if (phba->fc_topology == TOPOLOGY_LOOP) {
                        /*
                         * RegLogin failed, use loop map to make discovery
                         * list
                         */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        return;
                }
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
                        "%d (%d):0260 Register NameServer error: 0x%x\n",
                        phba->brd_no, vport->vpi, mb->mbxStatus);
                return;
        }

        pmb->context1 = NULL;

        /* Login to the NameServer succeeded: record the RPI and mark
         * the node as an unmapped fabric node.
         */
        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        if (vport->port_state < LPFC_VPORT_READY) {
                /* Link up discovery requires Fabric registration. */
                lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
                lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

                /* Issue SCR just before NameServer GID_FT Query */
                lpfc_issue_els_scr(vport, SCR_DID, 0);
        }

        vport->fc_ns_retry = 0;
        /* Good status, issue CT Request to NameServer */
        if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
                /* Cannot issue NameServer Query, so finish up discovery */
                goto out;
        }

        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return;
}
1436
/* Register (or re-register) ndlp with the FC transport as a remote
 * port and push its roles and static attributes to the transport.
 */
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct fc_rport  *rport;
        struct lpfc_rport_data *rdata;
        struct fc_rport_identifiers rport_ids;
        struct lpfc_hba  *phba = vport->phba;

        /* Remote port has reappeared. Re-register w/ FC transport */
        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rport_ids.port_id = ndlp->nlp_DID;
        /* Roles are reported via fc_remote_port_rolechg after the add. */
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

        /*
         * We leave our node pointer in rport->dd_data when we unregister a
         * FCP target port.  But fc_remote_port_add zeros the space to which
         * rport->dd_data points.  So, if we're reusing a previously
         * registered port, drop the reference that we took the last time we
         * registered the port.
         */
        if (ndlp->rport && ndlp->rport->dd_data &&
            ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
                lpfc_nlp_put(ndlp);
        }

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport add:       did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
        if (!rport || !get_device(&rport->dev)) {
                dev_printk(KERN_WARNING, &phba->pcidev->dev,
                           "Warning: fc_remote_port_add failed\n");
                return;
        }

        /* initialize static port data */
        rport->maxframe_size = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;
        rdata = rport->dd_data;
        /* Take a node reference for the pointer stored in dd_data. */
        rdata->pnode = lpfc_nlp_get(ndlp);

        if (ndlp->nlp_type & NLP_FCP_TARGET)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


        if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);

        /* Cache the transport-assigned SCSI target id when it falls in
         * our presentable range.
         */
        if ((rport->scsi_target_id != -1) &&
            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
        }
        return;
}
1496
1497 static void
1498 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1499 {
1500         struct fc_rport *rport = ndlp->rport;
1501
1502         lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1503                 "rport delete:    did:x%x flg:x%x type x%x",
1504                 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1505
1506         fc_remote_port_delete(rport);
1507
1508         return;
1509 }
1510
1511 static void
1512 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1513 {
1514         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1515
1516         spin_lock_irq(shost->host_lock);
1517         switch (state) {
1518         case NLP_STE_UNUSED_NODE:
1519                 vport->fc_unused_cnt += count;
1520                 break;
1521         case NLP_STE_PLOGI_ISSUE:
1522                 vport->fc_plogi_cnt += count;
1523                 break;
1524         case NLP_STE_ADISC_ISSUE:
1525                 vport->fc_adisc_cnt += count;
1526                 break;
1527         case NLP_STE_REG_LOGIN_ISSUE:
1528                 vport->fc_reglogin_cnt += count;
1529                 break;
1530         case NLP_STE_PRLI_ISSUE:
1531                 vport->fc_prli_cnt += count;
1532                 break;
1533         case NLP_STE_UNMAPPED_NODE:
1534                 vport->fc_unmap_cnt += count;
1535                 break;
1536         case NLP_STE_MAPPED_NODE:
1537                 vport->fc_map_cnt += count;
1538                 break;
1539         case NLP_STE_NPR_NODE:
1540                 vport->fc_npr_cnt += count;
1541                 break;
1542         }
1543         spin_unlock_irq(shost->host_lock);
1544 }
1545
/* Perform FC-transport bookkeeping when a node changes discovery
 * state: unregister/register the remote port as needed and demote
 * mapped nodes that did not receive a usable SCSI target id.
 */
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (new_state == NLP_STE_UNMAPPED_NODE) {
                /* Entering UNMAPPED: clear the FCP roles and the pending
                 * nodev-removal flag; the node is a plain FC node.
                 */
                ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
                ndlp->nlp_type |= NLP_FC_NODE;
        }
        if (new_state == NLP_STE_MAPPED_NODE)
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
        if (new_state == NLP_STE_NPR_NODE)
                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

        /* Transport interface */
        if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
                            old_state == NLP_STE_UNMAPPED_NODE)) {
                /* Leaving a registered state: drop the FC transport
                 * rport before any re-registration below.
                 */
                vport->phba->nport_event_cnt++;
                lpfc_unregister_remote_port(ndlp);
        }

        if (new_state ==  NLP_STE_MAPPED_NODE ||
            new_state == NLP_STE_UNMAPPED_NODE) {
                vport->phba->nport_event_cnt++;
                /*
                 * Tell the fc transport about the port, if we haven't
                 * already. If we have, and it's a scsi entity, be
                 * sure to unblock any attached scsi devices
                 */
                lpfc_register_remote_port(vport, ndlp);
        }
        /*
         * if we added to Mapped list, but the remote port
         * registration failed or assigned a target id outside
         * our presentable range - move the node to the
         * Unmapped List
         */
        if (new_state == NLP_STE_MAPPED_NODE &&
            (!ndlp->rport ||
             ndlp->rport->scsi_target_id == -1 ||
             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
                spin_unlock_irq(shost->host_lock);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        }
}
1595
1596 static char *
1597 lpfc_nlp_state_name(char *buffer, size_t size, int state)
1598 {
1599         static char *states[] = {
1600                 [NLP_STE_UNUSED_NODE] = "UNUSED",
1601                 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1602                 [NLP_STE_ADISC_ISSUE] = "ADISC",
1603                 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1604                 [NLP_STE_PRLI_ISSUE] = "PRLI",
1605                 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1606                 [NLP_STE_MAPPED_NODE] = "MAPPED",
1607                 [NLP_STE_NPR_NODE] = "NPR",
1608         };
1609
1610         if (state < ARRAY_SIZE(states) && states[state])
1611                 strlcpy(buffer, states[state], size);
1612         else
1613                 snprintf(buffer, size, "unknown (%d)", state);
1614         return buffer;
1615 }
1616
/*
 * Move a node to discovery state <state>: log and trace the transition,
 * cancel a stale delayed-retry timer, keep the per-state counters
 * balanced, add the node to the vport's fc_nodes list on first use, and
 * let lpfc_nlp_state_cleanup() sync flags and the fc transport rport.
 */
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int  old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
			"%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
			vport->phba->brd_no, vport->vpi,
			ndlp->nlp_DID,
			lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	/* Leaving NPR with a delayed-retry timer armed: cancel it */
	if (old_state == NLP_STE_NPR_NODE &&
	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	/* Leaving UNMAPPED: undo the bits that state set */
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	/* First transition enqueues the node on the vport list; on later
	 * transitions decrement the counter of the state being left.
	 */
	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
1656
1657 void
1658 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1659 {
1660         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1661
1662         if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1663                 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1664         if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1665                 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1666         spin_lock_irq(shost->host_lock);
1667         list_del_init(&ndlp->nlp_listp);
1668         spin_unlock_irq(shost->host_lock);
1669         lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1670                                NLP_STE_UNUSED_NODE);
1671 }
1672
1673 void
1674 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1675 {
1676         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1677
1678         if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1679                 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1680         if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1681                 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1682         spin_lock_irq(shost->host_lock);
1683         list_del_init(&ndlp->nlp_listp);
1684         ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
1685         spin_unlock_irq(shost->host_lock);
1686         lpfc_nlp_put(ndlp);
1687 }
1688
1689 /*
1690  * Start / ReStart rescue timer for Discovery / RSCN handling
1691  */
1692 void
1693 lpfc_set_disctmo(struct lpfc_vport *vport)
1694 {
1695         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1696         struct lpfc_hba  *phba = vport->phba;
1697         uint32_t tmo;
1698
1699         if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
1700                 /* For FAN, timeout should be greater then edtov */
1701                 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1702         } else {
1703                 /* Normal discovery timeout should be > then ELS/CT timeout
1704                  * FC spec states we need 3 * ratov for CT requests
1705                  */
1706                 tmo = ((phba->fc_ratov * 3) + 3);
1707         }
1708
1709
1710         if (!timer_pending(&vport->fc_disctmo)) {
1711                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1712                         "set disc timer:  tmo:x%x state:x%x flg:x%x",
1713                         tmo, vport->port_state, vport->fc_flag);
1714         }
1715
1716         mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1717         spin_lock_irq(shost->host_lock);
1718         vport->fc_flag |= FC_DISC_TMO;
1719         spin_unlock_irq(shost->host_lock);
1720
1721         /* Start Discovery Timer state <hba_state> */
1722         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1723                         "%d (%d):0247 Start Discovery Timer state x%x "
1724                         "Data: x%x x%lx x%x x%x\n",
1725                         phba->brd_no, vport->vpi, vport->port_state, tmo,
1726                         (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1727                         vport->fc_adisc_cnt);
1728
1729         return;
1730 }
1731
/*
 * Cancel rescue timer for Discovery / RSCN handling.
 * Clears FC_DISC_TMO, synchronously stops the timer, and clears the
 * WORKER_DISC_TMO event so the worker thread will not process a stale
 * expiry.  Always returns 0.
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if its running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		/* del_timer_sync() waits for a running handler, so the
		 * worker event cleared below cannot be re-set by a racing
		 * expiry afterwards.
		 */
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d (%d):0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, vport->vpi, vport->port_state,
			vport->fc_flag, vport->fc_plogi_cnt,
			vport->fc_adisc_cnt);

	return 0;
}
1767
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true (1) if iocb belongs to the given nport, else 0.
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport    *vport = ndlp->vport;

	/* An iocb from a different vport can never match this node */
	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			/* CT requests match on the RPI in ulpContext */
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
			/* no break: falls through to also try the DID and
			 * context1 matches below — looks intentional, confirm
			 */
		case CMD_ELS_REQUEST64_CR:
			/* ELS requests match on the destination ID */
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
			/* no break: falls through to the context1 match */
		case CMD_XMIT_ELS_RSP64_CX:
			/* ELS responses carry the ndlp in context1 */
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {
		/* no nport association checked on the extra ring */
	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		/* FCP commands match on the RPI in ulpContext */
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {
		/* no nport association checked on this ring */
	}
	return 0;
}
1813
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 * Pending txq iocbs that match the node are collected under hbalock and
 * completed (or released) afterwards, outside the lock.  Returns 0.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Complete the collected iocbs now that the lock is dropped */
	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			/* Fail the iocb with a driver-aborted status */
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl)(phba, iocb, iocb);
		}
	}

	return 0;
}
1877
1878 /*
1879  * Free rpi associated with LPFC_NODELIST entry.
1880  * This routine is called from lpfc_freenode(), when we are removing
1881  * a LPFC_NODELIST entry. It is also called if the driver initiates a
1882  * LOGO that completes successfully, and we are waiting to PLOGI back
1883  * to the remote NPort. In addition, it is called after we receive
1884  * and unsolicated ELS cmd, send back a rsp, the rsp completes and
1885  * we are waiting to PLOGI back to the remote NPort.
1886  */
1887 int
1888 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1889 {
1890         struct lpfc_hba *phba = vport->phba;
1891         LPFC_MBOXQ_t    *mbox;
1892         int rc;
1893
1894         if (ndlp->nlp_rpi) {
1895                 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1896                 if (mbox) {
1897                         lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1898                         mbox->vport = vport;
1899                         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1900                         rc = lpfc_sli_issue_mbox(phba, mbox,
1901                                                  (MBX_NOWAIT | MBX_STOP_IOCB));
1902                         if (rc == MBX_NOT_FINISHED)
1903                                 mempool_free(mbox, phba->mbox_mem_pool);
1904                 }
1905                 lpfc_no_rpi(phba, ndlp);
1906                 ndlp->nlp_rpi = 0;
1907                 return 1;
1908         }
1909         return 0;
1910 }
1911
1912 void
1913 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1914 {
1915         struct lpfc_hba  *phba  = vport->phba;
1916         LPFC_MBOXQ_t     *mbox;
1917         int rc;
1918
1919         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1920         if (mbox) {
1921                 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1922                 mbox->vport = vport;
1923                 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1924                 rc = lpfc_sli_issue_mbox(phba, mbox,
1925                                          (MBX_NOWAIT | MBX_STOP_IOCB));
1926                 if (rc == MBX_NOT_FINISHED) {
1927                         mempool_free(mbox, phba->mbox_mem_pool);
1928                 }
1929         }
1930 }
1931
1932 void
1933 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1934 {
1935         struct lpfc_hba  *phba  = vport->phba;
1936         LPFC_MBOXQ_t     *mbox;
1937         int rc;
1938
1939         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1940         if (mbox) {
1941                 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1942                 mbox->vport = vport;
1943                 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1944                 rc = lpfc_sli_issue_mbox(phba, mbox,
1945                                          (MBX_NOWAIT | MBX_STOP_IOCB));
1946                 if (rc == MBX_NOT_FINISHED) {
1947                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1948                                         "%d (%d):1815 Could not issue "
1949                                         "unreg_did (default rpis)\n",
1950                                         phba->brd_no, vport->vpi);
1951                         mempool_free(mbox, phba->mbox_mem_pool);
1952                 }
1953         }
1954 }
1955
1956 /*
1957  * Free resources associated with LPFC_NODELIST entry
1958  * so it can be freed.
1959  */
1960 static int
1961 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1962 {
1963         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1964         struct lpfc_hba  *phba = vport->phba;
1965         LPFC_MBOXQ_t *mb, *nextmb;
1966         struct lpfc_dmabuf *mp;
1967
1968         /* Cleanup node for NPort <nlp_DID> */
1969         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1970                         "%d (%d):0900 Cleanup node for NPort x%x "
1971                         "Data: x%x x%x x%x\n",
1972                         phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
1973                         ndlp->nlp_state, ndlp->nlp_rpi);
1974
1975         lpfc_dequeue_node(vport, ndlp);
1976
1977         /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1978         if ((mb = phba->sli.mbox_active)) {
1979                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1980                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1981                         mb->context2 = NULL;
1982                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1983                 }
1984         }
1985
1986         spin_lock_irq(&phba->hbalock);
1987         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1988                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1989                     (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1990                         mp = (struct lpfc_dmabuf *) (mb->context1);
1991                         if (mp) {
1992                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1993                                 kfree(mp);
1994                         }
1995                         list_del(&mb->list);
1996                         mempool_free(mb, phba->mbox_mem_pool);
1997                         lpfc_nlp_put(ndlp);
1998                 }
1999         }
2000         spin_unlock_irq(&phba->hbalock);
2001
2002         lpfc_els_abort(phba,ndlp);
2003         spin_lock_irq(shost->host_lock);
2004         ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2005         spin_unlock_irq(shost->host_lock);
2006
2007         ndlp->nlp_last_elscmd = 0;
2008         del_timer_sync(&ndlp->nlp_delayfunc);
2009
2010         if (!list_empty(&ndlp->els_retry_evt.evt_listp))
2011                 list_del_init(&ndlp->els_retry_evt.evt_listp);
2012         if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2013                 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2014
2015         if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
2016                 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2017                 complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
2018         }
2019
2020         lpfc_unreg_rpi(vport, ndlp);
2021
2022         return 0;
2023 }
2024
2025 /*
2026  * Check to see if we can free the nlp back to the freelist.
2027  * If we are in the middle of using the nlp in the discovery state
2028  * machine, defer the free till we reach the end of the state machine.
2029  */
2030 static void
2031 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2032 {
2033         struct lpfc_rport_data *rdata;
2034
2035         if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2036                 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2037         }
2038
2039         lpfc_cleanup_node(vport, ndlp);
2040
2041         /*
2042          * We can get here with a non-NULL ndlp->rport because when we
2043          * unregister a rport we don't break the rport/node linkage.  So if we
2044          * do, make sure we don't leaving any dangling pointers behind.
2045          */
2046         if (ndlp->rport) {
2047                 rdata = ndlp->rport->dd_data;
2048                 rdata->pnode = NULL;
2049                 ndlp->rport = NULL;
2050         }
2051 }
2052
/*
 * Decide whether DID <did> addresses node <ndlp>.  Besides a direct
 * nlp_DID match, this also matches when one side carries only the ALPA
 * byte (domain and area both zero — presumably a loop-local address,
 * confirm) and the other carries the full address with the same ALPA
 * and our own domain/area.
 */
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	/* Broadcast never matches a specific node */
	if (did == Bcast_DID)
		return 0;

	/* A node with no DID assigned cannot match */
	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		/* Same ALPA: maybe the stored DID lacks domain/area... */
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		/* ...or the DID being looked up lacks domain/area */
		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
2101
2102 /* Search for a nodelist entry */
2103 static struct lpfc_nodelist *
2104 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2105 {
2106         struct lpfc_hba  *phba = vport->phba;
2107         struct lpfc_nodelist *ndlp;
2108         uint32_t data1;
2109
2110         list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2111                 if (lpfc_matchdid(vport, ndlp, did)) {
2112                         data1 = (((uint32_t) ndlp->nlp_state << 24) |
2113                                  ((uint32_t) ndlp->nlp_xri << 16) |
2114                                  ((uint32_t) ndlp->nlp_type << 8) |
2115                                  ((uint32_t) ndlp->nlp_rpi & 0xff));
2116                         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
2117                                         "%d (%d):0929 FIND node DID "
2118                                         " Data: x%p x%x x%x x%x\n",
2119                                         phba->brd_no, vport->vpi,
2120                                         ndlp, ndlp->nlp_DID,
2121                                         ndlp->nlp_flag, data1);
2122                         return ndlp;
2123                 }
2124         }
2125
2126         /* FIND node did <did> NOT FOUND */
2127         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
2128                         "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
2129                         phba->brd_no, vport->vpi, did);
2130         return NULL;
2131 }
2132
2133 struct lpfc_nodelist *
2134 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2135 {
2136         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2137         struct lpfc_nodelist *ndlp;
2138
2139         spin_lock_irq(shost->host_lock);
2140         ndlp = __lpfc_findnode_did(vport, did);
2141         spin_unlock_irq(shost->host_lock);
2142         return ndlp;
2143 }
2144
/*
 * Find -- or allocate and enqueue -- the node for <did> and mark it for
 * discovery (NLP_NPR_2B_DISC).  While in RSCN mode only DIDs present in
 * the RSCN payload qualify.  Returns the node, or NULL when this DID
 * should not be discovered now.
 */
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		/* Unknown DID: in RSCN mode, only create nodes for DIDs
		 * named in the RSCN payload
		 */
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}
	if (vport->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(vport, did)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(vport, ndlp);
		} else
			/* DID not in the RSCN payload: nothing to do */
			ndlp = NULL;
	} else {
		/* Node already mid-login: leave it alone */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
2191
2192 /* Build a list of nodes to discover based on the loopmap */
2193 void
2194 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
2195 {
2196         struct lpfc_hba  *phba = vport->phba;
2197         int j;
2198         uint32_t alpa, index;
2199
2200         if (!lpfc_is_link_up(phba))
2201                 return;
2202
2203         if (phba->fc_topology != TOPOLOGY_LOOP)
2204                 return;
2205
2206         /* Check for loop map present or not */
2207         if (phba->alpa_map[0]) {
2208                 for (j = 1; j <= phba->alpa_map[0]; j++) {
2209                         alpa = phba->alpa_map[j];
2210                         if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
2211                                 continue;
2212                         lpfc_setup_disc_node(vport, alpa);
2213                 }
2214         } else {
2215                 /* No alpamap, so try all alpa's */
2216                 for (j = 0; j < FC_MAXLOOP; j++) {
2217                         /* If cfg_scan_down is set, start from highest
2218                          * ALPA (0xef) to lowest (0x1).
2219                          */
2220                         if (phba->cfg_scan_down)
2221                                 index = j;
2222                         else
2223                                 index = FC_MAXLOOP - j - 1;
2224                         alpa = lpfcAlpaArray[index];
2225                         if ((vport->fc_myDID & 0xff) == alpa)
2226                                 continue;
2227                         lpfc_setup_disc_node(vport, alpa);
2228                 }
2229         }
2230         return;
2231 }
2232
2233 void
2234 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2235 {
2236         LPFC_MBOXQ_t *mbox;
2237         struct lpfc_sli *psli = &phba->sli;
2238         struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2239         struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
2240         struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
2241         int  rc;
2242
2243         /*
2244          * if it's not a physical port or if we already send
2245          * clear_la then don't send it.
2246          */
2247         if ((phba->link_state >= LPFC_CLEAR_LA) ||
2248             (vport->port_type != LPFC_PHYSICAL_PORT))
2249                 return;
2250
2251                         /* Link up discovery */
2252         if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2253                 phba->link_state = LPFC_CLEAR_LA;
2254                 lpfc_clear_la(phba, mbox);
2255                 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2256                 mbox->vport = vport;
2257                 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
2258                                                       MBX_STOP_IOCB));
2259                 if (rc == MBX_NOT_FINISHED) {
2260                         mempool_free(mbox, phba->mbox_mem_pool);
2261                         lpfc_disc_flush_list(vport);
2262                         extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2263                         fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2264                         next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2265                         phba->link_state = LPFC_HBA_ERROR;
2266                 }
2267         }
2268 }
2269
2270 /* Reg_vpi to tell firmware to resume normal operations */
2271 void
2272 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2273 {
2274         LPFC_MBOXQ_t *regvpimbox;
2275
2276         regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2277         if (regvpimbox) {
2278                 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2279                 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2280                 regvpimbox->vport = vport;
2281                 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2282                                         (MBX_NOWAIT | MBX_STOP_IOCB))
2283                                         == MBX_NOT_FINISHED) {
2284                         mempool_free(regvpimbox, phba->mbox_mem_pool);
2285                 }
2286         }
2287 }
2288
/* Start Link up / RSCN discovery on NPR nodes.
 *
 * Drives the next step of discovery for @vport: first ADISCs, then either
 * a REG_VPI (SLI3/NPIV path) or CLEAR_LA + PLOGIs (SLI2 path), and finally
 * RSCN follow-up processing.  Returns early whenever outstanding ELS
 * traffic (ADISC/PLOGI) still has to complete before the next step.
 */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	/* Discovery only makes sense with a link. */
	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	/* (Re)arm the discovery timeout for this round. */
	lpfc_set_disctmo(vport);

	/* NOTE(review): did_changed is computed but not read below in this
	 * function — confirm whether a consumer was removed.
	 */
	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d (%d):0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, vport->vpi, vport->port_state,
			vport->fc_flag, vport->fc_plogi_cnt,
			vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	/* ADISC completions will re-enter discovery. */
	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_RSCN_MODE)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			/* No PLOGIs outstanding: discovery is done; clear
			 * the active flag and cancel the discovery timer.
			 */
			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
2390
/*
 *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 *  ring that match the specified nodelist.  IOCBs still on the txq are
 *  completed locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED; IOCBs
 *  already handed to the HBA (txcmplq) are asked to abort and will be
 *  finished through the normal completion path.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		/* Only ELS request/response IOCBs are pulled off. */
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			/* In flight: request an abort rather than completing
			 * here; the HBA still owns this iocb.
			 */
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Complete the collected txq iocbs with an aborted status (outside
	 * the lock, since completion handlers may sleep or re-lock).
	 */
	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}
2451
2452 void
2453 lpfc_disc_flush_list(struct lpfc_vport *vport)
2454 {
2455         struct lpfc_nodelist *ndlp, *next_ndlp;
2456         struct lpfc_hba *phba = vport->phba;
2457
2458         if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2459                 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2460                                          nlp_listp) {
2461                         if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2462                             ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2463                                 lpfc_free_tx(phba, ndlp);
2464                                 lpfc_nlp_put(ndlp);
2465                         }
2466                 }
2467         }
2468 }
2469
/* Release all discovery state held by @vport: pending RSCN buffers,
 * outstanding ELS commands, and nodes still mid-PLOGI/ADISC.
 */
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
2477
2478 /*****************************************************************************/
2479 /*
2480  * NAME:     lpfc_disc_timeout
2481  *
2482  * FUNCTION: Fibre Channel driver discovery timeout routine.
2483  *
2484  * EXECUTION ENVIRONMENT: interrupt only
2485  *
2486  * CALLED FROM:
2487  *      Timer function
2488  *
2489  * RETURNS:
2490  *      none
2491  */
2492 /*****************************************************************************/
2493 void
2494 lpfc_disc_timeout(unsigned long ptr)
2495 {
2496         struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2497         struct lpfc_hba   *phba = vport->phba;
2498         unsigned long flags = 0;
2499
2500         if (unlikely(!phba))
2501                 return;
2502
2503         if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2504                 spin_lock_irqsave(&vport->work_port_lock, flags);
2505                 vport->work_port_events |= WORKER_DISC_TMO;
2506                 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2507
2508                 spin_lock_irqsave(&phba->hbalock, flags);
2509                 if (phba->work_wait)
2510                         lpfc_worker_wake_up(phba);
2511                 spin_unlock_irqrestore(&phba->hbalock, flags);
2512         }
2513         return;
2514 }
2515
2516 static void
2517 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2518 {
2519         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2520         struct lpfc_hba  *phba = vport->phba;
2521         struct lpfc_sli  *psli = &phba->sli;
2522         struct lpfc_nodelist *ndlp, *next_ndlp;
2523         LPFC_MBOXQ_t *initlinkmbox;
2524         int rc, clrlaerr = 0;
2525
2526         if (!(vport->fc_flag & FC_DISC_TMO))
2527                 return;
2528
2529         spin_lock_irq(shost->host_lock);
2530         vport->fc_flag &= ~FC_DISC_TMO;
2531         spin_unlock_irq(shost->host_lock);
2532
2533         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2534                 "disc timeout:    state:x%x rtry:x%x flg:x%x",
2535                 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2536
2537         switch (vport->port_state) {
2538
2539         case LPFC_LOCAL_CFG_LINK:
2540         /* port_state is identically  LPFC_LOCAL_CFG_LINK while waiting for
2541          * FAN
2542          */
2543                                 /* FAN timeout */
2544                 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
2545                                 "%d (%d):0221 FAN timeout\n",
2546                                 phba->brd_no, vport->vpi);
2547
2548                 /* Start discovery by sending FLOGI, clean up old rpis */
2549                 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2550                                          nlp_listp) {
2551                         if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2552                                 continue;
2553                         if (ndlp->nlp_type & NLP_FABRIC) {
2554                                 /* Clean up the ndlp on Fabric connections */
2555                                 lpfc_drop_node(vport, ndlp);
2556                         } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2557                                 /* Fail outstanding IO now since device
2558                                  * is marked for PLOGI.
2559                                  */
2560                                 lpfc_unreg_rpi(vport, ndlp);
2561                         }
2562                 }
2563                 if (vport->port_state != LPFC_FLOGI) {
2564                         vport->port_state = LPFC_FLOGI;
2565                         lpfc_set_disctmo(vport);
2566                         lpfc_initial_flogi(vport);
2567                 }
2568                 break;
2569
2570         case LPFC_FDISC:
2571         case LPFC_FLOGI:
2572         /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2573                 /* Initial FLOGI timeout */
2574                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2575                                 "%d (%d):0222 Initial %s timeout\n",
2576                                 phba->brd_no, vport->vpi,
2577                                 vport->vpi ? "FLOGI" : "FDISC");
2578
2579                 /* Assume no Fabric and go on with discovery.
2580                  * Check for outstanding ELS FLOGI to abort.
2581                  */
2582
2583                 /* FLOGI failed, so just use loop map to make discovery list */
2584                 lpfc_disc_list_loopmap(vport);
2585
2586                 /* Start discovery */
2587                 lpfc_disc_start(vport);
2588                 break;
2589
2590         case LPFC_FABRIC_CFG_LINK:
2591         /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2592            NameServer login */
2593                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2594                                 "%d (%d):0223 Timeout while waiting for "
2595                                 "NameServer login\n",
2596                                 phba->brd_no, vport->vpi);
2597
2598                 /* Next look for NameServer ndlp */
2599                 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2600                 if (ndlp)
2601                         lpfc_nlp_put(ndlp);
2602                 /* Start discovery */
2603                 lpfc_disc_start(vport);
2604                 break;
2605
2606         case LPFC_NS_QRY:
2607         /* Check for wait for NameServer Rsp timeout */
2608                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2609                                 "%d (%d):0224 NameServer Query timeout "
2610                                 "Data: x%x x%x\n",
2611                                 phba->brd_no, vport->vpi,
2612                                 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2613
2614                 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2615                         /* Try it one more time */
2616                         vport->fc_ns_retry++;
2617                         rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2618                                          vport->fc_ns_retry, 0);
2619                         if (rc == 0)
2620                                 break;
2621                 }
2622                 vport->fc_ns_retry = 0;
2623
2624                 /*
2625                  * Discovery is over.
2626                  * set port_state to PORT_READY if SLI2.
2627                  * cmpl_reg_vpi will set port_state to READY for SLI3.
2628                  */
2629                 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2630                         lpfc_issue_reg_vpi(phba, vport);
2631                 else  { /* NPIV Not enabled */
2632                         lpfc_issue_clear_la(phba, vport);
2633                         vport->port_state = LPFC_VPORT_READY;
2634                 }
2635
2636                 /* Setup and issue mailbox INITIALIZE LINK command */
2637                 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2638                 if (!initlinkmbox) {
2639                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2640                                         "%d (%d):0206 Device Discovery "
2641                                         "completion error\n",
2642                                         phba->brd_no, vport->vpi);
2643                         phba->link_state = LPFC_HBA_ERROR;
2644                         break;
2645                 }
2646
2647                 lpfc_linkdown(phba);
2648                 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2649                                phba->cfg_link_speed);
2650                 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2651                 initlinkmbox->vport = vport;
2652                 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2653                 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2654                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2655                 lpfc_set_loopback_flag(phba);
2656                 if (rc == MBX_NOT_FINISHED)
2657                         mempool_free(initlinkmbox, phba->mbox_mem_pool);
2658
2659                 break;
2660
2661         case LPFC_DISC_AUTH:
2662         /* Node Authentication timeout */
2663                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2664                                 "%d (%d):0227 Node Authentication timeout\n",
2665                                 phba->brd_no, vport->vpi);
2666                 lpfc_disc_flush_list(vport);
2667
2668                 /*
2669                  * set port_state to PORT_READY if SLI2.
2670                  * cmpl_reg_vpi will set port_state to READY for SLI3.
2671                  */
2672                 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2673                         lpfc_issue_reg_vpi(phba, vport);
2674                 else {  /* NPIV Not enabled */
2675                         lpfc_issue_clear_la(phba, vport);
2676                         vport->port_state = LPFC_VPORT_READY;
2677                 }
2678                 break;
2679
2680         case LPFC_VPORT_READY:
2681                 if (vport->fc_flag & FC_RSCN_MODE) {
2682                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2683                                         "%d (%d):0231 RSCN timeout Data: x%x "
2684                                         "x%x\n",
2685                                         phba->brd_no, vport->vpi,
2686                                         vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2687
2688                         /* Cleanup any outstanding ELS commands */
2689                         lpfc_els_flush_cmd(vport);
2690
2691                         lpfc_els_flush_rscn(vport);
2692                         lpfc_disc_flush_list(vport);
2693                 }
2694                 break;
2695
2696         default:
2697                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2698                                 "%d (%d):0229 Unexpected discovery timeout, "
2699                                 "vport State x%x\n",
2700                                 phba->brd_no, vport->vpi, vport->port_state);
2701
2702                 break;
2703         }
2704
2705         switch (phba->link_state) {
2706         case LPFC_CLEAR_LA:
2707                                 /* CLEAR LA timeout */
2708                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2709                                 "%d (%d):0228 CLEAR LA timeout\n",
2710                                 phba->brd_no, vport->vpi);
2711                 clrlaerr = 1;
2712                 break;
2713
2714         case LPFC_LINK_UNKNOWN:
2715         case LPFC_WARM_START:
2716         case LPFC_INIT_START:
2717         case LPFC_INIT_MBX_CMDS:
2718         case LPFC_LINK_DOWN:
2719         case LPFC_LINK_UP:
2720         case LPFC_HBA_ERROR:
2721                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2722                                 "%d (%d):0230 Unexpected timeout, hba link "
2723                                 "state x%x\n",
2724                                 phba->brd_no, vport->vpi, phba->link_state);
2725                 clrlaerr = 1;
2726                 break;
2727
2728         case LPFC_HBA_READY:
2729                 break;
2730         }
2731
2732         if (clrlaerr) {
2733                 lpfc_disc_flush_list(vport);
2734                 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2735                 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2736                 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2737                 vport->port_state = LPFC_VPORT_READY;
2738         }
2739
2740         return;
2741 }
2742
/*
 * This routine handles processing the REG_LOGIN mailbox command issued
 * for the FDMI (Fabric-Device Management Interface) well known port.
 * It is setup in the LPFC_MBOXQ as the completion routine when the
 * command is handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	/* The firmware returns the assigned RPI in the first mailbox word. */
	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	/* NOTE(review): any cfg_fdmi_on value other than 1 takes the delayed
	 * path — confirm values other than 1 and 2 cannot reach here.
	 */
	if (phba->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

				/* Mailbox took a reference to the node */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
2782
2783 static int
2784 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2785 {
2786         uint16_t *rpi = param;
2787
2788         return ndlp->nlp_rpi == *rpi;
2789 }
2790
2791 static int
2792 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2793 {
2794         return memcmp(&ndlp->nlp_portname, param,
2795                       sizeof(ndlp->nlp_portname)) == 0;
2796 }
2797
2798 struct lpfc_nodelist *
2799 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2800 {
2801         struct lpfc_nodelist *ndlp;
2802
2803         list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2804                 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2805                     filter(ndlp, param))
2806                         return ndlp;
2807         }
2808         return NULL;
2809 }
2810
2811 /*
2812  * Search node lists for a remote port matching filter criteria
2813  * Caller needs to hold host_lock before calling this routine.
2814  */
2815 struct lpfc_nodelist *
2816 lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2817 {
2818         struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
2819         struct lpfc_nodelist *ndlp;
2820
2821         spin_lock_irq(shost->host_lock);
2822         ndlp = __lpfc_find_node(vport, filter, param);
2823         spin_unlock_irq(shost->host_lock);
2824         return ndlp;
2825 }
2826
2827 /*
2828  * This routine looks up the ndlp lists for the given RPI. If rpi found it
2829  * returns the node list element pointer else return NULL.
2830  */
2831 struct lpfc_nodelist *
2832 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2833 {
2834         return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2835 }
2836
2837 struct lpfc_nodelist *
2838 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2839 {
2840         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2841         struct lpfc_nodelist *ndlp;
2842
2843         spin_lock_irq(shost->host_lock);
2844         ndlp = __lpfc_findnode_rpi(vport, rpi);
2845         spin_unlock_irq(shost->host_lock);
2846         return ndlp;
2847 }
2848
2849 /*
2850  * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
2851  * returns the node element list pointer else return NULL.
2852  */
2853 struct lpfc_nodelist *
2854 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2855 {
2856         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2857         struct lpfc_nodelist *ndlp;
2858
2859         spin_lock_irq(shost->host_lock);
2860         ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2861         spin_unlock_irq(shost->host_lock);
2862         return ndlp;
2863 }
2864
2865 void
2866 lpfc_dev_loss_delay(unsigned long ptr)
2867 {
2868         struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2869         struct lpfc_vport *vport = ndlp->vport;
2870         struct lpfc_hba   *phba = vport->phba;
2871         struct lpfc_work_evt  *evtp = &ndlp->dev_loss_evt;
2872         unsigned long flags;
2873
2874         evtp = &ndlp->dev_loss_evt;
2875
2876         spin_lock_irqsave(&phba->hbalock, flags);
2877         if (!list_empty(&evtp->evt_listp)) {
2878                 spin_unlock_irqrestore(&phba->hbalock, flags);
2879                 return;
2880         }
2881
2882         evtp->evt_arg1  = ndlp;
2883         evtp->evt       = LPFC_EVT_DEV_LOSS_DELAY;
2884         list_add_tail(&evtp->evt_listp, &phba->work_list);
2885         if (phba->work_wait)
2886                 lpfc_worker_wake_up(phba);
2887         spin_unlock_irqrestore(&phba->hbalock, flags);
2888         return;
2889 }
2890
2891 void
2892 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2893               uint32_t did)
2894 {
2895         memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2896         INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2897         INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2898         init_timer(&ndlp->nlp_delayfunc);
2899         ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2900         ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2901         ndlp->nlp_DID = did;
2902         ndlp->vport = vport;
2903         ndlp->nlp_sid = NLP_NO_SID;
2904         INIT_LIST_HEAD(&ndlp->nlp_listp);
2905         kref_init(&ndlp->kref);
2906
2907         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2908                 "node init:       did:x%x",
2909                 ndlp->nlp_DID, 0, 0);
2910
2911         return;
2912 }
2913
2914 void
2915 lpfc_nlp_release(struct kref *kref)
2916 {
2917         struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2918                                                   kref);
2919
2920         lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2921                 "node release:    did:x%x flg:x%x type:x%x",
2922                 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2923
2924         lpfc_nlp_remove(ndlp->vport, ndlp);
2925         mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2926 }
2927
2928 struct lpfc_nodelist *
2929 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2930 {
2931         if (ndlp)
2932                 kref_get(&ndlp->kref);
2933         return ndlp;
2934 }
2935
2936 int
2937 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2938 {
2939         return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2940 }