1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
29
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
33
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
35
36 /*
37  * Module params
38  */
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50                  " Range[false:0|true:1]");
51
52 /*
53  * Global variables
54  */
55 static u32 bnad_rxqs_per_cq = 2;
56 static u32 bna_id;
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60
61 /*
62  * Local MACROS
63  */
64 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
65         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
66          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67          ((_bnad)->pcidev->irq))
68
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)        \
70 do {                                                            \
71         (_res_info)->res_type = BNA_RES_T_MEM;                  \
72         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
73         (_res_info)->res_u.mem_info.num = (_num);               \
74         (_res_info)->res_u.mem_info.len = (_size);              \
75 } while (0)
76
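/*
 * Add the adapter to the global bnad_list and assign it a unique id;
 * protected by bnad_list_mutex.
 */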
77 static void
78 bnad_add_to_list(struct bnad *bnad)
79 {
80         mutex_lock(&bnad_list_mutex);
81         list_add_tail(&bnad->list_entry, &bnad_list);
82         bnad->id = bna_id++;
83         mutex_unlock(&bnad_list_mutex);
84 }
85
86 static void
87 bnad_remove_from_list(struct bnad *bnad)
88 {
89         mutex_lock(&bnad_list_mutex);
90         list_del(&bnad->list_entry);
91         mutex_unlock(&bnad_list_mutex);
92 }
93
94 /*
95  * Reinitialize completions in CQ, once Rx is taken down
96  */
97 static void
98 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
99 {
100         struct bna_cq_entry *cmpl;
101         int i;
102
103         for (i = 0; i < ccb->q_depth; i++) {
104                 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
105                 cmpl->valid = 0;
106         }
107 }
108
109 /* Tx Datapath functions */
110
111
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
113 static u32
114 bnad_tx_buff_unmap(struct bnad *bnad,
115                               struct bnad_tx_unmap *unmap_q,
116                               u32 q_depth, u32 index)
117 {
118         struct bnad_tx_unmap *unmap;
119         struct sk_buff *skb;
120         int vector, nvecs;
121
122         unmap = &unmap_q[index];
123         nvecs = unmap->nvecs;
124
125         skb = unmap->skb;
126         unmap->skb = NULL;
127         unmap->nvecs = 0;
128         dma_unmap_single(&bnad->pcidev->dev,
129                 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130                 skb_headlen(skb), DMA_TO_DEVICE);
131         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132         nvecs--;
133
134         vector = 0;
135         while (nvecs) {
136                 vector++;
137                 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138                         vector = 0;
139                         BNA_QE_INDX_INC(index, q_depth);
140                         unmap = &unmap_q[index];
141                 }
142
143                 dma_unmap_page(&bnad->pcidev->dev,
144                         dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145                         dma_unmap_len(&unmap->vectors[vector], dma_len),
146                         DMA_TO_DEVICE);
147                 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
148                 nvecs--;
149         }
150
151         BNA_QE_INDX_INC(index, q_depth);
152
153         return index;
154 }
155
156 /*
157  * Frees all pending Tx Bufs
158  * At this point no activity is expected on the Q,
159  * so DMA unmap & freeing is fine.
160  */
161 static void
162 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
163 {
164         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
165         struct sk_buff *skb;
166         int i;
167
168         for (i = 0; i < tcb->q_depth; i++) {
169                 skb = unmap_q[i].skb;
170                 if (!skb)
171                         continue;
172                 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
173
174                 dev_kfree_skb_any(skb);
175         }
176 }
177
178 /*
179  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
180  * Can be called in a) Interrupt context
181  *                  b) Sending context
182  */
183 static u32
184 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
185 {
186         u32 sent_packets = 0, sent_bytes = 0;
187         u32 wis, unmap_wis, hw_cons, cons, q_depth;
188         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
189         struct bnad_tx_unmap *unmap;
190         struct sk_buff *skb;
191
192         /* Just return if TX is stopped */
193         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
194                 return 0;
195
196         hw_cons = *(tcb->hw_consumer_index);
197         cons = tcb->consumer_index;
198         q_depth = tcb->q_depth;
199
200         wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
201         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
202
203         while (wis) {
204                 unmap = &unmap_q[cons];
205
206                 skb = unmap->skb;
207
208                 sent_packets++;
209                 sent_bytes += skb->len;
210
211                 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
212                 wis -= unmap_wis;
213
214                 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
215                 dev_kfree_skb_any(skb);
216         }
217
218         /* Update consumer pointers. */
219         tcb->consumer_index = hw_cons;
220
221         tcb->txq->tx_packets += sent_packets;
222         tcb->txq->tx_bytes += sent_bytes;
223
224         return sent_packets;
225 }
226
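/*
 * Reap Tx completions on a TCB, wake the netdev queue once enough
 * entries are free, and ack the IB with the number of reaped packets.
 */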
227 static u32
228 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
229 {
230         struct net_device *netdev = bnad->netdev;
231         u32 sent = 0;
232
233         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
234                 return 0;
235
236         sent = bnad_txcmpl_process(bnad, tcb);
237         if (sent) {
238                 if (netif_queue_stopped(netdev) &&
239                     netif_carrier_ok(netdev) &&
240                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
241                                     BNAD_NETIF_WAKE_THRESHOLD) {
242                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
243                                 netif_wake_queue(netdev);
244                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
245                         }
246                 }
247         }
248
249         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
250                 bna_ib_ack(tcb->i_dbell, sent);
251
252         smp_mb__before_clear_bit();
253         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
254
255         return sent;
256 }
257
258 /* MSIX Tx Completion Handler */
259 static irqreturn_t
260 bnad_msix_tx(int irq, void *data)
261 {
262         struct bna_tcb *tcb = (struct bna_tcb *)data;
263         struct bnad *bnad = tcb->bnad;
264
265         bnad_tx_complete(bnad, tcb);
266
267         return IRQ_HANDLED;
268 }
269
270 static inline void
271 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
272 {
273         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
274
275         unmap_q->reuse_pi = -1;
276         unmap_q->alloc_order = -1;
277         unmap_q->map_size = 0;
278         unmap_q->type = BNAD_RXBUF_NONE;
279 }
280
281 /* Default is page-based allocation. Multi-buffer support - TBD */
282 static int
283 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
284 {
285         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
286         int order;
287
288         bnad_rxq_alloc_uninit(bnad, rcb);
289
290         order = get_order(rcb->rxq->buffer_size);
291
292         unmap_q->type = BNAD_RXBUF_PAGE;
293
294         if (bna_is_small_rxq(rcb->id)) {
295                 unmap_q->alloc_order = 0;
296                 unmap_q->map_size = rcb->rxq->buffer_size;
297         } else {
298                 if (rcb->rxq->multi_buffer) {
299                         unmap_q->alloc_order = 0;
300                         unmap_q->map_size = rcb->rxq->buffer_size;
301                         unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
302                 } else {
303                         unmap_q->alloc_order = order;
304                         unmap_q->map_size =
305                                 (rcb->rxq->buffer_size > 2048) ?
306                                 PAGE_SIZE << order : 2048;
307                 }
308         }
309
310         BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
311
312         return 0;
313 }
314
315 static inline void
316 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
317 {
318         if (!unmap->page)
319                 return;
320
321         dma_unmap_page(&bnad->pcidev->dev,
322                         dma_unmap_addr(&unmap->vector, dma_addr),
323                         unmap->vector.len, DMA_FROM_DEVICE);
324         put_page(unmap->page);
325         unmap->page = NULL;
326         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
327         unmap->vector.len = 0;
328 }
329
330 static inline void
331 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
332 {
333         if (!unmap->skb)
334                 return;
335
336         dma_unmap_single(&bnad->pcidev->dev,
337                         dma_unmap_addr(&unmap->vector, dma_addr),
338                         unmap->vector.len, DMA_FROM_DEVICE);
339         dev_kfree_skb_any(unmap->skb);
340         unmap->skb = NULL;
341         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
342         unmap->vector.len = 0;
343 }
344
345 static void
346 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
347 {
348         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
349         int i;
350
351         for (i = 0; i < rcb->q_depth; i++) {
352                 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
353
354                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
355                         bnad_rxq_cleanup_skb(bnad, unmap);
356                 else
357                         bnad_rxq_cleanup_page(bnad, unmap);
358         }
359         bnad_rxq_alloc_uninit(bnad, rcb);
360 }
361
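/*
 * Allocate and DMA-map page buffers for the RxQ. Pages larger than
 * map_size are shared across consecutive entries (tracked via reuse_pi);
 * the doorbell is rung once for all buffers posted in this call.
 */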
362 static u32
363 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
364 {
365         u32 alloced, prod, q_depth;
366         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
367         struct bnad_rx_unmap *unmap, *prev;
368         struct bna_rxq_entry *rxent;
369         struct page *page;
370         u32 page_offset, alloc_size;
371         dma_addr_t dma_addr;
372
373         prod = rcb->producer_index;
374         q_depth = rcb->q_depth;
375
376         alloc_size = PAGE_SIZE << unmap_q->alloc_order;
377         alloced = 0;
378
379         while (nalloc--) {
380                 unmap = &unmap_q->unmap[prod];
381
382                 if (unmap_q->reuse_pi < 0) {
383                         page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
384                                         unmap_q->alloc_order);
385                         page_offset = 0;
386                 } else {
387                         prev = &unmap_q->unmap[unmap_q->reuse_pi];
388                         page = prev->page;
389                         page_offset = prev->page_offset + unmap_q->map_size;
390                         get_page(page);
391                 }
392
393                 if (unlikely(!page)) {
394                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
395                         rcb->rxq->rxbuf_alloc_failed++;
396                         goto finishing;
397                 }
398
399                 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
400                                 unmap_q->map_size, DMA_FROM_DEVICE);
401
402                 unmap->page = page;
403                 unmap->page_offset = page_offset;
404                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
405                 unmap->vector.len = unmap_q->map_size;
406                 page_offset += unmap_q->map_size;
407
408                 if (page_offset < alloc_size)
409                         unmap_q->reuse_pi = prod;
410                 else
411                         unmap_q->reuse_pi = -1;
412
413                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
414                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
415                 BNA_QE_INDX_INC(prod, q_depth);
416                 alloced++;
417         }
418
419 finishing:
420         if (likely(alloced)) {
421                 rcb->producer_index = prod;
422                 smp_mb();
423                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
424                         bna_rxq_prod_indx_doorbell(rcb);
425         }
426
427         return alloced;
428 }
429
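/* Allocate and DMA-map skb buffers for the RxQ, then ring the doorbell for the posted entries */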
430 static u32
431 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
432 {
433         u32 alloced, prod, q_depth, buff_sz;
434         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
435         struct bnad_rx_unmap *unmap;
436         struct bna_rxq_entry *rxent;
437         struct sk_buff *skb;
438         dma_addr_t dma_addr;
439
440         buff_sz = rcb->rxq->buffer_size;
441         prod = rcb->producer_index;
442         q_depth = rcb->q_depth;
443
444         alloced = 0;
445         while (nalloc--) {
446                 unmap = &unmap_q->unmap[prod];
447
448                 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
449
450                 if (unlikely(!skb)) {
451                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
452                         rcb->rxq->rxbuf_alloc_failed++;
453                         goto finishing;
454                 }
455                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
456                                           buff_sz, DMA_FROM_DEVICE);
457
458                 unmap->skb = skb;
459                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
460                 unmap->vector.len = buff_sz;
461
462                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
463                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
464                 BNA_QE_INDX_INC(prod, q_depth);
465                 alloced++;
466         }
467
468 finishing:
469         if (likely(alloced)) {
470                 rcb->producer_index = prod;
471                 smp_mb();
472                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
473                         bna_rxq_prod_indx_doorbell(rcb);
474         }
475
476         return alloced;
477 }
478
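/* Replenish an RxQ once the number of free entries crosses the refill threshold */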
479 static inline void
480 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
481 {
482         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
483         u32 to_alloc;
484
485         to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
486         if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
487                 return;
488
489         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
490                 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
491         else
492                 bnad_rxq_refill_page(bnad, rcb, to_alloc);
493 }
494
495 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
496                                         BNA_CQ_EF_IPV6 | \
497                                         BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
498                                         BNA_CQ_EF_L4_CKSUM_OK)
499
500 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
502 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
503                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
504 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
505                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
506 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
507                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
508
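/* Unmap and free the Rx buffers of a dropped frame: nvecs vectors starting at sop_ci */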
509 static void
510 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
511                     u32 sop_ci, u32 nvecs)
512 {
513         struct bnad_rx_unmap_q *unmap_q;
514         struct bnad_rx_unmap *unmap;
515         u32 ci, vec;
516
517         unmap_q = rcb->unmap_q;
518         for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
519                 unmap = &unmap_q->unmap[ci];
520                 BNA_QE_INDX_INC(ci, rcb->q_depth);
521
522                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
523                         bnad_rxq_cleanup_skb(bnad, unmap);
524                 else
525                         bnad_rxq_cleanup_page(bnad, unmap);
526         }
527 }
528
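/*
 * Attach the unmapped Rx page buffers of a multi-vector frame to the skb
 * as page fragments and account for the added length.
 */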
529 static void
530 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
531                         u32 sop_ci, u32 nvecs, u32 last_fraglen)
532 {
533         struct bnad *bnad;
534         u32 ci, vec, len, totlen = 0;
535         struct bnad_rx_unmap_q *unmap_q;
536         struct bnad_rx_unmap *unmap;
537
538         unmap_q = rcb->unmap_q;
539         bnad = rcb->bnad;
540
541         /* prefetch header */
542         prefetch(page_address(unmap_q->unmap[sop_ci].page) +
543                         unmap_q->unmap[sop_ci].page_offset);
544
545         for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
546                 unmap = &unmap_q->unmap[ci];
547                 BNA_QE_INDX_INC(ci, rcb->q_depth);
548
549                 dma_unmap_page(&bnad->pcidev->dev,
550                                 dma_unmap_addr(&unmap->vector, dma_addr),
551                                 unmap->vector.len, DMA_FROM_DEVICE);
552
553                 len = (vec == nvecs) ?
554                         last_fraglen : unmap->vector.len;
555                 totlen += len;
556
557                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
558                                 unmap->page, unmap->page_offset, len);
559
560                 unmap->page = NULL;
561                 unmap->vector.len = 0;
562         }
563
564         skb->len += totlen;
565         skb->data_len += totlen;
566         skb->truesize += totlen;
567 }
568
569 static inline void
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571                   struct bnad_rx_unmap *unmap, u32 len)
572 {
573         prefetch(skb->data);
574
575         dma_unmap_single(&bnad->pcidev->dev,
576                         dma_unmap_addr(&unmap->vector, dma_addr),
577                         unmap->vector.len, DMA_FROM_DEVICE);
578
579         skb_put(skb, len);
580         skb->protocol = eth_type_trans(skb, bnad->netdev);
581
582         unmap->skb = NULL;
583         unmap->vector.len = 0;
584 }
585
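/*
 * Rx completion processing: walk up to 'budget' CQ entries, gather all
 * vectors of each frame, drop errored frames, set checksum/VLAN info and
 * pass the skb up the stack, then ack the IB and replenish the RxQs.
 */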
586 static u32
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
588 {
589         struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590         struct bna_rcb *rcb = NULL;
591         struct bnad_rx_unmap_q *unmap_q;
592         struct bnad_rx_unmap *unmap = NULL;
593         struct sk_buff *skb = NULL;
594         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595         struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596         u32 packets = 0, len = 0, totlen = 0;
597         u32 pi, vec, sop_ci = 0, nvecs = 0;
598         u32 flags, masked_flags;
599
600         prefetch(bnad->netdev);
601
602         cq = ccb->sw_q;
603         cmpl = &cq[ccb->producer_index];
604
605         while (packets < budget) {
606                 if (!cmpl->valid)
607                         break;
608                 /* The 'valid' field is set by the adapter, only after writing
609                  * the other fields of completion entry. Hence, do not load
610                  * other fields of completion entry *before* the 'valid' is
611                  * loaded. Adding the rmb() here prevents the compiler and/or
612                  * CPU from reordering the reads which would potentially result
613                  * in reading stale values in completion entry.
614                  */
615                 rmb();
616
617                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
618
619                 if (bna_is_small_rxq(cmpl->rxq_id))
620                         rcb = ccb->rcb[1];
621                 else
622                         rcb = ccb->rcb[0];
623
624                 unmap_q = rcb->unmap_q;
625
626                 /* start of packet ci */
627                 sop_ci = rcb->consumer_index;
628
629                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630                         unmap = &unmap_q->unmap[sop_ci];
631                         skb = unmap->skb;
632                 } else {
633                         skb = napi_get_frags(&rx_ctrl->napi);
634                         if (unlikely(!skb))
635                                 break;
636                 }
637                 prefetch(skb);
638
639                 flags = ntohl(cmpl->flags);
640                 len = ntohs(cmpl->length);
641                 totlen = len;
642                 nvecs = 1;
643
644                 /* Check all the completions for this frame.
645                  * busy-wait doesn't help much, break here.
646                  */
647                 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648                     (flags & BNA_CQ_EF_EOP) == 0) {
649                         pi = ccb->producer_index;
650                         do {
651                                 BNA_QE_INDX_INC(pi, ccb->q_depth);
652                                 next_cmpl = &cq[pi];
653
654                                 if (!next_cmpl->valid)
655                                         break;
656                                 /* The 'valid' field is set by the adapter, only
657                                  * after writing the other fields of completion
658                                  * entry. Hence, do not load other fields of
659                                  * completion entry *before* the 'valid' is
660                                  * loaded. Adding the rmb() here prevents the
661                                  * compiler and/or CPU from reordering the reads
662                                  * which would potentially result in reading
663                                  * stale values in completion entry.
664                                  */
665                                 rmb();
666
667                                 len = ntohs(next_cmpl->length);
668                                 flags = ntohl(next_cmpl->flags);
669
670                                 nvecs++;
671                                 totlen += len;
672                         } while ((flags & BNA_CQ_EF_EOP) == 0);
673
674                         if (!next_cmpl->valid)
675                                 break;
676                 }
677
678                 /* TODO: BNA_CQ_EF_LOCAL ? */
679                 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
680                                                 BNA_CQ_EF_FCS_ERROR |
681                                                 BNA_CQ_EF_TOO_LONG))) {
682                         bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
683                         rcb->rxq->rx_packets_with_error++;
684
685                         goto next;
686                 }
687
688                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
689                         bnad_cq_setup_skb(bnad, skb, unmap, len);
690                 else
691                         bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
692
693                 packets++;
694                 rcb->rxq->rx_packets++;
695                 rcb->rxq->rx_bytes += totlen;
696                 ccb->bytes_per_intr += totlen;
697
698                 masked_flags = flags & flags_cksum_prot_mask;
699
700                 if (likely
701                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702                      ((masked_flags == flags_tcp4) ||
703                       (masked_flags == flags_udp4) ||
704                       (masked_flags == flags_tcp6) ||
705                       (masked_flags == flags_udp6))))
706                         skb->ip_summed = CHECKSUM_UNNECESSARY;
707                 else
708                         skb_checksum_none_assert(skb);
709
710                 if (flags & BNA_CQ_EF_VLAN)
711                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
712
713                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
714                         netif_receive_skb(skb);
715                 else
716                         napi_gro_frags(&rx_ctrl->napi);
717
718 next:
719                 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
720                 for (vec = 0; vec < nvecs; vec++) {
721                         cmpl = &cq[ccb->producer_index];
722                         cmpl->valid = 0;
723                         BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
724                 }
725                 cmpl = &cq[ccb->producer_index];
726         }
727
728         napi_gro_flush(&rx_ctrl->napi, false);
729         if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
730                 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
731
732         bnad_rxq_post(bnad, ccb->rcb[0]);
733         if (ccb->rcb[1])
734                 bnad_rxq_post(bnad, ccb->rcb[1]);
735
736         return packets;
737 }
738
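/* Schedule NAPI polling for the given CCB */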
739 static void
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
741 {
742         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
743         struct napi_struct *napi = &rx_ctrl->napi;
744
745         if (likely(napi_schedule_prep(napi))) {
746                 __napi_schedule(napi);
747                 rx_ctrl->rx_schedule++;
748         }
749 }
750
751 /* MSIX Rx Path Handler */
752 static irqreturn_t
753 bnad_msix_rx(int irq, void *data)
754 {
755         struct bna_ccb *ccb = (struct bna_ccb *)data;
756
757         if (ccb) {
758                 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
759                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
760         }
761
762         return IRQ_HANDLED;
763 }
764
765 /* Interrupt handlers */
766
767 /* Mbox Interrupt Handlers */
768 static irqreturn_t
769 bnad_msix_mbox_handler(int irq, void *data)
770 {
771         u32 intr_status;
772         unsigned long flags;
773         struct bnad *bnad = (struct bnad *)data;
774
775         spin_lock_irqsave(&bnad->bna_lock, flags);
776         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
778                 return IRQ_HANDLED;
779         }
780
781         bna_intr_status_get(&bnad->bna, intr_status);
782
783         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784                 bna_mbox_handler(&bnad->bna, intr_status);
785
786         spin_unlock_irqrestore(&bnad->bna_lock, flags);
787
788         return IRQ_HANDLED;
789 }
790
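/* INTx interrupt handler: services the mailbox, then Tx completions and Rx NAPI scheduling */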
791 static irqreturn_t
792 bnad_isr(int irq, void *data)
793 {
794         int i, j;
795         u32 intr_status;
796         unsigned long flags;
797         struct bnad *bnad = (struct bnad *)data;
798         struct bnad_rx_info *rx_info;
799         struct bnad_rx_ctrl *rx_ctrl;
800         struct bna_tcb *tcb = NULL;
801
802         spin_lock_irqsave(&bnad->bna_lock, flags);
803         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
805                 return IRQ_NONE;
806         }
807
808         bna_intr_status_get(&bnad->bna, intr_status);
809
810         if (unlikely(!intr_status)) {
811                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
812                 return IRQ_NONE;
813         }
814
815         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816                 bna_mbox_handler(&bnad->bna, intr_status);
817
818         spin_unlock_irqrestore(&bnad->bna_lock, flags);
819
820         if (!BNA_IS_INTX_DATA_INTR(intr_status))
821                 return IRQ_HANDLED;
822
823         /* Process data interrupts */
824         /* Tx processing */
825         for (i = 0; i < bnad->num_tx; i++) {
826                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827                         tcb = bnad->tx_info[i].tcb[j];
828                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
829                                 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
830                 }
831         }
832         /* Rx processing */
833         for (i = 0; i < bnad->num_rx; i++) {
834                 rx_info = &bnad->rx_info[i];
835                 if (!rx_info->rx)
836                         continue;
837                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
838                         rx_ctrl = &rx_info->rx_ctrl[j];
839                         if (rx_ctrl->ccb)
840                                 bnad_netif_rx_schedule_poll(bnad,
841                                                             rx_ctrl->ccb);
842                 }
843         }
844         return IRQ_HANDLED;
845 }
846
847 /*
848  * Called in interrupt / callback context
849  * with bna_lock held, so cfg_flags access is OK
850  */
851 static void
852 bnad_enable_mbox_irq(struct bnad *bnad)
853 {
854         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
855
856         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
857 }
858
859 /*
860  * Called with bnad->bna_lock held because of
861  * bnad->cfg_flags access.
862  */
863 static void
864 bnad_disable_mbox_irq(struct bnad *bnad)
865 {
866         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
867
868         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
869 }
870
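/* Copy the adapter's permanent MAC to netdev->perm_addr, and to dev_addr if it is still unset */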
871 static void
872 bnad_set_netdev_perm_addr(struct bnad *bnad)
873 {
874         struct net_device *netdev = bnad->netdev;
875
876         memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
877         if (is_zero_ether_addr(netdev->dev_addr))
878                 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
879 }
880
881 /* Control Path Handlers */
882
883 /* Callbacks */
884 void
885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
886 {
887         bnad_enable_mbox_irq(bnad);
888 }
889
890 void
891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
892 {
893         bnad_disable_mbox_irq(bnad);
894 }
895
896 void
897 bnad_cb_ioceth_ready(struct bnad *bnad)
898 {
899         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900         complete(&bnad->bnad_completions.ioc_comp);
901 }
902
903 void
904 bnad_cb_ioceth_failed(struct bnad *bnad)
905 {
906         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907         complete(&bnad->bnad_completions.ioc_comp);
908 }
909
910 void
911 bnad_cb_ioceth_disabled(struct bnad *bnad)
912 {
913         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914         complete(&bnad->bnad_completions.ioc_comp);
915 }
916
917 static void
918 bnad_cb_enet_disabled(void *arg)
919 {
920         struct bnad *bnad = (struct bnad *)arg;
921
922         netif_carrier_off(bnad->netdev);
923         complete(&bnad->bnad_completions.enet_comp);
924 }
925
926 void
927 bnad_cb_ethport_link_status(struct bnad *bnad,
928                         enum bna_link_status link_status)
929 {
930         bool link_up = false;
931
932         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
933
934         if (link_status == BNA_CEE_UP) {
935                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936                         BNAD_UPDATE_CTR(bnad, cee_toggle);
937                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
938         } else {
939                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940                         BNAD_UPDATE_CTR(bnad, cee_toggle);
941                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
942         }
943
944         if (link_up) {
945                 if (!netif_carrier_ok(bnad->netdev)) {
946                         uint tx_id, tcb_id;
947                         printk(KERN_WARNING "bna: %s link up\n",
948                                 bnad->netdev->name);
949                         netif_carrier_on(bnad->netdev);
950                         BNAD_UPDATE_CTR(bnad, link_toggle);
951                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
952                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
953                                       tcb_id++) {
954                                         struct bna_tcb *tcb =
955                                         bnad->tx_info[tx_id].tcb[tcb_id];
956                                         u32 txq_id;
957                                         if (!tcb)
958                                                 continue;
959
960                                         txq_id = tcb->id;
961
962                                         if (test_bit(BNAD_TXQ_TX_STARTED,
963                                                      &tcb->flags)) {
964                                                 /*
965                                                  * Force an immediate
966                                                  * Transmit Schedule */
967                                                 printk(KERN_INFO "bna: %s %d "
968                                                       "TXQ_STARTED\n",
969                                                        bnad->netdev->name,
970                                                        txq_id);
971                                                 netif_wake_subqueue(
972                                                                 bnad->netdev,
973                                                                 txq_id);
974                                                 BNAD_UPDATE_CTR(bnad,
975                                                         netif_queue_wakeup);
976                                         } else {
977                                                 netif_stop_subqueue(
978                                                                 bnad->netdev,
979                                                                 txq_id);
980                                                 BNAD_UPDATE_CTR(bnad,
981                                                         netif_queue_stop);
982                                         }
983                                 }
984                         }
985                 }
986         } else {
987                 if (netif_carrier_ok(bnad->netdev)) {
988                         printk(KERN_WARNING "bna: %s link down\n",
989                                 bnad->netdev->name);
990                         netif_carrier_off(bnad->netdev);
991                         BNAD_UPDATE_CTR(bnad, link_toggle);
992                 }
993         }
994 }
995
996 static void
997 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
998 {
999         struct bnad *bnad = (struct bnad *)arg;
1000
1001         complete(&bnad->bnad_completions.tx_comp);
1002 }
1003
1004 static void
1005 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1006 {
1007         struct bnad_tx_info *tx_info =
1008                         (struct bnad_tx_info *)tcb->txq->tx->priv;
1009
1010         tcb->priv = tcb;
1011         tx_info->tcb[tcb->id] = tcb;
1012 }
1013
1014 static void
1015 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1016 {
1017         struct bnad_tx_info *tx_info =
1018                         (struct bnad_tx_info *)tcb->txq->tx->priv;
1019
1020         tx_info->tcb[tcb->id] = NULL;
1021         tcb->priv = NULL;
1022 }
1023
1024 static void
1025 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1026 {
1027         struct bnad_rx_info *rx_info =
1028                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1029
1030         rx_info->rx_ctrl[ccb->id].ccb = ccb;
1031         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1032 }
1033
1034 static void
1035 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1036 {
1037         struct bnad_rx_info *rx_info =
1038                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1039
1040         rx_info->rx_ctrl[ccb->id].ccb = NULL;
1041 }
1042
1043 static void
1044 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1045 {
1046         struct bnad_tx_info *tx_info =
1047                         (struct bnad_tx_info *)tx->priv;
1048         struct bna_tcb *tcb;
1049         u32 txq_id;
1050         int i;
1051
1052         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1053                 tcb = tx_info->tcb[i];
1054                 if (!tcb)
1055                         continue;
1056                 txq_id = tcb->id;
1057                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1058                 netif_stop_subqueue(bnad->netdev, txq_id);
1059                 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1060                         bnad->netdev->name, txq_id);
1061         }
1062 }
1063
1064 static void
1065 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1066 {
1067         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1068         struct bna_tcb *tcb;
1069         u32 txq_id;
1070         int i;
1071
1072         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1073                 tcb = tx_info->tcb[i];
1074                 if (!tcb)
1075                         continue;
1076                 txq_id = tcb->id;
1077
1078                 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1079                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1080                 BUG_ON(*(tcb->hw_consumer_index) != 0);
1081
1082                 if (netif_carrier_ok(bnad->netdev)) {
1083                         printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1084                                 bnad->netdev->name, txq_id);
1085                         netif_wake_subqueue(bnad->netdev, txq_id);
1086                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1087                 }
1088         }
1089
1090         /*
1091          * Workaround: the first ioceth enable may fail and leave us
1092          * with a zero MAC address. Try to fetch the MAC address
1093          * again here.
1094          */
1095         if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
1096                 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1097                 bnad_set_netdev_perm_addr(bnad);
1098         }
1099 }
1100
1101 /*
1102  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1103  */
1104 static void
1105 bnad_tx_cleanup(struct delayed_work *work)
1106 {
1107         struct bnad_tx_info *tx_info =
1108                 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1109         struct bnad *bnad = NULL;
1110         struct bna_tcb *tcb;
1111         unsigned long flags;
1112         u32 i, pending = 0;
1113
1114         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1115                 tcb = tx_info->tcb[i];
1116                 if (!tcb)
1117                         continue;
1118
1119                 bnad = tcb->bnad;
1120
1121                 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1122                         pending++;
1123                         continue;
1124                 }
1125
1126                 bnad_txq_cleanup(bnad, tcb);
1127
1128                 smp_mb__before_clear_bit();
1129                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1130         }
1131
1132         if (pending) {
1133                 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1134                         msecs_to_jiffies(1));
1135                 return;
1136         }
1137
1138         spin_lock_irqsave(&bnad->bna_lock, flags);
1139         bna_tx_cleanup_complete(tx_info->tx);
1140         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1141 }
1142
1143 static void
1144 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1145 {
1146         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1147         struct bna_tcb *tcb;
1148         int i;
1149
1150         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1151                 tcb = tx_info->tcb[i];
1152                 if (!tcb)
1153                         continue;
1154         }
1155
1156         queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1157 }
1158
1159 static void
1160 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1161 {
1162         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1163         struct bna_ccb *ccb;
1164         struct bnad_rx_ctrl *rx_ctrl;
1165         int i;
1166
1167         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1168                 rx_ctrl = &rx_info->rx_ctrl[i];
1169                 ccb = rx_ctrl->ccb;
1170                 if (!ccb)
1171                         continue;
1172
1173                 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1174
1175                 if (ccb->rcb[1])
1176                         clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1177         }
1178 }
1179
1180 /*
1181  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1182  */
1183 static void
1184 bnad_rx_cleanup(void *work)
1185 {
1186         struct bnad_rx_info *rx_info =
1187                 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1188         struct bnad_rx_ctrl *rx_ctrl;
1189         struct bnad *bnad = NULL;
1190         unsigned long flags;
1191         u32 i;
1192
1193         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1194                 rx_ctrl = &rx_info->rx_ctrl[i];
1195
1196                 if (!rx_ctrl->ccb)
1197                         continue;
1198
1199                 bnad = rx_ctrl->ccb->bnad;
1200
1201                 /*
1202                  * Wait till the poll handler has exited
1203                  * and nothing can be scheduled anymore
1204                  */
1205                 napi_disable(&rx_ctrl->napi);
1206
1207                 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1208                 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1209                 if (rx_ctrl->ccb->rcb[1])
1210                         bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1211         }
1212
1213         spin_lock_irqsave(&bnad->bna_lock, flags);
1214         bna_rx_cleanup_complete(rx_info->rx);
1215         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1216 }
1217
1218 static void
1219 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1220 {
1221         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1222         struct bna_ccb *ccb;
1223         struct bnad_rx_ctrl *rx_ctrl;
1224         int i;
1225
1226         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1227                 rx_ctrl = &rx_info->rx_ctrl[i];
1228                 ccb = rx_ctrl->ccb;
1229                 if (!ccb)
1230                         continue;
1231
1232                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1233
1234                 if (ccb->rcb[1])
1235                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1236         }
1237
1238         queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1239 }
1240
1241 static void
1242 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1243 {
1244         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1245         struct bna_ccb *ccb;
1246         struct bna_rcb *rcb;
1247         struct bnad_rx_ctrl *rx_ctrl;
1248         int i, j;
1249
1250         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1251                 rx_ctrl = &rx_info->rx_ctrl[i];
1252                 ccb = rx_ctrl->ccb;
1253                 if (!ccb)
1254                         continue;
1255
1256                 napi_enable(&rx_ctrl->napi);
1257
1258                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1259                         rcb = ccb->rcb[j];
1260                         if (!rcb)
1261                                 continue;
1262
1263                         bnad_rxq_alloc_init(bnad, rcb);
1264                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1265                         set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1266                         bnad_rxq_post(bnad, rcb);
1267                 }
1268         }
1269 }
1270
1271 static void
1272 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1273 {
1274         struct bnad *bnad = (struct bnad *)arg;
1275
1276         complete(&bnad->bnad_completions.rx_comp);
1277 }
1278
1279 static void
1280 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1281 {
1282         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1283         complete(&bnad->bnad_completions.mcast_comp);
1284 }
1285
1286 void
1287 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1288                        struct bna_stats *stats)
1289 {
1290         if (status == BNA_CB_SUCCESS)
1291                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1292
1293         if (!netif_running(bnad->netdev) ||
1294                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1295                 return;
1296
1297         mod_timer(&bnad->stats_timer,
1298                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1299 }
1300
1301 static void
1302 bnad_cb_enet_mtu_set(struct bnad *bnad)
1303 {
1304         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1305         complete(&bnad->bnad_completions.mtu_comp);
1306 }
1307
1308 void
1309 bnad_cb_completion(void *arg, enum bfa_status status)
1310 {
1311         struct bnad_iocmd_comp *iocmd_comp =
1312                         (struct bnad_iocmd_comp *)arg;
1313
1314         iocmd_comp->comp_status = (u32) status;
1315         complete(&iocmd_comp->comp);
1316 }
1317
1318 /* Resource allocation, free functions */
1319
1320 static void
1321 bnad_mem_free(struct bnad *bnad,
1322               struct bna_mem_info *mem_info)
1323 {
1324         int i;
1325         dma_addr_t dma_pa;
1326
1327         if (mem_info->mdl == NULL)
1328                 return;
1329
1330         for (i = 0; i < mem_info->num; i++) {
1331                 if (mem_info->mdl[i].kva != NULL) {
1332                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1333                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1334                                                 dma_pa);
1335                                 dma_free_coherent(&bnad->pcidev->dev,
1336                                                   mem_info->mdl[i].len,
1337                                                   mem_info->mdl[i].kva, dma_pa);
1338                         } else
1339                                 kfree(mem_info->mdl[i].kva);
1340                 }
1341         }
1342         kfree(mem_info->mdl);
1343         mem_info->mdl = NULL;
1344 }
1345
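/*
 * Allocate 'num' descriptors of 'len' bytes each: coherent DMA memory
 * for BNA_MEM_T_DMA, kzalloc'd memory otherwise.
 */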
1346 static int
1347 bnad_mem_alloc(struct bnad *bnad,
1348                struct bna_mem_info *mem_info)
1349 {
1350         int i;
1351         dma_addr_t dma_pa;
1352
1353         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1354                 mem_info->mdl = NULL;
1355                 return 0;
1356         }
1357
1358         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1359                                 GFP_KERNEL);
1360         if (mem_info->mdl == NULL)
1361                 return -ENOMEM;
1362
1363         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1364                 for (i = 0; i < mem_info->num; i++) {
1365                         mem_info->mdl[i].len = mem_info->len;
1366                         mem_info->mdl[i].kva =
1367                                 dma_alloc_coherent(&bnad->pcidev->dev,
1368                                                    mem_info->len, &dma_pa,
1369                                                    GFP_KERNEL);
1370                         if (mem_info->mdl[i].kva == NULL)
1371                                 goto err_return;
1372
1373                         BNA_SET_DMA_ADDR(dma_pa,
1374                                          &(mem_info->mdl[i].dma));
1375                 }
1376         } else {
1377                 for (i = 0; i < mem_info->num; i++) {
1378                         mem_info->mdl[i].len = mem_info->len;
1379                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1380                                                         GFP_KERNEL);
1381                         if (mem_info->mdl[i].kva == NULL)
1382                                 goto err_return;
1383                 }
1384         }
1385
1386         return 0;
1387
1388 err_return:
1389         bnad_mem_free(bnad, mem_info);
1390         return -ENOMEM;
1391 }
1392
1393 /* Free IRQ for Mailbox */
1394 static void
1395 bnad_mbox_irq_free(struct bnad *bnad)
1396 {
1397         int irq;
1398         unsigned long flags;
1399
1400         spin_lock_irqsave(&bnad->bna_lock, flags);
1401         bnad_disable_mbox_irq(bnad);
1402         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1403
1404         irq = BNAD_GET_MBOX_IRQ(bnad);
1405         free_irq(irq, bnad);
1406 }
1407
1408 /*
1409  * Allocates IRQ for Mailbox, but keep it disabled
1410  * This will be enabled once we get the mbox enable callback
1411  * from bna
1412  */
1413 static int
1414 bnad_mbox_irq_alloc(struct bnad *bnad)
1415 {
1416         int             err = 0;
1417         unsigned long   irq_flags, flags;
1418         u32     irq;
1419         irq_handler_t   irq_handler;
1420
1421         spin_lock_irqsave(&bnad->bna_lock, flags);
1422         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1423                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1424                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1425                 irq_flags = 0;
1426         } else {
1427                 irq_handler = (irq_handler_t)bnad_isr;
1428                 irq = bnad->pcidev->irq;
1429                 irq_flags = IRQF_SHARED;
1430         }
1431
1432         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1433         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1434
1435         /*
1436          * Set the Mbox IRQ disable flag, so that the IRQ handler
1437          * called from request_irq() for SHARED IRQs does not execute
1438          */
1439         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1440
1441         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1442
1443         err = request_irq(irq, irq_handler, irq_flags,
1444                           bnad->mbox_irq_name, bnad);
1445
1446         return err;
1447 }
1448
1449 static void
1450 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1451 {
1452         kfree(intr_info->idl);
1453         intr_info->idl = NULL;
1454 }
1455
1456 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1457 static int
1458 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1459                     u32 txrx_id, struct bna_intr_info *intr_info)
1460 {
1461         int i, vector_start = 0;
1462         u32 cfg_flags;
1463         unsigned long flags;
1464
1465         spin_lock_irqsave(&bnad->bna_lock, flags);
1466         cfg_flags = bnad->cfg_flags;
1467         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1468
1469         if (cfg_flags & BNAD_CF_MSIX) {
1470                 intr_info->intr_type = BNA_INTR_T_MSIX;
1471                 intr_info->idl = kcalloc(intr_info->num,
1472                                         sizeof(struct bna_intr_descr),
1473                                         GFP_KERNEL);
1474                 if (!intr_info->idl)
1475                         return -ENOMEM;
1476
1477                 switch (src) {
1478                 case BNAD_INTR_TX:
1479                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1480                         break;
1481
1482                 case BNAD_INTR_RX:
1483                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1484                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1485                                         txrx_id;
1486                         break;
1487
1488                 default:
1489                         BUG();
1490                 }
1491
1492                 for (i = 0; i < intr_info->num; i++)
1493                         intr_info->idl[i].vector = vector_start + i;
1494         } else {
1495                 intr_info->intr_type = BNA_INTR_T_INTX;
1496                 intr_info->num = 1;
1497                 intr_info->idl = kcalloc(intr_info->num,
1498                                         sizeof(struct bna_intr_descr),
1499                                         GFP_KERNEL);
1500                 if (!intr_info->idl)
1501                         return -ENOMEM;
1502
1503                 switch (src) {
1504                 case BNAD_INTR_TX:
1505                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1506                         break;
1507
1508                 case BNAD_INTR_RX:
1509                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1510                         break;
1511                 }
1512         }
1513         return 0;
1514 }
1515
1516 /* NOTE: Should be called for MSIX only
1517  * Unregisters Tx MSIX vector(s) from the kernel
1518  */
1519 static void
1520 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1521                         int num_txqs)
1522 {
1523         int i;
1524         int vector_num;
1525
1526         for (i = 0; i < num_txqs; i++) {
1527                 if (tx_info->tcb[i] == NULL)
1528                         continue;
1529
1530                 vector_num = tx_info->tcb[i]->intr_vector;
1531                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1532         }
1533 }
1534
1535 /* NOTE: Should be called for MSIX only
1536  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1537  */
1538 static int
1539 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1540                         u32 tx_id, int num_txqs)
1541 {
1542         int i;
1543         int err;
1544         int vector_num;
1545
1546         for (i = 0; i < num_txqs; i++) {
1547                 vector_num = tx_info->tcb[i]->intr_vector;
1548                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1549                                 tx_id + tx_info->tcb[i]->id);
1550                 err = request_irq(bnad->msix_table[vector_num].vector,
1551                                   (irq_handler_t)bnad_msix_tx, 0,
1552                                   tx_info->tcb[i]->name,
1553                                   tx_info->tcb[i]);
1554                 if (err)
1555                         goto err_return;
1556         }
1557
1558         return 0;
1559
1560 err_return:
1561         if (i > 0)
1562                 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1563         return -1;
1564 }
1565
1566 /* NOTE: Should be called for MSIX only
1567  * Unregisters Rx MSIX vector(s) from the kernel
1568  */
1569 static void
1570 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1571                         int num_rxps)
1572 {
1573         int i;
1574         int vector_num;
1575
1576         for (i = 0; i < num_rxps; i++) {
1577                 if (rx_info->rx_ctrl[i].ccb == NULL)
1578                         continue;
1579
1580                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1581                 free_irq(bnad->msix_table[vector_num].vector,
1582                          rx_info->rx_ctrl[i].ccb);
1583         }
1584 }
1585
1586 /* NOTE: Should be called for MSIX only
1587  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1588  */
1589 static int
1590 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1591                         u32 rx_id, int num_rxps)
1592 {
1593         int i;
1594         int err;
1595         int vector_num;
1596
1597         for (i = 0; i < num_rxps; i++) {
1598                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1599                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1600                         bnad->netdev->name,
1601                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1602                 err = request_irq(bnad->msix_table[vector_num].vector,
1603                                   (irq_handler_t)bnad_msix_rx, 0,
1604                                   rx_info->rx_ctrl[i].ccb->name,
1605                                   rx_info->rx_ctrl[i].ccb);
1606                 if (err)
1607                         goto err_return;
1608         }
1609
1610         return 0;
1611
1612 err_return:
1613         if (i > 0)
1614                 bnad_rx_msix_unregister(bnad, rx_info, i);
1615         return -1;
1616 }
1617
1618 /* Free Tx object Resources */
1619 static void
1620 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1621 {
1622         int i;
1623
1624         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1625                 if (res_info[i].res_type == BNA_RES_T_MEM)
1626                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1627                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1628                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1629         }
1630 }
1631
1632 /* Allocates memory and interrupt resources for Tx object */
1633 static int
1634 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1635                   u32 tx_id)
1636 {
1637         int i, err = 0;
1638
1639         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1640                 if (res_info[i].res_type == BNA_RES_T_MEM)
1641                         err = bnad_mem_alloc(bnad,
1642                                         &res_info[i].res_u.mem_info);
1643                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1644                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1645                                         &res_info[i].res_u.intr_info);
1646                 if (err)
1647                         goto err_return;
1648         }
1649         return 0;
1650
1651 err_return:
1652         bnad_tx_res_free(bnad, res_info);
1653         return err;
1654 }
1655
1656 /* Free Rx object Resources */
1657 static void
1658 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1659 {
1660         int i;
1661
1662         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1663                 if (res_info[i].res_type == BNA_RES_T_MEM)
1664                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1665                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1666                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1667         }
1668 }
1669
1670 /* Allocates memory and interrupt resources for Rx object */
1671 static int
1672 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1673                   uint rx_id)
1674 {
1675         int i, err = 0;
1676
1677         /* All memory needs to be allocated before setup_ccbs */
1678         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1679                 if (res_info[i].res_type == BNA_RES_T_MEM)
1680                         err = bnad_mem_alloc(bnad,
1681                                         &res_info[i].res_u.mem_info);
1682                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1683                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1684                                         &res_info[i].res_u.intr_info);
1685                 if (err)
1686                         goto err_return;
1687         }
1688         return 0;
1689
1690 err_return:
1691         bnad_rx_res_free(bnad, res_info);
1692         return err;
1693 }
1694
1695 /* Timer callbacks */
1696 /* a) IOC timer */
1697 static void
1698 bnad_ioc_timeout(unsigned long data)
1699 {
1700         struct bnad *bnad = (struct bnad *)data;
1701         unsigned long flags;
1702
1703         spin_lock_irqsave(&bnad->bna_lock, flags);
1704         bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1705         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706 }
1707
1708 static void
1709 bnad_ioc_hb_check(unsigned long data)
1710 {
1711         struct bnad *bnad = (struct bnad *)data;
1712         unsigned long flags;
1713
1714         spin_lock_irqsave(&bnad->bna_lock, flags);
1715         bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1716         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1717 }
1718
1719 static void
1720 bnad_iocpf_timeout(unsigned long data)
1721 {
1722         struct bnad *bnad = (struct bnad *)data;
1723         unsigned long flags;
1724
1725         spin_lock_irqsave(&bnad->bna_lock, flags);
1726         bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1727         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1728 }
1729
1730 static void
1731 bnad_iocpf_sem_timeout(unsigned long data)
1732 {
1733         struct bnad *bnad = (struct bnad *)data;
1734         unsigned long flags;
1735
1736         spin_lock_irqsave(&bnad->bna_lock, flags);
1737         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1738         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1739 }
1740
1741 /*
1742  * All timer routines use bnad->bna_lock to protect against
1743  * the following race, which may occur in case of no locking:
1744  *      Time    CPU m   CPU n
1745  *      0       1 = test_bit
1746  *      1                       clear_bit
1747  *      2                       del_timer_sync
1748  *      3       mod_timer
1749  */
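/*
 * Hence the pattern used below: the *_TIMER_RUNNING bit is tested and
 * mod_timer() is called while holding bna_lock, and the stop paths clear
 * the bit under the same lock before calling del_timer_sync() outside it.
 */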
1750
1751 /* b) Dynamic Interrupt Moderation Timer */
1752 static void
1753 bnad_dim_timeout(unsigned long data)
1754 {
1755         struct bnad *bnad = (struct bnad *)data;
1756         struct bnad_rx_info *rx_info;
1757         struct bnad_rx_ctrl *rx_ctrl;
1758         int i, j;
1759         unsigned long flags;
1760
1761         if (!netif_carrier_ok(bnad->netdev))
1762                 return;
1763
1764         spin_lock_irqsave(&bnad->bna_lock, flags);
1765         for (i = 0; i < bnad->num_rx; i++) {
1766                 rx_info = &bnad->rx_info[i];
1767                 if (!rx_info->rx)
1768                         continue;
1769                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1770                         rx_ctrl = &rx_info->rx_ctrl[j];
1771                         if (!rx_ctrl->ccb)
1772                                 continue;
1773                         bna_rx_dim_update(rx_ctrl->ccb);
1774                 }
1775         }
1776
1777         /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1778         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1779                 mod_timer(&bnad->dim_timer,
1780                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1781         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1782 }
1783
1784 /* c)  Statistics Timer */
1785 static void
1786 bnad_stats_timeout(unsigned long data)
1787 {
1788         struct bnad *bnad = (struct bnad *)data;
1789         unsigned long flags;
1790
1791         if (!netif_running(bnad->netdev) ||
1792                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1793                 return;
1794
1795         spin_lock_irqsave(&bnad->bna_lock, flags);
1796         bna_hw_stats_get(&bnad->bna);
1797         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1798 }
1799
1800 /*
1801  * Set up timer for DIM
1802  * Called with bnad->bna_lock held
1803  */
1804 void
1805 bnad_dim_timer_start(struct bnad *bnad)
1806 {
1807         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1808             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1809                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1810                             (unsigned long)bnad);
1811                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1812                 mod_timer(&bnad->dim_timer,
1813                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1814         }
1815 }
1816
1817 /*
1818  * Set up timer for statistics
1819  * Called with mutex_lock(&bnad->conf_mutex) held
1820  */
1821 static void
1822 bnad_stats_timer_start(struct bnad *bnad)
1823 {
1824         unsigned long flags;
1825
1826         spin_lock_irqsave(&bnad->bna_lock, flags);
1827         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1828                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1829                             (unsigned long)bnad);
1830                 mod_timer(&bnad->stats_timer,
1831                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1832         }
1833         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1834 }
1835
1836 /*
1837  * Stops the stats timer
1838  * Called with mutex_lock(&bnad->conf_mutex) held
1839  */
1840 static void
1841 bnad_stats_timer_stop(struct bnad *bnad)
1842 {
1843         int to_del = 0;
1844         unsigned long flags;
1845
1846         spin_lock_irqsave(&bnad->bna_lock, flags);
1847         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1848                 to_del = 1;
1849         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850         if (to_del)
1851                 del_timer_sync(&bnad->stats_timer);
1852 }
1853
1854 /* Utilities */
1855
1856 static void
1857 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1858 {
1859         int i = 1; /* Index 0 has broadcast address */
1860         struct netdev_hw_addr *mc_addr;
1861
1862         netdev_for_each_mc_addr(mc_addr, netdev) {
1863                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1864                                                         ETH_ALEN);
1865                 i++;
1866         }
1867 }
1868
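/*
 * NAPI poll handler: processes up to 'budget' completions on this CQ.
 * If the budget is exhausted, return without completing NAPI so polling
 * continues; otherwise napi_complete() is called and the Rx interrupt
 * is re-armed.
 */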
1869 static int
1870 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1871 {
1872         struct bnad_rx_ctrl *rx_ctrl =
1873                 container_of(napi, struct bnad_rx_ctrl, napi);
1874         struct bnad *bnad = rx_ctrl->bnad;
1875         int rcvd = 0;
1876
1877         rx_ctrl->rx_poll_ctr++;
1878
1879         if (!netif_carrier_ok(bnad->netdev))
1880                 goto poll_exit;
1881
1882         rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1883         if (rcvd >= budget)
1884                 return rcvd;
1885
1886 poll_exit:
1887         napi_complete(napi);
1888
1889         rx_ctrl->rx_complete++;
1890
1891         if (rx_ctrl->ccb)
1892                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1893
1894         return rcvd;
1895 }
1896
1897 #define BNAD_NAPI_POLL_QUOTA            64
1898 static void
1899 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1900 {
1901         struct bnad_rx_ctrl *rx_ctrl;
1902         int i;
1903
1904         /* Initialize & enable NAPI */
1905         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1906                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1907                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1908                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1909         }
1910 }
1911
1912 static void
1913 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1914 {
1915         int i;
1916
1917         /* First disable and then clean up */
1918         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1919                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1920 }
1921
1922 /* Should be called with conf_lock held */
1923 void
1924 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1925 {
1926         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1927         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1928         unsigned long flags;
1929
1930         if (!tx_info->tx)
1931                 return;
1932
1933         init_completion(&bnad->bnad_completions.tx_comp);
1934         spin_lock_irqsave(&bnad->bna_lock, flags);
1935         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1936         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1937         wait_for_completion(&bnad->bnad_completions.tx_comp);
1938
1939         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1940                 bnad_tx_msix_unregister(bnad, tx_info,
1941                         bnad->num_txq_per_tx);
1942
1943         spin_lock_irqsave(&bnad->bna_lock, flags);
1944         bna_tx_destroy(tx_info->tx);
1945         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1946
1947         tx_info->tx = NULL;
1948         tx_info->tx_id = 0;
1949
1950         bnad_tx_res_free(bnad, res_info);
1951 }
1952
1953 /* Should be called with conf_lock held */
1954 int
1955 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1956 {
1957         int err;
1958         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1959         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1960         struct bna_intr_info *intr_info =
1961                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1962         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1963         static const struct bna_tx_event_cbfn tx_cbfn = {
1964                 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1965                 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1966                 .tx_stall_cbfn = bnad_cb_tx_stall,
1967                 .tx_resume_cbfn = bnad_cb_tx_resume,
1968                 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1969         };
1970
1971         struct bna_tx *tx;
1972         unsigned long flags;
1973
1974         tx_info->tx_id = tx_id;
1975
1976         /* Initialize the Tx object configuration */
1977         tx_config->num_txq = bnad->num_txq_per_tx;
1978         tx_config->txq_depth = bnad->txq_depth;
1979         tx_config->tx_type = BNA_TX_T_REGULAR;
1980         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1981
1982         /* Get BNA's resource requirement for one tx object */
1983         spin_lock_irqsave(&bnad->bna_lock, flags);
1984         bna_tx_res_req(bnad->num_txq_per_tx,
1985                 bnad->txq_depth, res_info);
1986         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1987
1988         /* Fill Unmap Q memory requirements */
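        /* one bnad_tx_unmap entry per TxQ descriptor, for each TxQ */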
1989         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1990                         bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1991                         bnad->txq_depth));
1992
1993         /* Allocate resources */
1994         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1995         if (err)
1996                 return err;
1997
1998         /* Ask BNA to create one Tx object, supplying required resources */
1999         spin_lock_irqsave(&bnad->bna_lock, flags);
2000         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2001                         tx_info);
2002         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2003         if (!tx) {
2004                 err = -ENOMEM;
2005                 goto err_return;
2006         }
2007         tx_info->tx = tx;
2008
2009         INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2010                         (work_func_t)bnad_tx_cleanup);
2011
2012         /* Register ISR for the Tx object */
2013         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2014                 err = bnad_tx_msix_register(bnad, tx_info,
2015                         tx_id, bnad->num_txq_per_tx);
2016                 if (err)
2017                         goto cleanup_tx;
2018         }
2019
2020         spin_lock_irqsave(&bnad->bna_lock, flags);
2021         bna_tx_enable(tx);
2022         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2023
2024         return 0;
2025
2026 cleanup_tx:
2027         spin_lock_irqsave(&bnad->bna_lock, flags);
2028         bna_tx_destroy(tx_info->tx);
2029         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2030         tx_info->tx = NULL;
2031         tx_info->tx_id = 0;
2032 err_return:
2033         bnad_tx_res_free(bnad, res_info);
2034         return err;
2035 }
2036
2037 /* Setup the rx config for bna_rx_create */
2038 /* bnad decides the configuration */
2039 static void
2040 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2041 {
2042         memset(rx_config, 0, sizeof(*rx_config));
2043         rx_config->rx_type = BNA_RX_T_REGULAR;
2044         rx_config->num_paths = bnad->num_rxp_per_rx;
2045         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2046
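        /*
         * With more than one Rx path, enable RSS: hash IPv4/IPv6 and TCP
         * headers, mask the result with (num_rxp_per_rx - 1) to select a
         * path, and seed a random Toeplitz hash key.
         */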
2047         if (bnad->num_rxp_per_rx > 1) {
2048                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2049                 rx_config->rss_config.hash_type =
2050                                 (BFI_ENET_RSS_IPV6 |
2051                                  BFI_ENET_RSS_IPV6_TCP |
2052                                  BFI_ENET_RSS_IPV4 |
2053                                  BFI_ENET_RSS_IPV4_TCP);
2054                 rx_config->rss_config.hash_mask =
2055                                 bnad->num_rxp_per_rx - 1;
2056                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2057                         sizeof(rx_config->rss_config.toeplitz_hash_key));
2058         } else {
2059                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2060                 memset(&rx_config->rss_config, 0,
2061                        sizeof(rx_config->rss_config));
2062         }
2063
2064         rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2065         rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2066
2067         /* BNA_RXP_SINGLE - one data-buffer queue
2068          * BNA_RXP_SLR - one small-buffer and one large-buffer queue
2069          * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2070          */
2071         /* TODO: configurable param for queue type */
2072         rx_config->rxp_type = BNA_RXP_SLR;
2073
2074         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2075             rx_config->frame_size > 4096) {
2076                 /* though size_routing_enable is set in SLR,
2077                  * small packets may get routed to the same rxq.
2078                  * set buf_size to 2048 instead of PAGE_SIZE.
2079                  */
2080                 rx_config->q0_buf_size = 2048;
2081                 /* this should be a multiple of 2 */
2082                 rx_config->q0_num_vecs = 4;
2083                 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2084                 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2085         } else {
2086                 rx_config->q0_buf_size = rx_config->frame_size;
2087                 rx_config->q0_num_vecs = 1;
2088                 rx_config->q0_depth = bnad->rxq_depth;
2089         }
2090
2091         /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2092         if (rx_config->rxp_type == BNA_RXP_SLR) {
2093                 rx_config->q1_depth = bnad->rxq_depth;
2094                 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2095         }
2096
2097         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
2098 }
2099
2100 static void
2101 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2102 {
2103         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2104         int i;
2105
2106         for (i = 0; i < bnad->num_rxp_per_rx; i++)
2107                 rx_info->rx_ctrl[i].bnad = bnad;
2108 }
2109
2110 /* Called with mutex_lock(&bnad->conf_mutex) held */
2111 u32
2112 bnad_reinit_rx(struct bnad *bnad)
2113 {
2114         struct net_device *netdev = bnad->netdev;
2115         u32 err = 0, current_err = 0;
2116         u32 rx_id = 0, count = 0;
2117         unsigned long flags;
2118
2119         /* destroy and create new rx objects */
2120         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2121                 if (!bnad->rx_info[rx_id].rx)
2122                         continue;
2123                 bnad_destroy_rx(bnad, rx_id);
2124         }
2125
2126         spin_lock_irqsave(&bnad->bna_lock, flags);
2127         bna_enet_mtu_set(&bnad->bna.enet,
2128                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2129         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2130
2131         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2132                 count++;
2133                 current_err = bnad_setup_rx(bnad, rx_id);
2134                 if (current_err && !err) {
2135                         err = current_err;
2136                         pr_err("RXQ:%u setup failed\n", rx_id);
2137                 }
2138         }
2139
2140         /* restore rx configuration */
2141         if (bnad->rx_info[0].rx && !err) {
2142                 bnad_restore_vlans(bnad, 0);
2143                 bnad_enable_default_bcast(bnad);
2144                 spin_lock_irqsave(&bnad->bna_lock, flags);
2145                 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2146                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2147                 bnad_set_rx_mode(netdev);
2148         }
2149
2150         return count;
2151 }
2152
2153 /* Called with bnad_conf_lock() held */
2154 void
2155 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2156 {
2157         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2158         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2159         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2160         unsigned long flags;
2161         int to_del = 0;
2162
2163         if (!rx_info->rx)
2164                 return;
2165
2166         if (0 == rx_id) {
2167                 spin_lock_irqsave(&bnad->bna_lock, flags);
2168                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2169                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2170                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2171                         to_del = 1;
2172                 }
2173                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2174                 if (to_del)
2175                         del_timer_sync(&bnad->dim_timer);
2176         }
2177
2178         init_completion(&bnad->bnad_completions.rx_comp);
2179         spin_lock_irqsave(&bnad->bna_lock, flags);
2180         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2181         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2182         wait_for_completion(&bnad->bnad_completions.rx_comp);
2183
2184         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2185                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2186
2187         bnad_napi_delete(bnad, rx_id);
2188
2189         spin_lock_irqsave(&bnad->bna_lock, flags);
2190         bna_rx_destroy(rx_info->rx);
2191
2192         rx_info->rx = NULL;
2193         rx_info->rx_id = 0;
2194         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2195
2196         bnad_rx_res_free(bnad, res_info);
2197 }
2198
2199 /* Called with mutex_lock(&bnad->conf_mutex) held */
2200 int
2201 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2202 {
2203         int err;
2204         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2205         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2206         struct bna_intr_info *intr_info =
2207                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2208         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2209         static const struct bna_rx_event_cbfn rx_cbfn = {
2210                 .rcb_setup_cbfn = NULL,
2211                 .rcb_destroy_cbfn = NULL,
2212                 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2213                 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2214                 .rx_stall_cbfn = bnad_cb_rx_stall,
2215                 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2216                 .rx_post_cbfn = bnad_cb_rx_post,
2217         };
2218         struct bna_rx *rx;
2219         unsigned long flags;
2220
2221         rx_info->rx_id = rx_id;
2222
2223         /* Initialize the Rx object configuration */
2224         bnad_init_rx_config(bnad, rx_config);
2225
2226         /* Get BNA's resource requirement for one Rx object */
2227         spin_lock_irqsave(&bnad->bna_lock, flags);
2228         bna_rx_res_req(rx_config, res_info);
2229         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2230
2231         /* Fill Unmap Q memory requirements */
2232         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2233                                  rx_config->num_paths,
2234                         (rx_config->q0_depth *
2235                          sizeof(struct bnad_rx_unmap)) +
2236                          sizeof(struct bnad_rx_unmap_q));
2237
2238         if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2239                 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2240                                          rx_config->num_paths,
2241                                 (rx_config->q1_depth *
2242                                  sizeof(struct bnad_rx_unmap) +
2243                                  sizeof(struct bnad_rx_unmap_q)));
2244         }
2245         /* Allocate resource */
2246         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2247         if (err)
2248                 return err;
2249
2250         bnad_rx_ctrl_init(bnad, rx_id);
2251
2252         /* Ask BNA to create one Rx object, supplying required resources */
2253         spin_lock_irqsave(&bnad->bna_lock, flags);
2254         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2255                         rx_info);
2256         if (!rx) {
2257                 err = -ENOMEM;
2258                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2259                 goto err_return;
2260         }
2261         rx_info->rx = rx;
2262         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2263
2264         INIT_WORK(&rx_info->rx_cleanup_work,
2265                         (work_func_t)(bnad_rx_cleanup));
2266
2267         /*
2268          * Init NAPI, so that state is set to NAPI_STATE_SCHED and
2269          * the IRQ handler cannot schedule NAPI at this point.
2270          */
2271         bnad_napi_add(bnad, rx_id);
2272
2273         /* Register ISR for the Rx object */
2274         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2275                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2276                                                 rx_config->num_paths);
2277                 if (err)
2278                         goto err_return;
2279         }
2280
2281         spin_lock_irqsave(&bnad->bna_lock, flags);
2282         if (0 == rx_id) {
2283                 /* Set up Dynamic Interrupt Moderation Vector */
2284                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2285                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2286
2287                 /* Enable VLAN filtering only on the default Rx */
2288                 bna_rx_vlanfilter_enable(rx);
2289
2290                 /* Start the DIM timer */
2291                 bnad_dim_timer_start(bnad);
2292         }
2293
2294         bna_rx_enable(rx);
2295         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2296
2297         return 0;
2298
2299 err_return:
2300         bnad_destroy_rx(bnad, rx_id);
2301         return err;
2302 }
2303
2304 /* Called with conf_lock & bnad->bna_lock held */
2305 void
2306 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2307 {
2308         struct bnad_tx_info *tx_info;
2309
2310         tx_info = &bnad->tx_info[0];
2311         if (!tx_info->tx)
2312                 return;
2313
2314         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2315 }
2316
2317 /* Called with conf_lock & bnad->bna_lock held */
2318 void
2319 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2320 {
2321         struct bnad_rx_info *rx_info;
2322         int     i;
2323
2324         for (i = 0; i < bnad->num_rx; i++) {
2325                 rx_info = &bnad->rx_info[i];
2326                 if (!rx_info->rx)
2327                         continue;
2328                 bna_rx_coalescing_timeo_set(rx_info->rx,
2329                                 bnad->rx_coalescing_timeo);
2330         }
2331 }
2332
2333 /*
2334  * Called with bnad->bna_lock held
2335  */
2336 int
2337 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2338 {
2339         int ret;
2340
2341         if (!is_valid_ether_addr(mac_addr))
2342                 return -EADDRNOTAVAIL;
2343
2344         /* If datapath is down, pretend everything went through */
2345         if (!bnad->rx_info[0].rx)
2346                 return 0;
2347
2348         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2349         if (ret != BNA_CB_SUCCESS)
2350                 return -EADDRNOTAVAIL;
2351
2352         return 0;
2353 }
2354
2355 /* Should be called with conf_lock held */
2356 int
2357 bnad_enable_default_bcast(struct bnad *bnad)
2358 {
2359         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2360         int ret;
2361         unsigned long flags;
2362
2363         init_completion(&bnad->bnad_completions.mcast_comp);
2364
2365         spin_lock_irqsave(&bnad->bna_lock, flags);
2366         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2367                                 bnad_cb_rx_mcast_add);
2368         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2369
2370         if (ret == BNA_CB_SUCCESS)
2371                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2372         else
2373                 return -ENODEV;
2374
2375         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2376                 return -ENODEV;
2377
2378         return 0;
2379 }
2380
2381 /* Called with mutex_lock(&bnad->conf_mutex) held */
2382 void
2383 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2384 {
2385         u16 vid;
2386         unsigned long flags;
2387
2388         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2389                 spin_lock_irqsave(&bnad->bna_lock, flags);
2390                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2391                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2392         }
2393 }
2394
2395 /* Statistics utilities */
2396 void
2397 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2398 {
2399         int i, j;
2400
2401         for (i = 0; i < bnad->num_rx; i++) {
2402                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2403                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2404                                 stats->rx_packets += bnad->rx_info[i].
2405                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2406                                 stats->rx_bytes += bnad->rx_info[i].
2407                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2408                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2409                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2410                                         rcb[1]->rxq) {
2411                                         stats->rx_packets +=
2412                                                 bnad->rx_info[i].rx_ctrl[j].
2413                                                 ccb->rcb[1]->rxq->rx_packets;
2414                                         stats->rx_bytes +=
2415                                                 bnad->rx_info[i].rx_ctrl[j].
2416                                                 ccb->rcb[1]->rxq->rx_bytes;
2417                                 }
2418                         }
2419                 }
2420         }
2421         for (i = 0; i < bnad->num_tx; i++) {
2422                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2423                         if (bnad->tx_info[i].tcb[j]) {
2424                                 stats->tx_packets +=
2425                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2426                                 stats->tx_bytes +=
2427                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2428                         }
2429                 }
2430         }
2431 }
2432
2433 /*
2434  * Must be called with the bna_lock held.
2435  */
2436 void
2437 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2438 {
2439         struct bfi_enet_stats_mac *mac_stats;
2440         u32 bmap;
2441         int i;
2442
2443         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2444         stats->rx_errors =
2445                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2446                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2447                 mac_stats->rx_undersize;
2448         stats->tx_errors = mac_stats->tx_fcs_error +
2449                                         mac_stats->tx_undersize;
2450         stats->rx_dropped = mac_stats->rx_drop;
2451         stats->tx_dropped = mac_stats->tx_drop;
2452         stats->multicast = mac_stats->rx_multicast;
2453         stats->collisions = mac_stats->tx_total_collision;
2454
2455         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2456
2457         /* receive ring buffer overflow  ?? */
2458
2459         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2460         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2461         /* recv'r fifo overrun */
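        /* only the first active Rx function's frame drops are counted
         * (note the break below)
         */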
2462         bmap = bna_rx_rid_mask(&bnad->bna);
2463         for (i = 0; bmap; i++) {
2464                 if (bmap & 1) {
2465                         stats->rx_fifo_errors +=
2466                                 bnad->stats.bna_stats->
2467                                         hw_stats.rxf_stats[i].frame_drops;
2468                         break;
2469                 }
2470                 bmap >>= 1;
2471         }
2472 }
2473
2474 static void
2475 bnad_mbox_irq_sync(struct bnad *bnad)
2476 {
2477         u32 irq;
2478         unsigned long flags;
2479
2480         spin_lock_irqsave(&bnad->bna_lock, flags);
2481         if (bnad->cfg_flags & BNAD_CF_MSIX)
2482                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2483         else
2484                 irq = bnad->pcidev->irq;
2485         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2486
2487         synchronize_irq(irq);
2488 }
2489
2490 /* Utility used by bnad_start_xmit, for doing TSO */
2491 static int
2492 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2493 {
2494         int err;
2495
2496         if (skb_header_cloned(skb)) {
2497                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2498                 if (err) {
2499                         BNAD_UPDATE_CTR(bnad, tso_err);
2500                         return err;
2501                 }
2502         }
2503
2504         /*
2505          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2506          * excluding the length field.
2507          */
2508         if (skb->protocol == htons(ETH_P_IP)) {
2509                 struct iphdr *iph = ip_hdr(skb);
2510
2511                 /* Do we really need these? */
2512                 iph->tot_len = 0;
2513                 iph->check = 0;
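                /* Presumably yes: clearing these keeps the stack's values
                 * out of the seeded pseudo-header checksum; the LSO engine
                 * fills in the per-segment length and IP checksum.
                 */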
2514
2515                 tcp_hdr(skb)->check =
2516                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2517                                            IPPROTO_TCP, 0);
2518                 BNAD_UPDATE_CTR(bnad, tso4);
2519         } else {
2520                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2521
2522                 ipv6h->payload_len = 0;
2523                 tcp_hdr(skb)->check =
2524                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2525                                          IPPROTO_TCP, 0);
2526                 BNAD_UPDATE_CTR(bnad, tso6);
2527         }
2528
2529         return 0;
2530 }
2531
2532 /*
2533  * Initialize Q numbers depending on Rx Paths
2534  * Called with bnad->bna_lock held, because of cfg_flags
2535  * access.
2536  */
2537 static void
2538 bnad_q_num_init(struct bnad *bnad)
2539 {
2540         int rxps;
2541
2542         rxps = min((uint)num_online_cpus(),
2543                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2544
2545         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2546                 rxps = 1;       /* INTx */
2547
2548         bnad->num_rx = 1;
2549         bnad->num_tx = 1;
2550         bnad->num_rxp_per_rx = rxps;
2551         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2552 }
2553
2554 /*
2555  * Adjusts the Q numbers, given a number of MSI-X vectors.
2556  * Give preference to RSS over Tx priority queues; in that
2557  * case, just use 1 Tx Q.
2558  * Called with bnad->bna_lock held because of cfg_flags access.
2559  */
2560 static void
2561 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2562 {
2563         bnad->num_txq_per_tx = 1;
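        /*
         * Rx paths get whatever vectors remain after the TxQ(s) and the
         * mailbox vector(s) are accounted for; if not enough vectors are
         * available, fall back to a single Rx path.
         */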
2564         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2565              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2566             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2567                 bnad->num_rxp_per_rx = msix_vectors -
2568                         (bnad->num_tx * bnad->num_txq_per_tx) -
2569                         BNAD_MAILBOX_MSIX_VECTORS;
2570         } else
2571                 bnad->num_rxp_per_rx = 1;
2572 }
2573
2574 /* Enable / disable ioceth */
2575 static int
2576 bnad_ioceth_disable(struct bnad *bnad)
2577 {
2578         unsigned long flags;
2579         int err = 0;
2580
2581         spin_lock_irqsave(&bnad->bna_lock, flags);
2582         init_completion(&bnad->bnad_completions.ioc_comp);
2583         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2584         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2585
2586         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2587                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2588
2589         err = bnad->bnad_completions.ioc_comp_status;
2590         return err;
2591 }
2592
2593 static int
2594 bnad_ioceth_enable(struct bnad *bnad)
2595 {
2596         int err = 0;
2597         unsigned long flags;
2598
2599         spin_lock_irqsave(&bnad->bna_lock, flags);
2600         init_completion(&bnad->bnad_completions.ioc_comp);
2601         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2602         bna_ioceth_enable(&bnad->bna.ioceth);
2603         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2604
2605         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2606                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2607
2608         err = bnad->bnad_completions.ioc_comp_status;
2609
2610         return err;
2611 }
2612
2613 /* Free BNA resources */
2614 static void
2615 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2616                 u32 res_val_max)
2617 {
2618         int i;
2619
2620         for (i = 0; i < res_val_max; i++)
2621                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2622 }
2623
2624 /* Allocates memory and interrupt resources for BNA */
2625 static int
2626 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2627                 u32 res_val_max)
2628 {
2629         int i, err;
2630
2631         for (i = 0; i < res_val_max; i++) {
2632                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2633                 if (err)
2634                         goto err_return;
2635         }
2636         return 0;
2637
2638 err_return:
2639         bnad_res_free(bnad, res_info, res_val_max);
2640         return err;
2641 }
2642
2643 /* Interrupt enable / disable */
2644 static void
2645 bnad_enable_msix(struct bnad *bnad)
2646 {
2647         int i, ret;
2648         unsigned long flags;
2649
2650         spin_lock_irqsave(&bnad->bna_lock, flags);
2651         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2652                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2653                 return;
2654         }
2655         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2656
2657         if (bnad->msix_table)
2658                 return;
2659
2660         bnad->msix_table =
2661                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2662
2663         if (!bnad->msix_table)
2664                 goto intx_mode;
2665
2666         for (i = 0; i < bnad->msix_num; i++)
2667                 bnad->msix_table[i].entry = i;
2668
2669         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2670         if (ret > 0) {
2671                 /* Not enough MSI-X vectors. */
2672                 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2673                         ret, bnad->msix_num);
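                /*
                 * A positive return is the number of vectors that could be
                 * allocated; shrink the queue configuration to fit, retry
                 * once, and fall back to INTx if that still fails.
                 */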
2674
2675                 spin_lock_irqsave(&bnad->bna_lock, flags);
2676                 /* ret = #of vectors that we got */
2677                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2678                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2679                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2680
2681                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2682                          BNAD_MAILBOX_MSIX_VECTORS;
2683
2684                 if (bnad->msix_num > ret)
2685                         goto intx_mode;
2686
2687                 /* Try once more with adjusted numbers */
2688                 /* If this fails, fall back to INTx */
2689                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2690                                       bnad->msix_num);
2691                 if (ret)
2692                         goto intx_mode;
2693
2694         } else if (ret < 0)
2695                 goto intx_mode;
2696
2697         pci_intx(bnad->pcidev, 0);
2698
2699         return;
2700
2701 intx_mode:
2702         pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2703
2704         kfree(bnad->msix_table);
2705         bnad->msix_table = NULL;
2706         bnad->msix_num = 0;
2707         spin_lock_irqsave(&bnad->bna_lock, flags);
2708         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2709         bnad_q_num_init(bnad);
2710         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2711 }
2712
2713 static void
2714 bnad_disable_msix(struct bnad *bnad)
2715 {
2716         u32 cfg_flags;
2717         unsigned long flags;
2718
2719         spin_lock_irqsave(&bnad->bna_lock, flags);
2720         cfg_flags = bnad->cfg_flags;
2721         if (bnad->cfg_flags & BNAD_CF_MSIX)
2722                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2723         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2724
2725         if (cfg_flags & BNAD_CF_MSIX) {
2726                 pci_disable_msix(bnad->pcidev);
2727                 kfree(bnad->msix_table);
2728                 bnad->msix_table = NULL;
2729         }
2730 }
2731
2732 /* Netdev entry points */
2733 static int
2734 bnad_open(struct net_device *netdev)
2735 {
2736         int err;
2737         struct bnad *bnad = netdev_priv(netdev);
2738         struct bna_pause_config pause_config;
2739         unsigned long flags;
2740
2741         mutex_lock(&bnad->conf_mutex);
2742
2743         /* Tx */
2744         err = bnad_setup_tx(bnad, 0);
2745         if (err)
2746                 goto err_return;
2747
2748         /* Rx */
2749         err = bnad_setup_rx(bnad, 0);
2750         if (err)
2751                 goto cleanup_tx;
2752
2753         /* Port */
2754         pause_config.tx_pause = 0;
2755         pause_config.rx_pause = 0;
2756
2757         spin_lock_irqsave(&bnad->bna_lock, flags);
2758         bna_enet_mtu_set(&bnad->bna.enet,
2759                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2760         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2761         bna_enet_enable(&bnad->bna.enet);
2762         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2763
2764         /* Enable broadcast */
2765         bnad_enable_default_bcast(bnad);
2766
2767         /* Restore VLANs, if any */
2768         bnad_restore_vlans(bnad, 0);
2769
2770         /* Set the UCAST address */
2771         spin_lock_irqsave(&bnad->bna_lock, flags);
2772         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2773         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2774
2775         /* Start the stats timer */
2776         bnad_stats_timer_start(bnad);
2777
2778         mutex_unlock(&bnad->conf_mutex);
2779
2780         return 0;
2781
2782 cleanup_tx:
2783         bnad_destroy_tx(bnad, 0);
2784
2785 err_return:
2786         mutex_unlock(&bnad->conf_mutex);
2787         return err;
2788 }
2789
2790 static int
2791 bnad_stop(struct net_device *netdev)
2792 {
2793         struct bnad *bnad = netdev_priv(netdev);
2794         unsigned long flags;
2795
2796         mutex_lock(&bnad->conf_mutex);
2797
2798         /* Stop the stats timer */
2799         bnad_stats_timer_stop(bnad);
2800
2801         init_completion(&bnad->bnad_completions.enet_comp);
2802
2803         spin_lock_irqsave(&bnad->bna_lock, flags);
2804         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2805                         bnad_cb_enet_disabled);
2806         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2807
2808         wait_for_completion(&bnad->bnad_completions.enet_comp);
2809
2810         bnad_destroy_tx(bnad, 0);
2811         bnad_destroy_rx(bnad, 0);
2812
2813         /* Synchronize mailbox IRQ */
2814         bnad_mbox_irq_sync(bnad);
2815
2816         mutex_unlock(&bnad->conf_mutex);
2817
2818         return 0;
2819 }
2820
2821 /* TX */
2822 /* Returns 0 for success */
2823 static int
2824 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2825                     struct sk_buff *skb, struct bna_txq_entry *txqent)
2826 {
2827         u16 flags = 0;
2828         u32 gso_size;
2829         u16 vlan_tag = 0;
2830
2831         if (vlan_tx_tag_present(skb)) {
2832                 vlan_tag = (u16)vlan_tx_tag_get(skb);
2833                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2834         }
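        /*
         * In CEE mode, overwrite the 802.1p priority bits with the TxQ's
         * priority while keeping the CFI/VID bits of the VLAN tag.
         */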
2835         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2836                 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2837                                 | (vlan_tag & 0x1fff);
2838                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2839         }
2840         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2841
2842         if (skb_is_gso(skb)) {
2843                 gso_size = skb_shinfo(skb)->gso_size;
2844                 if (unlikely(gso_size > bnad->netdev->mtu)) {
2845                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2846                         return -EINVAL;
2847                 }
2848                 if (unlikely((gso_size + skb_transport_offset(skb) +
2849                               tcp_hdrlen(skb)) >= skb->len)) {
2850                         txqent->hdr.wi.opcode =
2851                                 __constant_htons(BNA_TXQ_WI_SEND);
2852                         txqent->hdr.wi.lso_mss = 0;
2853                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2854                 } else {
2855                         txqent->hdr.wi.opcode =
2856                                 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2857                         txqent->hdr.wi.lso_mss = htons(gso_size);
2858                 }
2859
2860                 if (bnad_tso_prepare(bnad, skb)) {
2861                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2862                         return -EINVAL;
2863                 }
2864
2865                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2866                 txqent->hdr.wi.l4_hdr_size_n_offset =
2867                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2868                         tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2869         } else  {
2870                 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2871                 txqent->hdr.wi.lso_mss = 0;
2872
2873                 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2874                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2875                         return -EINVAL;
2876                 }
2877
2878                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2879                         u8 proto = 0;
2880
2881                         if (skb->protocol == __constant_htons(ETH_P_IP))
2882                                 proto = ip_hdr(skb)->protocol;
2883 #ifdef NETIF_F_IPV6_CSUM
2884                         else if (skb->protocol ==
2885                                  __constant_htons(ETH_P_IPV6)) {
2886                                 /* nexthdr may not be TCP immediately. */
2887                                 proto = ipv6_hdr(skb)->nexthdr;
2888                         }
2889 #endif
2890                         if (proto == IPPROTO_TCP) {
2891                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2892                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2893                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2894                                               (0, skb_transport_offset(skb)));
2895
2896                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2897
2898                                 if (unlikely(skb_headlen(skb) <
2899                                             skb_transport_offset(skb) +
2900                                     tcp_hdrlen(skb))) {
2901                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2902                                         return -EINVAL;
2903                                 }
2904                         } else if (proto == IPPROTO_UDP) {
2905                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2906                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2907                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2908                                               (0, skb_transport_offset(skb)));
2909
2910                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2911                                 if (unlikely(skb_headlen(skb) <
2912                                             skb_transport_offset(skb) +
2913                                     sizeof(struct udphdr))) {
2914                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2915                                         return -EINVAL;
2916                                 }
2917                         } else {
2918
2919                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2920                                 return -EINVAL;
2921                         }
2922                 } else
2923                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2924         }
2925
2926         txqent->hdr.wi.flags = htons(flags);
2927         txqent->hdr.wi.frame_length = htonl(skb->len);
2928
2929         return 0;
2930 }
2931
2932 /*
2933  * bnad_start_xmit : Netdev entry point for Transmit
2934  *                   Called under lock held by net_device
2935  */
2936 static netdev_tx_t
2937 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2938 {
2939         struct bnad *bnad = netdev_priv(netdev);
2940         u32 txq_id = 0;
2941         struct bna_tcb *tcb = NULL;
2942         struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2943         u32             prod, q_depth, vect_id;
2944         u32             wis, vectors, len;
2945         int             i;
2946         dma_addr_t              dma_addr;
2947         struct bna_txq_entry *txqent;
2948
2949         len = skb_headlen(skb);
2950
2951         /* Sanity checks for the skb */
2952
2953         if (unlikely(skb->len <= ETH_HLEN)) {
2954                 dev_kfree_skb(skb);
2955                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2956                 return NETDEV_TX_OK;
2957         }
2958         if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2959                 dev_kfree_skb(skb);
2960                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2961                 return NETDEV_TX_OK;
2962         }
2963         if (unlikely(len == 0)) {
2964                 dev_kfree_skb(skb);
2965                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2966                 return NETDEV_TX_OK;
2967         }
2968
2969         tcb = bnad->tx_info[0].tcb[txq_id];
2970
2971         /*
2972          * Takes care of the Tx that is scheduled between clearing the flag
2973          * and the netif_tx_stop_all_queues() call.
2974          */
2975         if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2976                 dev_kfree_skb(skb);
2977                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2978                 return NETDEV_TX_OK;
2979         }
2980
2981         q_depth = tcb->q_depth;
2982         prod = tcb->producer_index;
2983         unmap_q = tcb->unmap_q;
2984
2985         vectors = 1 + skb_shinfo(skb)->nr_frags;
2986         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2987
2988         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2989                 dev_kfree_skb(skb);
2990                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2991                 return NETDEV_TX_OK;
2992         }
2993
2994         /* Check for available TxQ resources */
2995         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2996                 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2997                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2998                         u32 sent;
2999                         sent = bnad_txcmpl_process(bnad, tcb);
3000                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3001                                 bna_ib_ack(tcb->i_dbell, sent);
3002                         smp_mb__before_clear_bit();
3003                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
3004                 } else {
3005                         netif_stop_queue(netdev);
3006                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3007                 }
3008
3009                 smp_mb();
3010                 /*
3011                  * Check again to deal with race condition between
3012                  * netif_stop_queue here, and netif_wake_queue in
3013                  * interrupt handler which is not inside netif tx lock.
3014                  */
3015                 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3016                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3017                         return NETDEV_TX_BUSY;
3018                 } else {
3019                         netif_wake_queue(netdev);
3020                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3021                 }
3022         }
3023
3024         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3025         head_unmap = &unmap_q[prod];
3026
3027         /* Program the opcode, flags, frame_len, num_vectors in WI */
3028         if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3029                 dev_kfree_skb(skb);
3030                 return NETDEV_TX_OK;
3031         }
3032         txqent->hdr.wi.reserved = 0;
3033         txqent->hdr.wi.num_vectors = vectors;
3034
3035         head_unmap->skb = skb;
3036         head_unmap->nvecs = 0;
3037
3038         /* Program the vectors */
3039         unmap = head_unmap;
3040         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3041                                   len, DMA_TO_DEVICE);
3042         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3043         txqent->vector[0].length = htons(len);
3044         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3045         head_unmap->nvecs++;
3046
3047         for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3048                 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3049                 u32             size = skb_frag_size(frag);
3050
3051                 if (unlikely(size == 0)) {
3052                         /* Undo the changes starting at tcb->producer_index */
3053                         bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3054                                 tcb->producer_index);
3055                         dev_kfree_skb(skb);
3056                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3057                         return NETDEV_TX_OK;
3058                 }
3059
3060                 len += size;
3061
3062                 vect_id++;
3063                 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3064                         vect_id = 0;
3065                         BNA_QE_INDX_INC(prod, q_depth);
3066                         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3067                         txqent->hdr.wi_ext.opcode =
3068                                 __constant_htons(BNA_TXQ_WI_EXTENSION);
3069                         unmap = &unmap_q[prod];
3070                 }
3071
3072                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3073                                             0, size, DMA_TO_DEVICE);
3074                 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3075                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3076                 txqent->vector[vect_id].length = htons(size);
3077                 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3078                                    dma_addr);
3079                 head_unmap->nvecs++;
3080         }
3081
3082         if (unlikely(len != skb->len)) {
3083                 /* Undo the changes starting at tcb->producer_index */
3084                 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3085                 dev_kfree_skb(skb);
3086                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3087                 return NETDEV_TX_OK;
3088         }
3089
3090         BNA_QE_INDX_INC(prod, q_depth);
3091         tcb->producer_index = prod;
3092
3093         smp_mb();
3094
3095         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3096                 return NETDEV_TX_OK;
3097
3098         skb_tx_timestamp(skb);
3099
3100         bna_txq_prod_indx_doorbell(tcb);
3101         smp_mb();
3102
3103         return NETDEV_TX_OK;
3104 }
3105
3106 /*
3107  * Uses the bna_lock spinlock to synchronize reading of the stats structures,
3108  * which are written by BNA under the same lock.
3109  */
3110 static struct rtnl_link_stats64 *
3111 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3112 {
3113         struct bnad *bnad = netdev_priv(netdev);
3114         unsigned long flags;
3115
3116         spin_lock_irqsave(&bnad->bna_lock, flags);
3117
3118         bnad_netdev_qstats_fill(bnad, stats);
3119         bnad_netdev_hwstats_fill(bnad, stats);
3120
3121         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3122
3123         return stats;
3124 }
3125
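/*
 * Program the netdev unicast address list into the adapter's UCAM.
 * Falls back to default-function routing (BNAD_CF_DEFAULT) when the
 * list exceeds the UCAM capacity or cannot be programmed.
 */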
3126 static void
3127 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3128 {
3129         struct net_device *netdev = bnad->netdev;
3130         int uc_count = netdev_uc_count(netdev);
3131         enum bna_cb_status ret;
3132         u8 *mac_list;
3133         struct netdev_hw_addr *ha;
3134         int entry;
3135
3136         if (netdev_uc_empty(bnad->netdev)) {
3137                 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3138                 return;
3139         }
3140
3141         if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3142                 goto mode_default;
3143
3144         mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3145         if (mac_list == NULL)
3146                 goto mode_default;
3147
3148         entry = 0;
3149         netdev_for_each_uc_addr(ha, netdev) {
3150                 memcpy(&mac_list[entry * ETH_ALEN],
3151                        &ha->addr[0], ETH_ALEN);
3152                 entry++;
3153         }
3154
3155         ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3156                         mac_list, NULL);
3157         kfree(mac_list);
3158
3159         if (ret != BNA_CB_SUCCESS)
3160                 goto mode_default;
3161
3162         return;
3163
3164         /* ucast packets not in UCAM are routed to default function */
3165 mode_default:
3166         bnad->cfg_flags |= BNAD_CF_DEFAULT;
3167         bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3168 }
3169
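/*
 * Program the netdev multicast list (plus the broadcast address) into
 * the adapter's multicast address table.  Falls back to ALLMULTI mode
 * when IFF_ALLMULTI is set, or when the list exceeds the table capacity
 * or cannot be programmed.
 */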
3170 static void
3171 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3172 {
3173         struct net_device *netdev = bnad->netdev;
3174         int mc_count = netdev_mc_count(netdev);
3175         enum bna_cb_status ret;
3176         u8 *mac_list;
3177
3178         if (netdev->flags & IFF_ALLMULTI)
3179                 goto mode_allmulti;
3180
3181         if (netdev_mc_empty(netdev))
3182                 return;
3183
3184         if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3185                 goto mode_allmulti;
3186
3187         mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3188
3189         if (mac_list == NULL)
3190                 goto mode_allmulti;
3191
3192         memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3193
3194         /* copy rest of the MCAST addresses */
3195         bnad_netdev_mc_list_get(netdev, mac_list);
3196         ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3197                         mac_list, NULL);
3198         kfree(mac_list);
3199
3200         if (ret != BNA_CB_SUCCESS)
3201                 goto mode_allmulti;
3202
3203         return;
3204
3205 mode_allmulti:
3206         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3207         bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3208 }
3209
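/*
 * .ndo_set_rx_mode handler: rebuilds the promiscuous/default/allmulti
 * configuration from netdev->flags and the unicast/multicast lists,
 * then pushes the resulting mode to the hardware.  VLAN stripping is
 * disabled while in promiscuous mode.
 */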
3210 void
3211 bnad_set_rx_mode(struct net_device *netdev)
3212 {
3213         struct bnad *bnad = netdev_priv(netdev);
3214         enum bna_rxmode new_mode, mode_mask;
3215         unsigned long flags;
3216
3217         spin_lock_irqsave(&bnad->bna_lock, flags);
3218
3219         if (bnad->rx_info[0].rx == NULL) {
3220                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3221                 return;
3222         }
3223
3224         /* clear bnad flags so they can be rebuilt from the new settings */
3225         bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3226                         BNAD_CF_ALLMULTI);
3227
3228         new_mode = 0;
3229         if (netdev->flags & IFF_PROMISC) {
3230                 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3231                 bnad->cfg_flags |= BNAD_CF_PROMISC;
3232         } else {
3233                 bnad_set_rx_mcast_fltr(bnad);
3234
3235                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3236                         new_mode |= BNA_RXMODE_ALLMULTI;
3237
3238                 bnad_set_rx_ucast_fltr(bnad);
3239
3240                 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3241                         new_mode |= BNA_RXMODE_DEFAULT;
3242         }
3243
3244         mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3245                         BNA_RXMODE_ALLMULTI;
3246         bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
3247
3248         if (bnad->cfg_flags & BNAD_CF_PROMISC)
3249                 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3250         else
3251                 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3252
3253         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3254 }
3255
3256 /*
3257  * bna_lock is used to sync writes to netdev->addr;
3258  * conf_mutex cannot be used since this call may be made
3259  * in a non-blocking context.
3260  */
3261 static int
3262 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3263 {
3264         int err;
3265         struct bnad *bnad = netdev_priv(netdev);
3266         struct sockaddr *sa = (struct sockaddr *)mac_addr;
3267         unsigned long flags;
3268
3269         spin_lock_irqsave(&bnad->bna_lock, flags);
3270
3271         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3272
3273         if (!err)
3274                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3275
3276         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3277
3278         return err;
3279 }
3280
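/*
 * Push the new frame size to the ENET block and wait for the
 * asynchronous completion posted by bnad_cb_enet_mtu_set().
 */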
3281 static int
3282 bnad_mtu_set(struct bnad *bnad, int frame_size)
3283 {
3284         unsigned long flags;
3285
3286         init_completion(&bnad->bnad_completions.mtu_comp);
3287
3288         spin_lock_irqsave(&bnad->bna_lock, flags);
3289         bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3290         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3291
3292         wait_for_completion(&bnad->bnad_completions.mtu_comp);
3293
3294         return bnad->bnad_completions.mtu_comp_status;
3295 }
3296
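/*
 * .ndo_change_mtu handler: validates the requested MTU, re-creates the
 * Rx path when the frame size crosses the 4K multi-buffer boundary on
 * CAT2 adapters, and programs the new frame size into the hardware.
 */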
3297 static int
3298 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3299 {
3300         int err, mtu;
3301         struct bnad *bnad = netdev_priv(netdev);
3302         u32 rx_count = 0, frame, new_frame;
3303
3304         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3305                 return -EINVAL;
3306
3307         mutex_lock(&bnad->conf_mutex);
3308
3309         mtu = netdev->mtu;
3310         netdev->mtu = new_mtu;
3311
3312         frame = BNAD_FRAME_SIZE(mtu);
3313         new_frame = BNAD_FRAME_SIZE(new_mtu);
3314
3315         /* check if multi-buffer needs to be enabled */
3316         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3317             netif_running(bnad->netdev)) {
3318                 /* only when the frame size crosses the 4K boundary */
3319                 if ((frame <= 4096 && new_frame > 4096) ||
3320                     (frame > 4096 && new_frame <= 4096))
3321                         rx_count = bnad_reinit_rx(bnad);
3322         }
3323
3324         /* rx_count > 0 - a new Rx was created;
3325          *      Linux expects us to set err = 0 and return.
3326          */
3327         err = bnad_mtu_set(bnad, new_frame);
3328         if (err)
3329                 err = -EBUSY;
3330
3331         mutex_unlock(&bnad->conf_mutex);
3332         return err;
3333 }
3334
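/*
 * .ndo_vlan_rx_add_vid handler: adds the VLAN ID to the hardware filter
 * and records it in the active_vlans bitmap.
 */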
3335 static int
3336 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3337 {
3338         struct bnad *bnad = netdev_priv(netdev);
3339         unsigned long flags;
3340
3341         if (!bnad->rx_info[0].rx)
3342                 return 0;
3343
3344         mutex_lock(&bnad->conf_mutex);
3345
3346         spin_lock_irqsave(&bnad->bna_lock, flags);
3347         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3348         set_bit(vid, bnad->active_vlans);
3349         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3350
3351         mutex_unlock(&bnad->conf_mutex);
3352
3353         return 0;
3354 }
3355
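/*
 * .ndo_vlan_rx_kill_vid handler: clears the VLAN ID from the
 * active_vlans bitmap and removes it from the hardware filter.
 */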
3356 static int
3357 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3358 {
3359         struct bnad *bnad = netdev_priv(netdev);
3360         unsigned long flags;
3361
3362         if (!bnad->rx_info[0].rx)
3363                 return 0;
3364
3365         mutex_lock(&bnad->conf_mutex);
3366
3367         spin_lock_irqsave(&bnad->bna_lock, flags);
3368         clear_bit(vid, bnad->active_vlans);
3369         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3370         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3371
3372         mutex_unlock(&bnad->conf_mutex);
3373
3374         return 0;
3375 }
3376
3377 #ifdef CONFIG_NET_POLL_CONTROLLER
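/*
 * .ndo_poll_controller handler: in INTx mode the ISR is invoked directly
 * with interrupts masked; in MSI-X mode only the Rx paths are polled,
 * since Tx completions are already processed in the send path.
 */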
3378 static void
3379 bnad_netpoll(struct net_device *netdev)
3380 {
3381         struct bnad *bnad = netdev_priv(netdev);
3382         struct bnad_rx_info *rx_info;
3383         struct bnad_rx_ctrl *rx_ctrl;
3384         u32 curr_mask;
3385         int i, j;
3386
3387         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3388                 bna_intx_disable(&bnad->bna, curr_mask);
3389                 bnad_isr(bnad->pcidev->irq, netdev);
3390                 bna_intx_enable(&bnad->bna, curr_mask);
3391         } else {
3392                 /*
3393                  * Tx processing may happen in sending context, so no need
3394                  * to explicitly process completions here
3395                  */
3396
3397                 /* Rx processing */
3398                 for (i = 0; i < bnad->num_rx; i++) {
3399                         rx_info = &bnad->rx_info[i];
3400                         if (!rx_info->rx)
3401                                 continue;
3402                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3403                                 rx_ctrl = &rx_info->rx_ctrl[j];
3404                                 if (rx_ctrl->ccb)
3405                                         bnad_netif_rx_schedule_poll(bnad,
3406                                                             rx_ctrl->ccb);
3407                         }
3408                 }
3409         }
3410 }
3411 #endif
3412
3413 static const struct net_device_ops bnad_netdev_ops = {
3414         .ndo_open               = bnad_open,
3415         .ndo_stop               = bnad_stop,
3416         .ndo_start_xmit         = bnad_start_xmit,
3417         .ndo_get_stats64                = bnad_get_stats64,
3418         .ndo_set_rx_mode        = bnad_set_rx_mode,
3419         .ndo_validate_addr      = eth_validate_addr,
3420         .ndo_set_mac_address    = bnad_set_mac_address,
3421         .ndo_change_mtu         = bnad_change_mtu,
3422         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3423         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3424 #ifdef CONFIG_NET_POLL_CONTROLLER
3425         .ndo_poll_controller    = bnad_netpoll
3426 #endif
3427 };
3428
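/*
 * Set up netdev features (checksum offload, TSO, VLAN offloads, and
 * HIGHDMA when 64-bit DMA is usable), the MMIO range, and the
 * netdev/ethtool operations.
 */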
3429 static void
3430 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3431 {
3432         struct net_device *netdev = bnad->netdev;
3433
3434         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3435                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3436                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
3437
3438         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3439                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3440                 NETIF_F_TSO | NETIF_F_TSO6;
3441
3442         netdev->features |= netdev->hw_features |
3443                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3444
3445         if (using_dac)
3446                 netdev->features |= NETIF_F_HIGHDMA;
3447
3448         netdev->mem_start = bnad->mmio_start;
3449         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3450
3451         netdev->netdev_ops = &bnad_netdev_ops;
3452         bnad_set_ethtool_ops(netdev);
3453 }
3454
3455 /*
3456  * 1. Initialize the bnad structure
3457  * 2. Set up the netdev pointer in pci_dev
3458  * 3. Initialize the number of TxQs, CQs and MSIX vectors
3459  * 4. Initialize the work queue
3460  */
3461 static int
3462 bnad_init(struct bnad *bnad,
3463           struct pci_dev *pdev, struct net_device *netdev)
3464 {
3465         unsigned long flags;
3466
3467         SET_NETDEV_DEV(netdev, &pdev->dev);
3468         pci_set_drvdata(pdev, netdev);
3469
3470         bnad->netdev = netdev;
3471         bnad->pcidev = pdev;
3472         bnad->mmio_start = pci_resource_start(pdev, 0);
3473         bnad->mmio_len = pci_resource_len(pdev, 0);
3474         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3475         if (!bnad->bar0) {
3476                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3477                 return -ENOMEM;
3478         }
3479         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3480                (unsigned long long) bnad->mmio_len);
3481
3482         spin_lock_irqsave(&bnad->bna_lock, flags);
3483         if (!bnad_msix_disable)
3484                 bnad->cfg_flags = BNAD_CF_MSIX;
3485
3486         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3487
3488         bnad_q_num_init(bnad);
3489         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3490
3491         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3492                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3493                          BNAD_MAILBOX_MSIX_VECTORS;
3494
3495         bnad->txq_depth = BNAD_TXQ_DEPTH;
3496         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3497
3498         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3499         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3500
3501         sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3502         bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3503         if (!bnad->work_q) {
3504                 iounmap(bnad->bar0);
3505                 return -ENOMEM;
3506         }
3507
3508         return 0;
3509 }
3510
3511 /*
3512  * Must be called after bnad_pci_uninit()
3513  * so that iounmap() and pci_set_drvdata(NULL)
3514  * happen only after PCI uninitialization.
3515  */
3516 static void
3517 bnad_uninit(struct bnad *bnad)
3518 {
3519         if (bnad->work_q) {
3520                 flush_workqueue(bnad->work_q);
3521                 destroy_workqueue(bnad->work_q);
3522                 bnad->work_q = NULL;
3523         }
3524
3525         if (bnad->bar0)
3526                 iounmap(bnad->bar0);
3527 }
3528
3529 /*
3530  * Initialize locks
3531  *      a) Per-ioceth mutex used for serializing configuration
3532  *         changes from the OS interface
3533  *      b) Spinlock used to protect the bna state machine
3534  */
3535 static void
3536 bnad_lock_init(struct bnad *bnad)
3537 {
3538         spin_lock_init(&bnad->bna_lock);
3539         mutex_init(&bnad->conf_mutex);
3540         mutex_init(&bnad_list_mutex);
3541 }
3542
3543 static void
3544 bnad_lock_uninit(struct bnad *bnad)
3545 {
3546         mutex_destroy(&bnad->conf_mutex);
3547         mutex_destroy(&bnad_list_mutex);
3548 }
3549
3550 /* PCI Initialization */
3551 static int
3552 bnad_pci_init(struct bnad *bnad,
3553               struct pci_dev *pdev, bool *using_dac)
3554 {
3555         int err;
3556
3557         err = pci_enable_device(pdev);
3558         if (err)
3559                 return err;
3560         err = pci_request_regions(pdev, BNAD_NAME);
3561         if (err)
3562                 goto disable_device;
3563         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3564                 *using_dac = true;
3565         } else {
3566                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3567                 if (err)
3568                         goto release_regions;
3569                 *using_dac = false;
3570         }
3571         pci_set_master(pdev);
3572         return 0;
3573
3574 release_regions:
3575         pci_release_regions(pdev);
3576 disable_device:
3577         pci_disable_device(pdev);
3578
3579         return err;
3580 }
3581
3582 static void
3583 bnad_pci_uninit(struct pci_dev *pdev)
3584 {
3585         pci_release_regions(pdev);
3586         pci_disable_device(pdev);
3587 }
3588
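/*
 * PCI probe entry point: loads the firmware image, allocates the netdev
 * and bnad, maps BAR0, brings up the BNA/IOC layers with their timers,
 * allocates resources, and finally registers the netdev.
 */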
3589 static int
3590 bnad_pci_probe(struct pci_dev *pdev,
3591                 const struct pci_device_id *pcidev_id)
3592 {
3593         bool    using_dac;
3594         int     err;
3595         struct bnad *bnad;
3596         struct bna *bna;
3597         struct net_device *netdev;
3598         struct bfa_pcidev pcidev_info;
3599         unsigned long flags;
3600
3601         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3602                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3603
3604         mutex_lock(&bnad_fwimg_mutex);
3605         if (!cna_get_firmware_buf(pdev)) {
3606                 mutex_unlock(&bnad_fwimg_mutex);
3607                 pr_warn("Failed to load Firmware Image!\n");
3608                 return -ENODEV;
3609         }
3610         mutex_unlock(&bnad_fwimg_mutex);
3611
3612         /*
3613          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3614          * bnad = netdev_priv(netdev)
3615          */
3616         netdev = alloc_etherdev(sizeof(struct bnad));
3617         if (!netdev) {
3618                 err = -ENOMEM;
3619                 return err;
3620         }
3621         bnad = netdev_priv(netdev);
3622         bnad_lock_init(bnad);
3623         bnad_add_to_list(bnad);
3624
3625         mutex_lock(&bnad->conf_mutex);
3626         /*
3627          * PCI initialization
3628          *      Output : using_dac = 1 for 64 bit DMA
3629          *                         = 0 for 32 bit DMA
3630          */
3631         using_dac = false;
3632         err = bnad_pci_init(bnad, pdev, &using_dac);
3633         if (err)
3634                 goto unlock_mutex;
3635
3636         /*
3637          * Initialize bnad structure
3638          * Setup relation between pci_dev & netdev
3639          */
3640         err = bnad_init(bnad, pdev, netdev);
3641         if (err)
3642                 goto pci_uninit;
3643
3644         /* Initialize netdev structure, set up ethtool ops */
3645         bnad_netdev_init(bnad, using_dac);
3646
3647         /* Set link to down state */
3648         netif_carrier_off(netdev);
3649
3650         /* Set up the debugfs node for this bnad */
3651         if (bna_debugfs_enable)
3652                 bnad_debugfs_init(bnad);
3653
3654         /* Get resource requirement from bna */
3655         spin_lock_irqsave(&bnad->bna_lock, flags);
3656         bna_res_req(&bnad->res_info[0]);
3657         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3658
3659         /* Allocate resources from bna */
3660         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3661         if (err)
3662                 goto drv_uninit;
3663
3664         bna = &bnad->bna;
3665
3666         /* Setup pcidev_info for bna_init() */
3667         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3668         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3669         pcidev_info.device_id = bnad->pcidev->device;
3670         pcidev_info.pci_bar_kva = bnad->bar0;
3671
3672         spin_lock_irqsave(&bnad->bna_lock, flags);
3673         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3674         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3675
3676         bnad->stats.bna_stats = &bna->stats;
3677
3678         bnad_enable_msix(bnad);
3679         err = bnad_mbox_irq_alloc(bnad);
3680         if (err)
3681                 goto res_free;
3682
3683         /* Set up timers */
3684         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3685                                 ((unsigned long)bnad));
3686         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3687                                 ((unsigned long)bnad));
3688         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3689                                 ((unsigned long)bnad));
3690         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3691                                 ((unsigned long)bnad));
3692
3693         /* Now start the timer before calling IOC */
3694         mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3695                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3696
3697         /*
3698          * Start the chip
3699          * If the callback returns an error, we bail out.
3700          * This is a catastrophic error.
3701          */
3702         err = bnad_ioceth_enable(bnad);
3703         if (err) {
3704                 pr_err("BNA: Initialization failed err=%d\n",
3705                        err);
3706                 goto probe_success;
3707         }
3708
3709         spin_lock_irqsave(&bnad->bna_lock, flags);
3710         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3711                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3712                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3713                         bna_attr(bna)->num_rxp - 1);
3714                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3715                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3716                         err = -EIO;
3717         }
3718         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3719         if (err)
3720                 goto disable_ioceth;
3721
3722         spin_lock_irqsave(&bnad->bna_lock, flags);
3723         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3724         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3725
3726         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3727         if (err) {
3728                 err = -EIO;
3729                 goto disable_ioceth;
3730         }
3731
3732         spin_lock_irqsave(&bnad->bna_lock, flags);
3733         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3734         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3735
3736         /* Get the burnt-in mac */
3737         spin_lock_irqsave(&bnad->bna_lock, flags);
3738         bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3739         bnad_set_netdev_perm_addr(bnad);
3740         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3741
3742         mutex_unlock(&bnad->conf_mutex);
3743
3744         /* Finally, register with the net_device layer */
3745         err = register_netdev(netdev);
3746         if (err) {
3747                 pr_err("BNA : Registering with netdev failed\n");
3748                 goto probe_uninit;
3749         }
3750         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3751
3752         return 0;
3753
3754 probe_success:
3755         mutex_unlock(&bnad->conf_mutex);
3756         return 0;
3757
3758 probe_uninit:
3759         mutex_lock(&bnad->conf_mutex);
3760         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3761 disable_ioceth:
3762         bnad_ioceth_disable(bnad);
3763         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3764         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3765         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3766         spin_lock_irqsave(&bnad->bna_lock, flags);
3767         bna_uninit(bna);
3768         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3769         bnad_mbox_irq_free(bnad);
3770         bnad_disable_msix(bnad);
3771 res_free:
3772         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3773 drv_uninit:
3774         /* Remove the debugfs node for this bnad */
3775         kfree(bnad->regdata);
3776         bnad_debugfs_uninit(bnad);
3777         bnad_uninit(bnad);
3778 pci_uninit:
3779         bnad_pci_uninit(pdev);
3780 unlock_mutex:
3781         mutex_unlock(&bnad->conf_mutex);
3782         bnad_remove_from_list(bnad);
3783         bnad_lock_uninit(bnad);
3784         free_netdev(netdev);
3785         return err;
3786 }
3787
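/*
 * PCI remove entry point: unregisters the netdev, disables the IOC,
 * stops the IOC timers, and tears down BNA resources, interrupts, PCI
 * state and the netdev in the reverse order of probe.
 */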
3788 static void
3789 bnad_pci_remove(struct pci_dev *pdev)
3790 {
3791         struct net_device *netdev = pci_get_drvdata(pdev);
3792         struct bnad *bnad;
3793         struct bna *bna;
3794         unsigned long flags;
3795
3796         if (!netdev)
3797                 return;
3798
3799         pr_info("%s bnad_pci_remove\n", netdev->name);
3800         bnad = netdev_priv(netdev);
3801         bna = &bnad->bna;
3802
3803         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3804                 unregister_netdev(netdev);
3805
3806         mutex_lock(&bnad->conf_mutex);
3807         bnad_ioceth_disable(bnad);
3808         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3809         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3810         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3811         spin_lock_irqsave(&bnad->bna_lock, flags);
3812         bna_uninit(bna);
3813         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3814
3815         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3816         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3817         bnad_mbox_irq_free(bnad);
3818         bnad_disable_msix(bnad);
3819         bnad_pci_uninit(pdev);
3820         mutex_unlock(&bnad->conf_mutex);
3821         bnad_remove_from_list(bnad);
3822         bnad_lock_uninit(bnad);
3823         /* Remove the debugfs node for this bnad */
3824         kfree(bnad->regdata);
3825         bnad_debugfs_uninit(bnad);
3826         bnad_uninit(bnad);
3827         free_netdev(netdev);
3828 }
3829
3830 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3831         {
3832                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3833                         PCI_DEVICE_ID_BROCADE_CT),
3834                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3835                 .class_mask =  0xffff00
3836         },
3837         {
3838                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3839                         BFA_PCI_DEVICE_ID_CT2),
3840                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3841                 .class_mask =  0xffff00
3842         },
3843         {0,  },
3844 };
3845
3846 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3847
3848 static struct pci_driver bnad_pci_driver = {
3849         .name = BNAD_NAME,
3850         .id_table = bnad_pci_id_table,
3851         .probe = bnad_pci_probe,
3852         .remove = bnad_pci_remove,
3853 };
3854
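/*
 * Module init: propagate the IOC auto-recovery module parameter to the
 * IOC layer and register the PCI driver.
 */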
3855 static int __init
3856 bnad_module_init(void)
3857 {
3858         int err;
3859
3860         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3861                         BNAD_VERSION);
3862
3863         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3864
3865         err = pci_register_driver(&bnad_pci_driver);
3866         if (err < 0) {
3867                 pr_err("bna : PCI registration failed in module init "
3868                        "(%d)\n", err);
3869                 return err;
3870         }
3871
3872         return 0;
3873 }
3874
3875 static void __exit
3876 bnad_module_exit(void)
3877 {
3878         pci_unregister_driver(&bnad_pci_driver);
3879         release_firmware(bfi_fw);
3880 }
3881
3882 module_init(bnad_module_init);
3883 module_exit(bnad_module_exit);
3884
3885 MODULE_AUTHOR("Brocade");
3886 MODULE_LICENSE("GPL");
3887 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3888 MODULE_VERSION(BNAD_VERSION);
3889 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3890 MODULE_FIRMWARE(CNA_FW_FILE_CT2);