/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");
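/*
 * Example use of the parameters above (a sketch; the module name "bna" is
 * assumed here): load with MSI-X disabled and the debugfs feature off:
 *
 *	modprobe bna bnad_msix_disable=1 bna_debugfs_enable=0
 */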
static u32 bnad_rxqs_per_cq = 2;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
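/*
 * Helper macros: BNAD_GET_MBOX_IRQ returns the mailbox IRQ, picking the
 * dedicated MSI-X vector when MSI-X is enabled and the PCI legacy IRQ
 * otherwise; BNAD_FILL_UNMAPQ_MEM_REQ fills a kernel-virtual (KVA) memory
 * resource request used for the Tx/Rx unmap queues.
 */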
#define BNAD_GET_MBOX_IRQ(_bnad)					\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?				\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) :	\
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)		\
do {									\
	(_res_info)->res_type = BNA_RES_T_MEM;				\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;		\
	(_res_info)->res_u.mem_info.num = (_num);			\
	(_res_info)->res_u.mem_info.len = (_size);			\
} while (0)
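/* Global list of bnad instances, protected by bnad_list_mutex */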
static void
bnad_add_to_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_add_tail(&bnad->list_entry, &bnad_list);
	mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_del(&bnad->list_entry);
	mutex_unlock(&bnad_list_mutex);
}
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
109 /* Tx Datapath functions */
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
114 bnad_tx_buff_unmap(struct bnad *bnad,
115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
118 struct bnad_tx_unmap *unmap;
122 unmap = &unmap_q[index];
123 nvecs = unmap->nvecs;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
143 dma_unmap_page(&bnad->pcidev->dev,
144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145 skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
146 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
150 BNA_QE_INDX_INC(index, q_depth);
156 * Frees all pending Tx Bufs
157 * At this point no activity is expected on the Q,
158 * so DMA unmap & freeing is fine.
161 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
167 for (i = 0; i < tcb->q_depth; i++) {
168 skb = unmap_q[i].skb;
171 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
173 dev_kfree_skb_any(skb);
178 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
179 * Can be called in a) Interrupt context
183 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
185 u32 sent_packets = 0, sent_bytes = 0;
186 u32 wis, unmap_wis, hw_cons, cons, q_depth;
187 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
188 struct bnad_tx_unmap *unmap;
191 /* Just return if TX is stopped */
192 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
195 hw_cons = *(tcb->hw_consumer_index);
196 cons = tcb->consumer_index;
197 q_depth = tcb->q_depth;
199 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
200 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
203 unmap = &unmap_q[cons];
208 sent_bytes += skb->len;
210 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
213 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
214 dev_kfree_skb_any(skb);
217 /* Update consumer pointers. */
218 tcb->consumer_index = hw_cons;
220 tcb->txq->tx_packets += sent_packets;
221 tcb->txq->tx_bytes += sent_bytes;
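/*
 * Reclaim completed Tx buffers via bnad_txcmpl_process(), wake the netdev
 * queue if enough descriptors were freed, and ack the IB with the count.
 */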
227 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
229 struct net_device *netdev = bnad->netdev;
232 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
235 sent = bnad_txcmpl_process(bnad, tcb);
237 if (netif_queue_stopped(netdev) &&
238 netif_carrier_ok(netdev) &&
239 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
240 BNAD_NETIF_WAKE_THRESHOLD) {
241 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
242 netif_wake_queue(netdev);
243 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
248 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
249 bna_ib_ack(tcb->i_dbell, sent);
251 smp_mb__before_clear_bit();
252 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
257 /* MSIX Tx Completion Handler */
259 bnad_msix_tx(int irq, void *data)
261 struct bna_tcb *tcb = (struct bna_tcb *)data;
262 struct bnad *bnad = tcb->bnad;
264 bnad_tx_complete(bnad, tcb);
270 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
272 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
274 unmap_q->reuse_pi = -1;
275 unmap_q->alloc_order = -1;
276 unmap_q->map_size = 0;
277 unmap_q->type = BNAD_RXBUF_NONE;
280 /* Default is page-based allocation. Multi-buffer support - TBD */
282 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
284 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
287 bnad_rxq_alloc_uninit(bnad, rcb);
289 order = get_order(rcb->rxq->buffer_size);
291 unmap_q->type = BNAD_RXBUF_PAGE;
293 if (bna_is_small_rxq(rcb->id)) {
294 unmap_q->alloc_order = 0;
295 unmap_q->map_size = rcb->rxq->buffer_size;
297 if (rcb->rxq->multi_buffer) {
298 unmap_q->alloc_order = 0;
299 unmap_q->map_size = rcb->rxq->buffer_size;
300 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
302 unmap_q->alloc_order = order;
304 (rcb->rxq->buffer_size > 2048) ?
305 PAGE_SIZE << order : 2048;
309 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
315 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
320 dma_unmap_page(&bnad->pcidev->dev,
321 dma_unmap_addr(&unmap->vector, dma_addr),
322 unmap->vector.len, DMA_FROM_DEVICE);
323 put_page(unmap->page);
325 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
326 unmap->vector.len = 0;
330 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
335 dma_unmap_single(&bnad->pcidev->dev,
336 dma_unmap_addr(&unmap->vector, dma_addr),
337 unmap->vector.len, DMA_FROM_DEVICE);
338 dev_kfree_skb_any(unmap->skb);
340 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
341 unmap->vector.len = 0;
345 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
347 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
350 for (i = 0; i < rcb->q_depth; i++) {
351 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
353 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
354 bnad_rxq_cleanup_skb(bnad, unmap);
356 bnad_rxq_cleanup_page(bnad, unmap);
358 bnad_rxq_alloc_uninit(bnad, rcb);
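/*
 * Refill a page-based RxQ: a higher-order page is carved into map_size
 * chunks, and a partially consumed page is remembered via reuse_pi so the
 * next entry reuses its remaining space before a new page is allocated.
 */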
362 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
364 u32 alloced, prod, q_depth;
365 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
366 struct bnad_rx_unmap *unmap, *prev;
367 struct bna_rxq_entry *rxent;
369 u32 page_offset, alloc_size;
372 prod = rcb->producer_index;
373 q_depth = rcb->q_depth;
375 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
379 unmap = &unmap_q->unmap[prod];
381 if (unmap_q->reuse_pi < 0) {
382 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
383 unmap_q->alloc_order);
386 prev = &unmap_q->unmap[unmap_q->reuse_pi];
388 page_offset = prev->page_offset + unmap_q->map_size;
392 if (unlikely(!page)) {
393 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394 rcb->rxq->rxbuf_alloc_failed++;
398 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
399 unmap_q->map_size, DMA_FROM_DEVICE);
402 unmap->page_offset = page_offset;
403 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
404 unmap->vector.len = unmap_q->map_size;
405 page_offset += unmap_q->map_size;
407 if (page_offset < alloc_size)
408 unmap_q->reuse_pi = prod;
410 unmap_q->reuse_pi = -1;
412 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
413 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
414 BNA_QE_INDX_INC(prod, q_depth);
419 if (likely(alloced)) {
420 rcb->producer_index = prod;
422 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
423 bna_rxq_prod_indx_doorbell(rcb);
430 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
432 u32 alloced, prod, q_depth, buff_sz;
433 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
434 struct bnad_rx_unmap *unmap;
435 struct bna_rxq_entry *rxent;
439 buff_sz = rcb->rxq->buffer_size;
440 prod = rcb->producer_index;
441 q_depth = rcb->q_depth;
445 unmap = &unmap_q->unmap[prod];
447 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
449 if (unlikely(!skb)) {
450 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
451 rcb->rxq->rxbuf_alloc_failed++;
454 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
455 buff_sz, DMA_FROM_DEVICE);
458 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
459 unmap->vector.len = buff_sz;
461 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
462 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
463 BNA_QE_INDX_INC(prod, q_depth);
468 if (likely(alloced)) {
469 rcb->producer_index = prod;
471 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
472 bna_rxq_prod_indx_doorbell(rcb);
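/*
 * Post receive buffers to an RxQ. The refill is skipped unless at least
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) entries are free, so buffers are
 * replenished in batches rather than on every completion.
 */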
479 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
481 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
484 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
485 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
488 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
489 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
491 bnad_rxq_refill_page(bnad, rcb, to_alloc);
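/*
 * Completion-flag masks used to validate hardware checksum results: a
 * frame is marked CHECKSUM_UNNECESSARY only when its masked flags match
 * exactly one of the TCP/UDP over IPv4/IPv6 combinations below.
 */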
494 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
496 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
497 BNA_CQ_EF_L4_CKSUM_OK)
499 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
500 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
501 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
502 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
503 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
504 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
505 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
506 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
509 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
510 u32 sop_ci, u32 nvecs)
512 struct bnad_rx_unmap_q *unmap_q;
513 struct bnad_rx_unmap *unmap;
516 unmap_q = rcb->unmap_q;
517 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
518 unmap = &unmap_q->unmap[ci];
519 BNA_QE_INDX_INC(ci, rcb->q_depth);
521 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
522 bnad_rxq_cleanup_skb(bnad, unmap);
524 bnad_rxq_cleanup_page(bnad, unmap);
529 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
530 u32 sop_ci, u32 nvecs, u32 last_fraglen)
533 u32 ci, vec, len, totlen = 0;
534 struct bnad_rx_unmap_q *unmap_q;
535 struct bnad_rx_unmap *unmap;
537 unmap_q = rcb->unmap_q;
540 /* prefetch header */
541 prefetch(page_address(unmap_q->unmap[sop_ci].page) +
542 unmap_q->unmap[sop_ci].page_offset);
544 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
545 unmap = &unmap_q->unmap[ci];
546 BNA_QE_INDX_INC(ci, rcb->q_depth);
548 dma_unmap_page(&bnad->pcidev->dev,
549 dma_unmap_addr(&unmap->vector, dma_addr),
550 unmap->vector.len, DMA_FROM_DEVICE);
552 len = (vec == nvecs) ?
553 last_fraglen : unmap->vector.len;
556 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
557 unmap->page, unmap->page_offset, len);
560 unmap->vector.len = 0;
564 skb->data_len += totlen;
565 skb->truesize += totlen;
569 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
570 struct bnad_rx_unmap *unmap, u32 len)
574 dma_unmap_single(&bnad->pcidev->dev,
575 dma_unmap_addr(&unmap->vector, dma_addr),
576 unmap->vector.len, DMA_FROM_DEVICE);
579 skb->protocol = eth_type_trans(skb, bnad->netdev);
582 unmap->vector.len = 0;
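/*
 * Poll the CQ for up to 'budget' completions: validate each entry, build
 * the skb (single-buffer or page-frag path), hand it to the stack, then
 * ack the IB and replenish the RxQ(s).
 */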
586 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
588 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
589 struct bna_rcb *rcb = NULL;
590 struct bnad_rx_unmap_q *unmap_q;
591 struct bnad_rx_unmap *unmap = NULL;
592 struct sk_buff *skb = NULL;
593 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
594 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
595 u32 packets = 0, len = 0, totlen = 0;
596 u32 pi, vec, sop_ci = 0, nvecs = 0;
597 u32 flags, masked_flags;
599 prefetch(bnad->netdev);
602 cmpl = &cq[ccb->producer_index];
604 while (packets < budget) {
607 /* The 'valid' field is set by the adapter, only after writing
608 * the other fields of completion entry. Hence, do not load
609 * other fields of completion entry *before* the 'valid' is
610 * loaded. Adding the rmb() here prevents the compiler and/or
611 * CPU from reordering the reads which would potentially result
612 * in reading stale values in completion entry.
616 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
618 if (bna_is_small_rxq(cmpl->rxq_id))
623 unmap_q = rcb->unmap_q;
625 /* start of packet ci */
626 sop_ci = rcb->consumer_index;
628 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
629 unmap = &unmap_q->unmap[sop_ci];
632 skb = napi_get_frags(&rx_ctrl->napi);
638 flags = ntohl(cmpl->flags);
639 len = ntohs(cmpl->length);
643 /* Check all the completions for this frame.
644 * busy-wait doesn't help much, break here.
646 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
647 (flags & BNA_CQ_EF_EOP) == 0) {
648 pi = ccb->producer_index;
650 BNA_QE_INDX_INC(pi, ccb->q_depth);
653 if (!next_cmpl->valid)
655 /* The 'valid' field is set by the adapter, only
656 * after writing the other fields of completion
657 * entry. Hence, do not load other fields of
658 * completion entry *before* the 'valid' is
659 * loaded. Adding the rmb() here prevents the
660 * compiler and/or CPU from reordering the reads
661 * which would potentially result in reading
662 * stale values in completion entry.
666 len = ntohs(next_cmpl->length);
667 flags = ntohl(next_cmpl->flags);
671 } while ((flags & BNA_CQ_EF_EOP) == 0);
673 if (!next_cmpl->valid)
677 /* TODO: BNA_CQ_EF_LOCAL ? */
678 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
679 BNA_CQ_EF_FCS_ERROR |
680 BNA_CQ_EF_TOO_LONG))) {
681 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
682 rcb->rxq->rx_packets_with_error++;
687 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
688 bnad_cq_setup_skb(bnad, skb, unmap, len);
690 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
693 rcb->rxq->rx_packets++;
694 rcb->rxq->rx_bytes += totlen;
695 ccb->bytes_per_intr += totlen;
697 masked_flags = flags & flags_cksum_prot_mask;
700 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
701 ((masked_flags == flags_tcp4) ||
702 (masked_flags == flags_udp4) ||
703 (masked_flags == flags_tcp6) ||
704 (masked_flags == flags_udp6))))
705 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 skb_checksum_none_assert(skb);
709 if (flags & BNA_CQ_EF_VLAN)
710 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
712 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
713 netif_receive_skb(skb);
715 napi_gro_frags(&rx_ctrl->napi);
718 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
719 for (vec = 0; vec < nvecs; vec++) {
720 cmpl = &cq[ccb->producer_index];
722 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
724 cmpl = &cq[ccb->producer_index];
727 napi_gro_flush(&rx_ctrl->napi, false);
728 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
729 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
731 bnad_rxq_post(bnad, ccb->rcb[0]);
733 bnad_rxq_post(bnad, ccb->rcb[1]);
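/* Schedule NAPI polling for this CCB unless a poll is already pending */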
739 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
741 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
742 struct napi_struct *napi = &rx_ctrl->napi;
744 if (likely(napi_schedule_prep(napi))) {
745 __napi_schedule(napi);
746 rx_ctrl->rx_schedule++;
750 /* MSIX Rx Path Handler */
752 bnad_msix_rx(int irq, void *data)
754 struct bna_ccb *ccb = (struct bna_ccb *)data;
757 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
758 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
764 /* Interrupt handlers */
766 /* Mbox Interrupt Handlers */
768 bnad_msix_mbox_handler(int irq, void *data)
772 struct bnad *bnad = (struct bnad *)data;
774 spin_lock_irqsave(&bnad->bna_lock, flags);
775 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
776 spin_unlock_irqrestore(&bnad->bna_lock, flags);
780 bna_intr_status_get(&bnad->bna, intr_status);
782 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
783 bna_mbox_handler(&bnad->bna, intr_status);
785 spin_unlock_irqrestore(&bnad->bna_lock, flags);
791 bnad_isr(int irq, void *data)
796 struct bnad *bnad = (struct bnad *)data;
797 struct bnad_rx_info *rx_info;
798 struct bnad_rx_ctrl *rx_ctrl;
799 struct bna_tcb *tcb = NULL;
801 spin_lock_irqsave(&bnad->bna_lock, flags);
802 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
803 spin_unlock_irqrestore(&bnad->bna_lock, flags);
807 bna_intr_status_get(&bnad->bna, intr_status);
809 if (unlikely(!intr_status)) {
810 spin_unlock_irqrestore(&bnad->bna_lock, flags);
814 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
815 bna_mbox_handler(&bnad->bna, intr_status);
817 spin_unlock_irqrestore(&bnad->bna_lock, flags);
819 if (!BNA_IS_INTX_DATA_INTR(intr_status))
822 /* Process data interrupts */
824 for (i = 0; i < bnad->num_tx; i++) {
825 for (j = 0; j < bnad->num_txq_per_tx; j++) {
826 tcb = bnad->tx_info[i].tcb[j];
827 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
828 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
832 for (i = 0; i < bnad->num_rx; i++) {
833 rx_info = &bnad->rx_info[i];
836 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
837 rx_ctrl = &rx_info->rx_ctrl[j];
839 bnad_netif_rx_schedule_poll(bnad,
847 * Called in interrupt / callback context
848 * with bna_lock held, so cfg_flags access is OK
851 bnad_enable_mbox_irq(struct bnad *bnad)
853 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
855 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
/*
 * Called with bnad->bna_lock held because of bnad->cfg_flags access.
 */
863 bnad_disable_mbox_irq(struct bnad *bnad)
865 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
867 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
871 bnad_set_netdev_perm_addr(struct bnad *bnad)
873 struct net_device *netdev = bnad->netdev;
875 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
876 if (is_zero_ether_addr(netdev->dev_addr))
877 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
880 /* Control Path Handlers */
884 bnad_cb_mbox_intr_enable(struct bnad *bnad)
886 bnad_enable_mbox_irq(bnad);
890 bnad_cb_mbox_intr_disable(struct bnad *bnad)
892 bnad_disable_mbox_irq(bnad);
896 bnad_cb_ioceth_ready(struct bnad *bnad)
898 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
899 complete(&bnad->bnad_completions.ioc_comp);
903 bnad_cb_ioceth_failed(struct bnad *bnad)
905 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
906 complete(&bnad->bnad_completions.ioc_comp);
910 bnad_cb_ioceth_disabled(struct bnad *bnad)
912 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
913 complete(&bnad->bnad_completions.ioc_comp);
917 bnad_cb_enet_disabled(void *arg)
919 struct bnad *bnad = (struct bnad *)arg;
921 netif_carrier_off(bnad->netdev);
922 complete(&bnad->bnad_completions.enet_comp);
926 bnad_cb_ethport_link_status(struct bnad *bnad,
927 enum bna_link_status link_status)
929 bool link_up = false;
931 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
933 if (link_status == BNA_CEE_UP) {
934 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
935 BNAD_UPDATE_CTR(bnad, cee_toggle);
936 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
938 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
939 BNAD_UPDATE_CTR(bnad, cee_toggle);
940 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
944 if (!netif_carrier_ok(bnad->netdev)) {
946 printk(KERN_WARNING "bna: %s link up\n",
948 netif_carrier_on(bnad->netdev);
949 BNAD_UPDATE_CTR(bnad, link_toggle);
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
953 struct bna_tcb *tcb =
954 bnad->tx_info[tx_id].tcb[tcb_id];
961 if (test_bit(BNAD_TXQ_TX_STARTED,
965 * Transmit Schedule */
966 printk(KERN_INFO "bna: %s %d "
973 BNAD_UPDATE_CTR(bnad,
979 BNAD_UPDATE_CTR(bnad,
986 if (netif_carrier_ok(bnad->netdev)) {
987 printk(KERN_WARNING "bna: %s link down\n",
989 netif_carrier_off(bnad->netdev);
990 BNAD_UPDATE_CTR(bnad, link_toggle);
996 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
998 struct bnad *bnad = (struct bnad *)arg;
1000 complete(&bnad->bnad_completions.tx_comp);
1004 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1006 struct bnad_tx_info *tx_info =
1007 (struct bnad_tx_info *)tcb->txq->tx->priv;
1010 tx_info->tcb[tcb->id] = tcb;
1014 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1016 struct bnad_tx_info *tx_info =
1017 (struct bnad_tx_info *)tcb->txq->tx->priv;
1019 tx_info->tcb[tcb->id] = NULL;
1024 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1026 struct bnad_rx_info *rx_info =
1027 (struct bnad_rx_info *)ccb->cq->rx->priv;
1029 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1030 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1034 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1036 struct bnad_rx_info *rx_info =
1037 (struct bnad_rx_info *)ccb->cq->rx->priv;
1039 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1043 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1045 struct bnad_tx_info *tx_info =
1046 (struct bnad_tx_info *)tx->priv;
1047 struct bna_tcb *tcb;
1051 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1052 tcb = tx_info->tcb[i];
1056 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1057 netif_stop_subqueue(bnad->netdev, txq_id);
1058 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1059 bnad->netdev->name, txq_id);
1064 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1066 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1067 struct bna_tcb *tcb;
1071 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1072 tcb = tx_info->tcb[i];
1077 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1078 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1079 BUG_ON(*(tcb->hw_consumer_index) != 0);
1081 if (netif_carrier_ok(bnad->netdev)) {
1082 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1083 bnad->netdev->name, txq_id);
1084 netif_wake_subqueue(bnad->netdev, txq_id);
1085 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	/*
	 * Workaround for the first ioceth enable failure leaving a zero
	 * MAC address: fetch the permanent MAC address from the hardware
	 * again and program it into the netdev.
	 */
1094 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
1095 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1096 bnad_set_netdev_perm_addr(bnad);
1101 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1104 bnad_tx_cleanup(struct delayed_work *work)
1106 struct bnad_tx_info *tx_info =
1107 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1108 struct bnad *bnad = NULL;
1109 struct bna_tcb *tcb;
1110 unsigned long flags;
1113 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1114 tcb = tx_info->tcb[i];
1120 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1125 bnad_txq_cleanup(bnad, tcb);
1127 smp_mb__before_clear_bit();
1128 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1132 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1133 msecs_to_jiffies(1));
1137 spin_lock_irqsave(&bnad->bna_lock, flags);
1138 bna_tx_cleanup_complete(tx_info->tx);
1139 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1143 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1145 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1146 struct bna_tcb *tcb;
1149 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1150 tcb = tx_info->tcb[i];
1155 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1159 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1161 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1162 struct bna_ccb *ccb;
1163 struct bnad_rx_ctrl *rx_ctrl;
1166 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1167 rx_ctrl = &rx_info->rx_ctrl[i];
1172 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1175 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1180 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1183 bnad_rx_cleanup(void *work)
1185 struct bnad_rx_info *rx_info =
1186 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1187 struct bnad_rx_ctrl *rx_ctrl;
1188 struct bnad *bnad = NULL;
1189 unsigned long flags;
1192 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1193 rx_ctrl = &rx_info->rx_ctrl[i];
1198 bnad = rx_ctrl->ccb->bnad;
1201 * Wait till the poll handler has exited
1202 * and nothing can be scheduled anymore
1204 napi_disable(&rx_ctrl->napi);
1206 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1207 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1208 if (rx_ctrl->ccb->rcb[1])
1209 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1212 spin_lock_irqsave(&bnad->bna_lock, flags);
1213 bna_rx_cleanup_complete(rx_info->rx);
1214 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1218 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1220 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1221 struct bna_ccb *ccb;
1222 struct bnad_rx_ctrl *rx_ctrl;
1225 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1226 rx_ctrl = &rx_info->rx_ctrl[i];
1231 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1234 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1237 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1241 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1243 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1244 struct bna_ccb *ccb;
1245 struct bna_rcb *rcb;
1246 struct bnad_rx_ctrl *rx_ctrl;
1249 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1250 rx_ctrl = &rx_info->rx_ctrl[i];
1255 napi_enable(&rx_ctrl->napi);
1257 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1262 bnad_rxq_alloc_init(bnad, rcb);
1263 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1264 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1265 bnad_rxq_post(bnad, rcb);
1271 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1273 struct bnad *bnad = (struct bnad *)arg;
1275 complete(&bnad->bnad_completions.rx_comp);
1279 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1281 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1282 complete(&bnad->bnad_completions.mcast_comp);
1286 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1287 struct bna_stats *stats)
1289 if (status == BNA_CB_SUCCESS)
1290 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1292 if (!netif_running(bnad->netdev) ||
1293 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1296 mod_timer(&bnad->stats_timer,
1297 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1301 bnad_cb_enet_mtu_set(struct bnad *bnad)
1303 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1304 complete(&bnad->bnad_completions.mtu_comp);
1308 bnad_cb_completion(void *arg, enum bfa_status status)
1310 struct bnad_iocmd_comp *iocmd_comp =
1311 (struct bnad_iocmd_comp *)arg;
1313 iocmd_comp->comp_status = (u32) status;
1314 complete(&iocmd_comp->comp);
1317 /* Resource allocation, free functions */
1320 bnad_mem_free(struct bnad *bnad,
1321 struct bna_mem_info *mem_info)
1326 if (mem_info->mdl == NULL)
1329 for (i = 0; i < mem_info->num; i++) {
1330 if (mem_info->mdl[i].kva != NULL) {
1331 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1332 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1334 dma_free_coherent(&bnad->pcidev->dev,
1335 mem_info->mdl[i].len,
1336 mem_info->mdl[i].kva, dma_pa);
1338 kfree(mem_info->mdl[i].kva);
1341 kfree(mem_info->mdl);
1342 mem_info->mdl = NULL;
1346 bnad_mem_alloc(struct bnad *bnad,
1347 struct bna_mem_info *mem_info)
1352 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1353 mem_info->mdl = NULL;
1357 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1359 if (mem_info->mdl == NULL)
1362 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1363 for (i = 0; i < mem_info->num; i++) {
1364 mem_info->mdl[i].len = mem_info->len;
1365 mem_info->mdl[i].kva =
1366 dma_alloc_coherent(&bnad->pcidev->dev,
1367 mem_info->len, &dma_pa,
1369 if (mem_info->mdl[i].kva == NULL)
1372 BNA_SET_DMA_ADDR(dma_pa,
1373 &(mem_info->mdl[i].dma));
1376 for (i = 0; i < mem_info->num; i++) {
1377 mem_info->mdl[i].len = mem_info->len;
1378 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1380 if (mem_info->mdl[i].kva == NULL)
1388 bnad_mem_free(bnad, mem_info);
1392 /* Free IRQ for Mailbox */
1394 bnad_mbox_irq_free(struct bnad *bnad)
1397 unsigned long flags;
1399 spin_lock_irqsave(&bnad->bna_lock, flags);
1400 bnad_disable_mbox_irq(bnad);
1401 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1403 irq = BNAD_GET_MBOX_IRQ(bnad);
1404 free_irq(irq, bnad);
1408 * Allocates IRQ for Mailbox, but keep it disabled
1409 * This will be enabled once we get the mbox enable callback
1413 bnad_mbox_irq_alloc(struct bnad *bnad)
1416 unsigned long irq_flags, flags;
1418 irq_handler_t irq_handler;
1420 spin_lock_irqsave(&bnad->bna_lock, flags);
1421 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1422 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1423 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1426 irq_handler = (irq_handler_t)bnad_isr;
1427 irq = bnad->pcidev->irq;
1428 irq_flags = IRQF_SHARED;
1431 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1432 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1435 * Set the Mbox IRQ disable flag, so that the IRQ handler
1436 * called from request_irq() for SHARED IRQs do not execute
1438 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1440 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1442 err = request_irq(irq, irq_handler, irq_flags,
1443 bnad->mbox_irq_name, bnad);
1449 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1451 kfree(intr_info->idl);
1452 intr_info->idl = NULL;
1455 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1457 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1458 u32 txrx_id, struct bna_intr_info *intr_info)
1460 int i, vector_start = 0;
1462 unsigned long flags;
1464 spin_lock_irqsave(&bnad->bna_lock, flags);
1465 cfg_flags = bnad->cfg_flags;
1466 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1468 if (cfg_flags & BNAD_CF_MSIX) {
1469 intr_info->intr_type = BNA_INTR_T_MSIX;
1470 intr_info->idl = kcalloc(intr_info->num,
1471 sizeof(struct bna_intr_descr),
1473 if (!intr_info->idl)
1478 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1482 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1483 (bnad->num_tx * bnad->num_txq_per_tx) +
1491 for (i = 0; i < intr_info->num; i++)
1492 intr_info->idl[i].vector = vector_start + i;
1494 intr_info->intr_type = BNA_INTR_T_INTX;
1496 intr_info->idl = kcalloc(intr_info->num,
1497 sizeof(struct bna_intr_descr),
1499 if (!intr_info->idl)
1504 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1508 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1515 /* NOTE: Should be called for MSIX only
1516 * Unregisters Tx MSIX vector(s) from the kernel
1519 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1525 for (i = 0; i < num_txqs; i++) {
1526 if (tx_info->tcb[i] == NULL)
1529 vector_num = tx_info->tcb[i]->intr_vector;
1530 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1534 /* NOTE: Should be called for MSIX only
1535 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1538 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1539 u32 tx_id, int num_txqs)
1545 for (i = 0; i < num_txqs; i++) {
1546 vector_num = tx_info->tcb[i]->intr_vector;
1547 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1548 tx_id + tx_info->tcb[i]->id);
1549 err = request_irq(bnad->msix_table[vector_num].vector,
1550 (irq_handler_t)bnad_msix_tx, 0,
1551 tx_info->tcb[i]->name,
1561 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1565 /* NOTE: Should be called for MSIX only
1566 * Unregisters Rx MSIX vector(s) from the kernel
1569 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1575 for (i = 0; i < num_rxps; i++) {
1576 if (rx_info->rx_ctrl[i].ccb == NULL)
1579 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1580 free_irq(bnad->msix_table[vector_num].vector,
1581 rx_info->rx_ctrl[i].ccb);
/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
1589 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1590 u32 rx_id, int num_rxps)
1596 for (i = 0; i < num_rxps; i++) {
1597 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1598 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1600 rx_id + rx_info->rx_ctrl[i].ccb->id);
1601 err = request_irq(bnad->msix_table[vector_num].vector,
1602 (irq_handler_t)bnad_msix_rx, 0,
1603 rx_info->rx_ctrl[i].ccb->name,
1604 rx_info->rx_ctrl[i].ccb);
1613 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1617 /* Free Tx object Resources */
1619 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1623 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1624 if (res_info[i].res_type == BNA_RES_T_MEM)
1625 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1626 else if (res_info[i].res_type == BNA_RES_T_INTR)
1627 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1631 /* Allocates memory and interrupt resources for Tx object */
1633 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1638 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1639 if (res_info[i].res_type == BNA_RES_T_MEM)
1640 err = bnad_mem_alloc(bnad,
1641 &res_info[i].res_u.mem_info);
1642 else if (res_info[i].res_type == BNA_RES_T_INTR)
1643 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1644 &res_info[i].res_u.intr_info);
1651 bnad_tx_res_free(bnad, res_info);
1655 /* Free Rx object Resources */
1657 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1661 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1662 if (res_info[i].res_type == BNA_RES_T_MEM)
1663 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1664 else if (res_info[i].res_type == BNA_RES_T_INTR)
1665 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1669 /* Allocates memory and interrupt resources for Rx object */
1671 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1676 /* All memory needs to be allocated before setup_ccbs */
1677 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1678 if (res_info[i].res_type == BNA_RES_T_MEM)
1679 err = bnad_mem_alloc(bnad,
1680 &res_info[i].res_u.mem_info);
1681 else if (res_info[i].res_type == BNA_RES_T_INTR)
1682 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1683 &res_info[i].res_u.intr_info);
1690 bnad_rx_res_free(bnad, res_info);
1694 /* Timer callbacks */
1697 bnad_ioc_timeout(unsigned long data)
1699 struct bnad *bnad = (struct bnad *)data;
1700 unsigned long flags;
1702 spin_lock_irqsave(&bnad->bna_lock, flags);
1703 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1708 bnad_ioc_hb_check(unsigned long data)
1710 struct bnad *bnad = (struct bnad *)data;
1711 unsigned long flags;
1713 spin_lock_irqsave(&bnad->bna_lock, flags);
1714 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1715 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1719 bnad_iocpf_timeout(unsigned long data)
1721 struct bnad *bnad = (struct bnad *)data;
1722 unsigned long flags;
1724 spin_lock_irqsave(&bnad->bna_lock, flags);
1725 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1726 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1730 bnad_iocpf_sem_timeout(unsigned long data)
1732 struct bnad *bnad = (struct bnad *)data;
1733 unsigned long flags;
1735 spin_lock_irqsave(&bnad->bna_lock, flags);
1736 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1737 spin_unlock_irqrestore(&bnad->bna_lock, flags);
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */
1750 /* b) Dynamic Interrupt Moderation Timer */
1752 bnad_dim_timeout(unsigned long data)
1754 struct bnad *bnad = (struct bnad *)data;
1755 struct bnad_rx_info *rx_info;
1756 struct bnad_rx_ctrl *rx_ctrl;
1758 unsigned long flags;
1760 if (!netif_carrier_ok(bnad->netdev))
1763 spin_lock_irqsave(&bnad->bna_lock, flags);
1764 for (i = 0; i < bnad->num_rx; i++) {
1765 rx_info = &bnad->rx_info[i];
1768 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1769 rx_ctrl = &rx_info->rx_ctrl[j];
1772 bna_rx_dim_update(rx_ctrl->ccb);
	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1777 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1778 mod_timer(&bnad->dim_timer,
1779 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1780 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1783 /* c) Statistics Timer */
1785 bnad_stats_timeout(unsigned long data)
1787 struct bnad *bnad = (struct bnad *)data;
1788 unsigned long flags;
1790 if (!netif_running(bnad->netdev) ||
1791 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1794 spin_lock_irqsave(&bnad->bna_lock, flags);
1795 bna_hw_stats_get(&bnad->bna);
1796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1800 * Set up timer for DIM
1801 * Called with bnad->bna_lock held
1804 bnad_dim_timer_start(struct bnad *bnad)
1806 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1807 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1808 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1809 (unsigned long)bnad);
1810 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1811 mod_timer(&bnad->dim_timer,
1812 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1817 * Set up timer for statistics
1818 * Called with mutex_lock(&bnad->conf_mutex) held
1821 bnad_stats_timer_start(struct bnad *bnad)
1823 unsigned long flags;
1825 spin_lock_irqsave(&bnad->bna_lock, flags);
1826 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1827 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1828 (unsigned long)bnad);
1829 mod_timer(&bnad->stats_timer,
1830 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1832 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1836 * Stops the stats timer
1837 * Called with mutex_lock(&bnad->conf_mutex) held
1840 bnad_stats_timer_stop(struct bnad *bnad)
1843 unsigned long flags;
1845 spin_lock_irqsave(&bnad->bna_lock, flags);
1846 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1848 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850 del_timer_sync(&bnad->stats_timer);
1856 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1858 int i = 1; /* Index 0 has broadcast address */
1859 struct netdev_hw_addr *mc_addr;
1861 netdev_for_each_mc_addr(mc_addr, netdev) {
1862 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
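/*
 * NAPI poll callback: process Rx completions up to 'budget'; when fewer
 * than 'budget' packets are received, complete NAPI and re-enable the Rx IRQ.
 */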
1869 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1871 struct bnad_rx_ctrl *rx_ctrl =
1872 container_of(napi, struct bnad_rx_ctrl, napi);
1873 struct bnad *bnad = rx_ctrl->bnad;
1876 rx_ctrl->rx_poll_ctr++;
1878 if (!netif_carrier_ok(bnad->netdev))
1881 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1886 napi_complete(napi);
1888 rx_ctrl->rx_complete++;
1891 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1896 #define BNAD_NAPI_POLL_QUOTA 64
1898 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1900 struct bnad_rx_ctrl *rx_ctrl;
1903 /* Initialize & enable NAPI */
1904 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1905 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1906 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1907 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1912 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1916 /* First disable and then clean up */
1917 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1918 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1921 /* Should be held with conf_lock held */
1923 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1925 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1926 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1927 unsigned long flags;
1932 init_completion(&bnad->bnad_completions.tx_comp);
1933 spin_lock_irqsave(&bnad->bna_lock, flags);
1934 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1935 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1936 wait_for_completion(&bnad->bnad_completions.tx_comp);
1938 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1939 bnad_tx_msix_unregister(bnad, tx_info,
1940 bnad->num_txq_per_tx);
1942 spin_lock_irqsave(&bnad->bna_lock, flags);
1943 bna_tx_destroy(tx_info->tx);
1944 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1949 bnad_tx_res_free(bnad, res_info);
1952 /* Should be held with conf_lock held */
1954 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1957 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1958 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1959 struct bna_intr_info *intr_info =
1960 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1961 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1962 static const struct bna_tx_event_cbfn tx_cbfn = {
1963 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1964 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1965 .tx_stall_cbfn = bnad_cb_tx_stall,
1966 .tx_resume_cbfn = bnad_cb_tx_resume,
1967 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1971 unsigned long flags;
1973 tx_info->tx_id = tx_id;
1975 /* Initialize the Tx object configuration */
1976 tx_config->num_txq = bnad->num_txq_per_tx;
1977 tx_config->txq_depth = bnad->txq_depth;
1978 tx_config->tx_type = BNA_TX_T_REGULAR;
1979 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1981 /* Get BNA's resource requirement for one tx object */
1982 spin_lock_irqsave(&bnad->bna_lock, flags);
1983 bna_tx_res_req(bnad->num_txq_per_tx,
1984 bnad->txq_depth, res_info);
1985 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1987 /* Fill Unmap Q memory requirements */
1988 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1989 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1992 /* Allocate resources */
1993 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1997 /* Ask BNA to create one Tx object, supplying required resources */
1998 spin_lock_irqsave(&bnad->bna_lock, flags);
1999 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2001 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2006 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2007 (work_func_t)bnad_tx_cleanup);
2009 /* Register ISR for the Tx object */
2010 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2011 err = bnad_tx_msix_register(bnad, tx_info,
2012 tx_id, bnad->num_txq_per_tx);
2017 spin_lock_irqsave(&bnad->bna_lock, flags);
2019 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2024 bnad_tx_res_free(bnad, res_info);
2028 /* Setup the rx config for bna_rx_create */
2029 /* bnad decides the configuration */
2031 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2033 memset(rx_config, 0, sizeof(*rx_config));
2034 rx_config->rx_type = BNA_RX_T_REGULAR;
2035 rx_config->num_paths = bnad->num_rxp_per_rx;
2036 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2038 if (bnad->num_rxp_per_rx > 1) {
2039 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2040 rx_config->rss_config.hash_type =
2041 (BFI_ENET_RSS_IPV6 |
2042 BFI_ENET_RSS_IPV6_TCP |
2044 BFI_ENET_RSS_IPV4_TCP);
2045 rx_config->rss_config.hash_mask =
2046 bnad->num_rxp_per_rx - 1;
2047 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2048 sizeof(rx_config->rss_config.toeplitz_hash_key));
2050 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2051 memset(&rx_config->rss_config, 0,
2052 sizeof(rx_config->rss_config));
2055 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2056 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2058 /* BNA_RXP_SINGLE - one data-buffer queue
2059 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2060 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2062 /* TODO: configurable param for queue type */
2063 rx_config->rxp_type = BNA_RXP_SLR;
2065 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2066 rx_config->frame_size > 4096) {
2067 /* though size_routing_enable is set in SLR,
2068 * small packets may get routed to same rxq.
2069 * set buf_size to 2048 instead of PAGE_SIZE.
2071 rx_config->q0_buf_size = 2048;
2072 /* this should be in multiples of 2 */
2073 rx_config->q0_num_vecs = 4;
2074 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2075 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2077 rx_config->q0_buf_size = rx_config->frame_size;
2078 rx_config->q0_num_vecs = 1;
2079 rx_config->q0_depth = bnad->rxq_depth;
2082 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2083 if (rx_config->rxp_type == BNA_RXP_SLR) {
2084 rx_config->q1_depth = bnad->rxq_depth;
2085 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2088 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
2092 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2094 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2097 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2098 rx_info->rx_ctrl[i].bnad = bnad;
2101 /* Called with mutex_lock(&bnad->conf_mutex) held */
2103 bnad_reinit_rx(struct bnad *bnad)
2105 struct net_device *netdev = bnad->netdev;
2106 u32 err = 0, current_err = 0;
2107 u32 rx_id = 0, count = 0;
2108 unsigned long flags;
2110 /* destroy and create new rx objects */
2111 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2112 if (!bnad->rx_info[rx_id].rx)
2114 bnad_destroy_rx(bnad, rx_id);
2117 spin_lock_irqsave(&bnad->bna_lock, flags);
2118 bna_enet_mtu_set(&bnad->bna.enet,
2119 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2120 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2122 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2124 current_err = bnad_setup_rx(bnad, rx_id);
2125 if (current_err && !err) {
2127 pr_err("RXQ:%u setup failed\n", rx_id);
2131 /* restore rx configuration */
2132 if (bnad->rx_info[0].rx && !err) {
2133 bnad_restore_vlans(bnad, 0);
2134 bnad_enable_default_bcast(bnad);
2135 spin_lock_irqsave(&bnad->bna_lock, flags);
2136 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2137 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2138 bnad_set_rx_mode(netdev);
2144 /* Called with bnad_conf_lock() held */
2146 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2148 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2149 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2150 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2151 unsigned long flags;
2158 spin_lock_irqsave(&bnad->bna_lock, flags);
2159 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2160 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2161 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2164 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2166 del_timer_sync(&bnad->dim_timer);
2169 init_completion(&bnad->bnad_completions.rx_comp);
2170 spin_lock_irqsave(&bnad->bna_lock, flags);
2171 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2172 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2173 wait_for_completion(&bnad->bnad_completions.rx_comp);
2175 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2176 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2178 bnad_napi_delete(bnad, rx_id);
2180 spin_lock_irqsave(&bnad->bna_lock, flags);
2181 bna_rx_destroy(rx_info->rx);
2185 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2187 bnad_rx_res_free(bnad, res_info);
2190 /* Called with mutex_lock(&bnad->conf_mutex) held */
2192 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2195 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2196 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2197 struct bna_intr_info *intr_info =
2198 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2199 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2200 static const struct bna_rx_event_cbfn rx_cbfn = {
2201 .rcb_setup_cbfn = NULL,
2202 .rcb_destroy_cbfn = NULL,
2203 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2204 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2205 .rx_stall_cbfn = bnad_cb_rx_stall,
2206 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2207 .rx_post_cbfn = bnad_cb_rx_post,
2210 unsigned long flags;
2212 rx_info->rx_id = rx_id;
2214 /* Initialize the Rx object configuration */
2215 bnad_init_rx_config(bnad, rx_config);
2217 /* Get BNA's resource requirement for one Rx object */
2218 spin_lock_irqsave(&bnad->bna_lock, flags);
2219 bna_rx_res_req(rx_config, res_info);
2220 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2222 /* Fill Unmap Q memory requirements */
2223 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2224 rx_config->num_paths,
2225 (rx_config->q0_depth *
2226 sizeof(struct bnad_rx_unmap)) +
2227 sizeof(struct bnad_rx_unmap_q));
2229 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2230 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2231 rx_config->num_paths,
2232 (rx_config->q1_depth *
2233 sizeof(struct bnad_rx_unmap) +
2234 sizeof(struct bnad_rx_unmap_q)));
2236 /* Allocate resource */
2237 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2241 bnad_rx_ctrl_init(bnad, rx_id);
2243 /* Ask BNA to create one Rx object, supplying required resources */
2244 spin_lock_irqsave(&bnad->bna_lock, flags);
2245 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2249 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2253 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2255 INIT_WORK(&rx_info->rx_cleanup_work,
2256 (work_func_t)(bnad_rx_cleanup));
2259 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2260 * so that IRQ handler cannot schedule NAPI at this point.
2262 bnad_napi_add(bnad, rx_id);
2264 /* Register ISR for the Rx object */
2265 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2266 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2267 rx_config->num_paths);
2272 spin_lock_irqsave(&bnad->bna_lock, flags);
2274 /* Set up Dynamic Interrupt Moderation Vector */
2275 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2276 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2278 /* Enable VLAN filtering only on the default Rx */
2279 bna_rx_vlanfilter_enable(rx);
2281 /* Start the DIM timer */
2282 bnad_dim_timer_start(bnad);
2286 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2291 bnad_destroy_rx(bnad, rx_id);
2295 /* Called with conf_lock & bnad->bna_lock held */
2297 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2299 struct bnad_tx_info *tx_info;
2301 tx_info = &bnad->tx_info[0];
2305 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2308 /* Called with conf_lock & bnad->bna_lock held */
2310 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2312 struct bnad_rx_info *rx_info;
2315 for (i = 0; i < bnad->num_rx; i++) {
2316 rx_info = &bnad->rx_info[i];
2319 bna_rx_coalescing_timeo_set(rx_info->rx,
2320 bnad->rx_coalescing_timeo);
2325 * Called with bnad->bna_lock held
2328 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2332 if (!is_valid_ether_addr(mac_addr))
2333 return -EADDRNOTAVAIL;
2335 /* If datapath is down, pretend everything went through */
2336 if (!bnad->rx_info[0].rx)
2339 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2340 if (ret != BNA_CB_SUCCESS)
2341 return -EADDRNOTAVAIL;
2346 /* Should be called with conf_lock held */
2348 bnad_enable_default_bcast(struct bnad *bnad)
2350 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2352 unsigned long flags;
2354 init_completion(&bnad->bnad_completions.mcast_comp);
2356 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2358 bnad_cb_rx_mcast_add);
2359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361 if (ret == BNA_CB_SUCCESS)
2362 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2366 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2372 /* Called with mutex_lock(&bnad->conf_mutex) held */
2374 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2377 unsigned long flags;
2379 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2380 spin_lock_irqsave(&bnad->bna_lock, flags);
2381 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2382 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386 /* Statistics utilities */
2388 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2392 for (i = 0; i < bnad->num_rx; i++) {
2393 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2394 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2395 stats->rx_packets += bnad->rx_info[i].
2396 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2397 stats->rx_bytes += bnad->rx_info[i].
2398 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2399 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2400 bnad->rx_info[i].rx_ctrl[j].ccb->
2402 stats->rx_packets +=
2403 bnad->rx_info[i].rx_ctrl[j].
2404 ccb->rcb[1]->rxq->rx_packets;
2406 bnad->rx_info[i].rx_ctrl[j].
2407 ccb->rcb[1]->rxq->rx_bytes;
2412 for (i = 0; i < bnad->num_tx; i++) {
2413 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2414 if (bnad->tx_info[i].tcb[j]) {
2415 stats->tx_packets +=
2416 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2418 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2425 * Must be called with the bna_lock held.
2428 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2430 struct bfi_enet_stats_mac *mac_stats;
2434 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2436 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2437 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2438 mac_stats->rx_undersize;
2439 stats->tx_errors = mac_stats->tx_fcs_error +
2440 mac_stats->tx_undersize;
2441 stats->rx_dropped = mac_stats->rx_drop;
2442 stats->tx_dropped = mac_stats->tx_drop;
2443 stats->multicast = mac_stats->rx_multicast;
2444 stats->collisions = mac_stats->tx_total_collision;
2446 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2448 /* receive ring buffer overflow ?? */
2450 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2451 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2452 /* recv'r fifo overrun */
2453 bmap = bna_rx_rid_mask(&bnad->bna);
2454 for (i = 0; bmap; i++) {
2456 stats->rx_fifo_errors +=
2457 bnad->stats.bna_stats->
2458 hw_stats.rxf_stats[i].frame_drops;
2466 bnad_mbox_irq_sync(struct bnad *bnad)
2469 unsigned long flags;
2471 spin_lock_irqsave(&bnad->bna_lock, flags);
2472 if (bnad->cfg_flags & BNAD_CF_MSIX)
2473 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2475 irq = bnad->pcidev->irq;
2476 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2478 synchronize_irq(irq);
2481 /* Utility used by bnad_start_xmit, for doing TSO */
2483 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2487 if (skb_header_cloned(skb)) {
2488 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2490 BNAD_UPDATE_CTR(bnad, tso_err);
2496 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2497 * excluding the length field.
2499 if (skb->protocol == htons(ETH_P_IP)) {
2500 struct iphdr *iph = ip_hdr(skb);
2502 /* Do we really need these? */
2506 tcp_hdr(skb)->check =
2507 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2509 BNAD_UPDATE_CTR(bnad, tso4);
2511 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2513 ipv6h->payload_len = 0;
2514 tcp_hdr(skb)->check =
2515 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2517 BNAD_UPDATE_CTR(bnad, tso6);
2524 * Initialize Q numbers depending on Rx Paths
2525 * Called with bnad->bna_lock held, because of cfg_flags
2529 bnad_q_num_init(struct bnad *bnad)
2533 rxps = min((uint)num_online_cpus(),
2534 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2536 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2537 rxps = 1; /* INTx */
2541 bnad->num_rxp_per_rx = rxps;
2542 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
 * Adjusts the Q numbers, given a number of msix vectors.
 * Give preference to RSS as opposed to Tx priority Queues;
 * in such a case, just use 1 Tx Q.
 * Called with bnad->bna_lock held because of cfg_flags access.
2552 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2554 bnad->num_txq_per_tx = 1;
2555 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2556 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2557 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2558 bnad->num_rxp_per_rx = msix_vectors -
2559 (bnad->num_tx * bnad->num_txq_per_tx) -
2560 BNAD_MAILBOX_MSIX_VECTORS;
2562 bnad->num_rxp_per_rx = 1;
2565 /* Enable / disable ioceth */
2567 bnad_ioceth_disable(struct bnad *bnad)
2569 unsigned long flags;
2572 spin_lock_irqsave(&bnad->bna_lock, flags);
2573 init_completion(&bnad->bnad_completions.ioc_comp);
2574 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2575 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2577 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2578 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
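/* Use the status recorded by the disable callback; the wait above may have timed out. */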
2580 err = bnad->bnad_completions.ioc_comp_status;
2585 bnad_ioceth_enable(struct bnad *bnad)
2588 unsigned long flags;
2590 spin_lock_irqsave(&bnad->bna_lock, flags);
2591 init_completion(&bnad->bnad_completions.ioc_comp);
2592 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2593 bna_ioceth_enable(&bnad->bna.ioceth);
2594 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2596 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2597 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2599 err = bnad->bnad_completions.ioc_comp_status;
2604 /* Free BNA resources */
2606 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2611 for (i = 0; i < res_val_max; i++)
2612 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2615 /* Allocates memory and interrupt resources for BNA */
2617 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2622 for (i = 0; i < res_val_max; i++) {
2623 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2630 bnad_res_free(bnad, res_info, res_val_max);
2634 /* Interrupt enable / disable */
2636 bnad_enable_msix(struct bnad *bnad)
2639 unsigned long flags;
2641 spin_lock_irqsave(&bnad->bna_lock, flags);
2642 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2643 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2646 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2648 if (bnad->msix_table)
2652 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2654 if (!bnad->msix_table)
2657 for (i = 0; i < bnad->msix_num; i++)
2658 bnad->msix_table[i].entry = i;
2660 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2662 /* Not enough MSI-X vectors. */
2663 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2664 ret, bnad->msix_num);
2666 spin_lock_irqsave(&bnad->bna_lock, flags);
2667 /* ret = number of vectors actually allocated */
2668 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2669 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2670 spin_unlock_irqrestore(&bnad->bna_lock, flags);
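/* Recompute the total vector count from the adjusted queue numbers. */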
2672 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2673 BNAD_MAILBOX_MSIX_VECTORS;
2675 if (bnad->msix_num > ret)
2678 /* Try once more with adjusted numbers */
2679 /* If this fails, fall back to INTx */
2680 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2688 pci_intx(bnad->pcidev, 0);
2693 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2695 kfree(bnad->msix_table);
2696 bnad->msix_table = NULL;
2698 spin_lock_irqsave(&bnad->bna_lock, flags);
2699 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2700 bnad_q_num_init(bnad);
2701 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2705 bnad_disable_msix(struct bnad *bnad)
2708 unsigned long flags;
2710 spin_lock_irqsave(&bnad->bna_lock, flags);
2711 cfg_flags = bnad->cfg_flags;
2712 if (bnad->cfg_flags & BNAD_CF_MSIX)
2713 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2714 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2716 if (cfg_flags & BNAD_CF_MSIX) {
2717 pci_disable_msix(bnad->pcidev);
2718 kfree(bnad->msix_table);
2719 bnad->msix_table = NULL;
2723 /* Netdev entry points */
2725 bnad_open(struct net_device *netdev)
2728 struct bnad *bnad = netdev_priv(netdev);
2729 struct bna_pause_config pause_config;
2730 unsigned long flags;
2732 mutex_lock(&bnad->conf_mutex);
2735 err = bnad_setup_tx(bnad, 0);
2740 err = bnad_setup_rx(bnad, 0);
2745 pause_config.tx_pause = 0;
2746 pause_config.rx_pause = 0;
2748 spin_lock_irqsave(&bnad->bna_lock, flags);
2749 bna_enet_mtu_set(&bnad->bna.enet,
2750 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2751 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2752 bna_enet_enable(&bnad->bna.enet);
2753 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2755 /* Enable broadcast */
2756 bnad_enable_default_bcast(bnad);
2758 /* Restore VLANs, if any */
2759 bnad_restore_vlans(bnad, 0);
2761 /* Set the UCAST address */
2762 spin_lock_irqsave(&bnad->bna_lock, flags);
2763 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2764 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2766 /* Start the stats timer */
2767 bnad_stats_timer_start(bnad);
2769 mutex_unlock(&bnad->conf_mutex);
2774 bnad_destroy_tx(bnad, 0);
2777 mutex_unlock(&bnad->conf_mutex);
2782 bnad_stop(struct net_device *netdev)
2784 struct bnad *bnad = netdev_priv(netdev);
2785 unsigned long flags;
2787 mutex_lock(&bnad->conf_mutex);
2789 /* Stop the stats timer */
2790 bnad_stats_timer_stop(bnad);
2792 init_completion(&bnad->bnad_completions.enet_comp);
2794 spin_lock_irqsave(&bnad->bna_lock, flags);
2795 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2796 bnad_cb_enet_disabled);
2797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2799 wait_for_completion(&bnad->bnad_completions.enet_comp);
2801 bnad_destroy_tx(bnad, 0);
2802 bnad_destroy_rx(bnad, 0);
2804 /* Synchronize mailbox IRQ */
2805 bnad_mbox_irq_sync(bnad);
2807 mutex_unlock(&bnad->conf_mutex);
2813 /* Returns 0 for success */
2815 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2816 struct sk_buff *skb, struct bna_txq_entry *txqent)
2822 if (vlan_tx_tag_present(skb)) {
2823 vlan_tag = (u16)vlan_tx_tag_get(skb);
2824 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2826 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2827 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2828 | (vlan_tag & 0x1fff);
2829 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2831 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2833 if (skb_is_gso(skb)) {
2834 gso_size = skb_shinfo(skb)->gso_size;
2835 if (unlikely(gso_size > bnad->netdev->mtu)) {
2836 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2839 if (unlikely((gso_size + skb_transport_offset(skb) +
2840 tcp_hdrlen(skb)) >= skb->len)) {
2841 txqent->hdr.wi.opcode =
2842 __constant_htons(BNA_TXQ_WI_SEND);
2843 txqent->hdr.wi.lso_mss = 0;
2844 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2846 txqent->hdr.wi.opcode =
2847 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2848 txqent->hdr.wi.lso_mss = htons(gso_size);
2851 if (bnad_tso_prepare(bnad, skb)) {
2852 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2856 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2857 txqent->hdr.wi.l4_hdr_size_n_offset =
2858 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2859 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
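/* The L4 header size is encoded in 32-bit words, hence tcp_hdrlen() >> 2. */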
2861 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2862 txqent->hdr.wi.lso_mss = 0;
2864 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2865 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2869 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2872 if (skb->protocol == __constant_htons(ETH_P_IP))
2873 proto = ip_hdr(skb)->protocol;
2874 #ifdef NETIF_F_IPV6_CSUM
2875 else if (skb->protocol ==
2876 __constant_htons(ETH_P_IPV6)) {
2877 /* nexthdr may not be TCP immediately. */
2878 proto = ipv6_hdr(skb)->nexthdr;
2881 if (proto == IPPROTO_TCP) {
2882 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2883 txqent->hdr.wi.l4_hdr_size_n_offset =
2884 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 (0, skb_transport_offset(skb)));
2887 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2889 if (unlikely(skb_headlen(skb) <
2890 skb_transport_offset(skb) +
2892 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2895 } else if (proto == IPPROTO_UDP) {
2896 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2897 txqent->hdr.wi.l4_hdr_size_n_offset =
2898 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2899 (0, skb_transport_offset(skb)));
2901 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2902 if (unlikely(skb_headlen(skb) <
2903 skb_transport_offset(skb) +
2904 sizeof(struct udphdr))) {
2905 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2910 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2914 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2917 txqent->hdr.wi.flags = htons(flags);
2918 txqent->hdr.wi.frame_length = htonl(skb->len);
2924 * bnad_start_xmit : Netdev entry point for Transmit
2925 * Called under lock held by net_device
2928 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2930 struct bnad *bnad = netdev_priv(netdev);
2932 struct bna_tcb *tcb = NULL;
2933 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2934 u32 prod, q_depth, vect_id;
2935 u32 wis, vectors, len;
2937 dma_addr_t dma_addr;
2938 struct bna_txq_entry *txqent;
2940 len = skb_headlen(skb);
2942 /* Sanity checks for the skb */
2944 if (unlikely(skb->len <= ETH_HLEN)) {
2946 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2947 return NETDEV_TX_OK;
2949 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2951 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2952 return NETDEV_TX_OK;
2954 if (unlikely(len == 0)) {
2956 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2957 return NETDEV_TX_OK;
2960 tcb = bnad->tx_info[0].tcb[txq_id];
2963 * Takes care of the Tx that is scheduled between clearing the flag
2964 * and the netif_tx_stop_all_queues() call.
2966 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2968 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2969 return NETDEV_TX_OK;
2972 q_depth = tcb->q_depth;
2973 prod = tcb->producer_index;
2974 unmap_q = tcb->unmap_q;
2976 vectors = 1 + skb_shinfo(skb)->nr_frags;
2977 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
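/* e.g. a linear area plus 6 frags is 7 vectors, which needs 2 work items */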
2979 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2981 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2982 return NETDEV_TX_OK;
2985 /* Check for available TxQ resources */
2986 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2987 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2988 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2990 sent = bnad_txcmpl_process(bnad, tcb);
2991 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2992 bna_ib_ack(tcb->i_dbell, sent);
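/* Make the completion processing above visible before clearing BNAD_TXQ_FREE_SENT. */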
2993 smp_mb__before_clear_bit();
2994 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2996 netif_stop_queue(netdev);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3002 * Check again to handle the race between netif_stop_queue()
3003 * here and netif_wake_queue() in the interrupt handler,
3004 * which does not run under the netif tx lock.
3006 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3007 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3008 return NETDEV_TX_BUSY;
3010 netif_wake_queue(netdev);
3011 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3015 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3016 head_unmap = &unmap_q[prod];
3018 /* Program the opcode, flags, frame_len, num_vectors in WI */
3019 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3021 return NETDEV_TX_OK;
3023 txqent->hdr.wi.reserved = 0;
3024 txqent->hdr.wi.num_vectors = vectors;
3026 head_unmap->skb = skb;
3027 head_unmap->nvecs = 0;
3029 /* Program the vectors */
3031 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3032 len, DMA_TO_DEVICE);
3033 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3034 txqent->vector[0].length = htons(len);
3035 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3036 head_unmap->nvecs++;
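/* Map each page fragment; a new work item (WI extension) is started every
 * BFI_TX_MAX_VECTORS_PER_WI vectors.
 */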
3038 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3039 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3040 u16 size = skb_frag_size(frag);
3042 if (unlikely(size == 0)) {
3043 /* Undo the changes starting at tcb->producer_index */
3044 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3045 tcb->producer_index);
3047 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3048 return NETDEV_TX_OK;
3054 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3056 BNA_QE_INDX_INC(prod, q_depth);
3057 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3058 txqent->hdr.wi_ext.opcode =
3059 __constant_htons(BNA_TXQ_WI_EXTENSION);
3060 unmap = &unmap_q[prod];
3063 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3064 0, size, DMA_TO_DEVICE);
3065 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3066 txqent->vector[vect_id].length = htons(size);
3067 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3069 head_unmap->nvecs++;
3072 if (unlikely(len != skb->len)) {
3073 /* Undo the changes starting at tcb->producer_index */
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3076 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3077 return NETDEV_TX_OK;
3080 BNA_QE_INDX_INC(prod, q_depth);
3081 tcb->producer_index = prod;
3085 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3086 return NETDEV_TX_OK;
3088 skb_tx_timestamp(skb);
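/* Ring the TxQ doorbell so the adapter picks up the new producer index. */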
3090 bna_txq_prod_indx_doorbell(tcb);
3093 return NETDEV_TX_OK;
3097 * The spin lock is used to synchronize reading of the stats structures,
3098 * which are written by BNA under the same lock.
3100 static struct rtnl_link_stats64 *
3101 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3103 struct bnad *bnad = netdev_priv(netdev);
3104 unsigned long flags;
3106 spin_lock_irqsave(&bnad->bna_lock, flags);
3108 bnad_netdev_qstats_fill(bnad, stats);
3109 bnad_netdev_hwstats_fill(bnad, stats);
3111 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3117 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3119 struct net_device *netdev = bnad->netdev;
3120 int uc_count = netdev_uc_count(netdev);
3121 enum bna_cb_status ret;
3123 struct netdev_hw_addr *ha;
3126 if (netdev_uc_empty(bnad->netdev)) {
3127 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3131 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3134 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3135 if (mac_list == NULL)
3139 netdev_for_each_uc_addr(ha, netdev) {
3140 memcpy(&mac_list[entry * ETH_ALEN],
3141 &ha->addr[0], ETH_ALEN);
3145 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3149 if (ret != BNA_CB_SUCCESS)
3154 /* ucast packets not in UCAM are routed to default function */
3156 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3157 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3161 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3163 struct net_device *netdev = bnad->netdev;
3164 int mc_count = netdev_mc_count(netdev);
3165 enum bna_cb_status ret;
3168 if (netdev->flags & IFF_ALLMULTI)
3171 if (netdev_mc_empty(netdev))
3174 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3177 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3179 if (mac_list == NULL)
3182 memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3184 /* copy the rest of the multicast addresses */
3185 bnad_netdev_mc_list_get(netdev, mac_list);
3186 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3190 if (ret != BNA_CB_SUCCESS)
3196 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3197 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3201 bnad_set_rx_mode(struct net_device *netdev)
3203 struct bnad *bnad = netdev_priv(netdev);
3204 enum bna_rxmode new_mode, mode_mask;
3205 unsigned long flags;
3207 spin_lock_irqsave(&bnad->bna_lock, flags);
3209 if (bnad->rx_info[0].rx == NULL) {
3210 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3214 /* clear bnad flags so they can be updated with the new settings */
3215 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3219 if (netdev->flags & IFF_PROMISC) {
3220 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3221 bnad->cfg_flags |= BNAD_CF_PROMISC;
3223 bnad_set_rx_mcast_fltr(bnad);
3225 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3226 new_mode |= BNA_RXMODE_ALLMULTI;
3228 bnad_set_rx_ucast_fltr(bnad);
3230 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3231 new_mode |= BNA_RXMODE_DEFAULT;
3234 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3235 BNA_RXMODE_ALLMULTI;
3236 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
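/* Keep VLAN tags intact (no stripping) while promiscuous mode is active. */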
3238 if (bnad->cfg_flags & BNAD_CF_PROMISC)
3239 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3241 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3243 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3247 * bna_lock is used to sync writes to netdev->addr
3248 * conf_lock cannot be used since this call may be made
3249 * in a non-blocking context.
3252 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3255 struct bnad *bnad = netdev_priv(netdev);
3256 struct sockaddr *sa = (struct sockaddr *)mac_addr;
3257 unsigned long flags;
3259 spin_lock_irqsave(&bnad->bna_lock, flags);
3261 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3264 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3266 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3272 bnad_mtu_set(struct bnad *bnad, int frame_size)
3274 unsigned long flags;
3276 init_completion(&bnad->bnad_completions.mtu_comp);
3278 spin_lock_irqsave(&bnad->bna_lock, flags);
3279 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3282 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3284 return bnad->bnad_completions.mtu_comp_status;
3288 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3291 struct bnad *bnad = netdev_priv(netdev);
3292 u32 rx_count = 0, frame, new_frame;
3294 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3297 mutex_lock(&bnad->conf_mutex);
3300 netdev->mtu = new_mtu;
3302 frame = BNAD_FRAME_SIZE(mtu);
3303 new_frame = BNAD_FRAME_SIZE(new_mtu);
3305 /* check if multi-buffer needs to be enabled */
3306 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3307 netif_running(bnad->netdev)) {
3308 /* only when the frame size transition crosses 4K */
3309 if ((frame <= 4096 && new_frame > 4096) ||
3310 (frame > 4096 && new_frame <= 4096))
3311 rx_count = bnad_reinit_rx(bnad);
3314 /* rx_count > 0 - a new Rx was created;
3315 * Linux then sets err = 0 and returns
3317 err = bnad_mtu_set(bnad, new_frame);
3321 mutex_unlock(&bnad->conf_mutex);
3326 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3328 struct bnad *bnad = netdev_priv(netdev);
3329 unsigned long flags;
3331 if (!bnad->rx_info[0].rx)
3334 mutex_lock(&bnad->conf_mutex);
3336 spin_lock_irqsave(&bnad->bna_lock, flags);
3337 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3338 set_bit(vid, bnad->active_vlans);
3339 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3341 mutex_unlock(&bnad->conf_mutex);
3347 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3349 struct bnad *bnad = netdev_priv(netdev);
3350 unsigned long flags;
3352 if (!bnad->rx_info[0].rx)
3355 mutex_lock(&bnad->conf_mutex);
3357 spin_lock_irqsave(&bnad->bna_lock, flags);
3358 clear_bit(vid, bnad->active_vlans);
3359 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3362 mutex_unlock(&bnad->conf_mutex);
3367 #ifdef CONFIG_NET_POLL_CONTROLLER
3369 bnad_netpoll(struct net_device *netdev)
3371 struct bnad *bnad = netdev_priv(netdev);
3372 struct bnad_rx_info *rx_info;
3373 struct bnad_rx_ctrl *rx_ctrl;
3377 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3378 bna_intx_disable(&bnad->bna, curr_mask);
3379 bnad_isr(bnad->pcidev->irq, netdev);
3380 bna_intx_enable(&bnad->bna, curr_mask);
3383 * Tx processing may happen in sending context, so no need
3384 * to explicitly process completions here
3388 for (i = 0; i < bnad->num_rx; i++) {
3389 rx_info = &bnad->rx_info[i];
3392 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3393 rx_ctrl = &rx_info->rx_ctrl[j];
3395 bnad_netif_rx_schedule_poll(bnad,
3403 static const struct net_device_ops bnad_netdev_ops = {
3404 .ndo_open = bnad_open,
3405 .ndo_stop = bnad_stop,
3406 .ndo_start_xmit = bnad_start_xmit,
3407 .ndo_get_stats64 = bnad_get_stats64,
3408 .ndo_set_rx_mode = bnad_set_rx_mode,
3409 .ndo_validate_addr = eth_validate_addr,
3410 .ndo_set_mac_address = bnad_set_mac_address,
3411 .ndo_change_mtu = bnad_change_mtu,
3412 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3413 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3414 #ifdef CONFIG_NET_POLL_CONTROLLER
3415 .ndo_poll_controller = bnad_netpoll
3420 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3422 struct net_device *netdev = bnad->netdev;
3424 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3425 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3426 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
3428 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3429 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3430 NETIF_F_TSO | NETIF_F_TSO6;
3432 netdev->features |= netdev->hw_features |
3433 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
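/* NETIF_F_HIGHDMA is added only when 64-bit DMA (using_dac) is available. */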
3436 netdev->features |= NETIF_F_HIGHDMA;
3438 netdev->mem_start = bnad->mmio_start;
3439 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3441 netdev->netdev_ops = &bnad_netdev_ops;
3442 bnad_set_ethtool_ops(netdev);
3446 * 1. Initialize the bnad structure
3447 * 2. Set up the netdev pointer in pci_dev
3448 * 3. Initialize the number of TxQs, CQs and MSI-X vectors
3449 * 4. Initialize the work queue.
3452 bnad_init(struct bnad *bnad,
3453 struct pci_dev *pdev, struct net_device *netdev)
3455 unsigned long flags;
3457 SET_NETDEV_DEV(netdev, &pdev->dev);
3458 pci_set_drvdata(pdev, netdev);
3460 bnad->netdev = netdev;
3461 bnad->pcidev = pdev;
3462 bnad->mmio_start = pci_resource_start(pdev, 0);
3463 bnad->mmio_len = pci_resource_len(pdev, 0);
3464 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3466 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3469 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3470 (unsigned long long) bnad->mmio_len);
3472 spin_lock_irqsave(&bnad->bna_lock, flags);
3473 if (!bnad_msix_disable)
3474 bnad->cfg_flags = BNAD_CF_MSIX;
3476 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3478 bnad_q_num_init(bnad);
3479 spin_unlock_irqrestore(&bnad->bna_lock, flags);
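/* One MSI-X vector per TxQ and per Rx path, plus the mailbox vector(s). */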
3481 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3482 (bnad->num_rx * bnad->num_rxp_per_rx) +
3483 BNAD_MAILBOX_MSIX_VECTORS;
3485 bnad->txq_depth = BNAD_TXQ_DEPTH;
3486 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3488 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3489 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3491 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3492 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3493 if (!bnad->work_q) {
3494 iounmap(bnad->bar0);
3502 * Must be called after bnad_pci_uninit()
3503 * so that iounmap() and pci_set_drvdata(NULL)
3504 * happen only after PCI uninitialization.
3507 bnad_uninit(struct bnad *bnad)
3510 flush_workqueue(bnad->work_q);
3511 destroy_workqueue(bnad->work_q);
3512 bnad->work_q = NULL;
3516 iounmap(bnad->bar0);
3521 a) Per-ioceth mutex used for serializing configuration
3522 changes from the OS interface
3523 b) spin lock used to protect bna state machine
3526 bnad_lock_init(struct bnad *bnad)
3528 spin_lock_init(&bnad->bna_lock);
3529 mutex_init(&bnad->conf_mutex);
3530 mutex_init(&bnad_list_mutex);
3534 bnad_lock_uninit(struct bnad *bnad)
3536 mutex_destroy(&bnad->conf_mutex);
3537 mutex_destroy(&bnad_list_mutex);
3540 /* PCI Initialization */
3542 bnad_pci_init(struct bnad *bnad,
3543 struct pci_dev *pdev, bool *using_dac)
3547 err = pci_enable_device(pdev);
3550 err = pci_request_regions(pdev, BNAD_NAME);
3552 goto disable_device;
3553 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3556 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3558 goto release_regions;
3561 pci_set_master(pdev);
3565 pci_release_regions(pdev);
3567 pci_disable_device(pdev);
3573 bnad_pci_uninit(struct pci_dev *pdev)
3575 pci_release_regions(pdev);
3576 pci_disable_device(pdev);
3580 bnad_pci_probe(struct pci_dev *pdev,
3581 const struct pci_device_id *pcidev_id)
3587 struct net_device *netdev;
3588 struct bfa_pcidev pcidev_info;
3589 unsigned long flags;
3591 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3592 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3594 mutex_lock(&bnad_fwimg_mutex);
3595 if (!cna_get_firmware_buf(pdev)) {
3596 mutex_unlock(&bnad_fwimg_mutex);
3597 pr_warn("Failed to load Firmware Image!\n");
3600 mutex_unlock(&bnad_fwimg_mutex);
3603 * Allocate a net_device with sizeof(struct bnad) of private data;
3604 * bnad = netdev_priv(netdev)
3606 netdev = alloc_etherdev(sizeof(struct bnad));
3611 bnad = netdev_priv(netdev);
3612 bnad_lock_init(bnad);
3613 bnad_add_to_list(bnad);
3615 mutex_lock(&bnad->conf_mutex);
3617 * PCI initialization
3618 * Output : using_dac = 1 for 64 bit DMA
3619 * = 0 for 32 bit DMA
3622 err = bnad_pci_init(bnad, pdev, &using_dac);
3627 * Initialize bnad structure
3628 * Setup relation between pci_dev & netdev
3630 err = bnad_init(bnad, pdev, netdev);
3634 /* Initialize netdev structure, set up ethtool ops */
3635 bnad_netdev_init(bnad, using_dac);
3637 /* Set link to down state */
3638 netif_carrier_off(netdev);
3640 /* Set up the debugfs node for this bnad */
3641 if (bna_debugfs_enable)
3642 bnad_debugfs_init(bnad);
3644 /* Get resource requirement from bna */
3645 spin_lock_irqsave(&bnad->bna_lock, flags);
3646 bna_res_req(&bnad->res_info[0]);
3647 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3649 /* Allocate resources from bna */
3650 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3656 /* Setup pcidev_info for bna_init() */
3657 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3658 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3659 pcidev_info.device_id = bnad->pcidev->device;
3660 pcidev_info.pci_bar_kva = bnad->bar0;
3662 spin_lock_irqsave(&bnad->bna_lock, flags);
3663 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3664 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3666 bnad->stats.bna_stats = &bna->stats;
3668 bnad_enable_msix(bnad);
3669 err = bnad_mbox_irq_alloc(bnad);
3674 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3675 ((unsigned long)bnad));
3676 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3677 ((unsigned long)bnad));
3678 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3679 ((unsigned long)bnad));
3680 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3681 ((unsigned long)bnad));
3683 /* Now start the timer before calling IOC */
3684 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3685 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3689 * If the callback comes back with an error, we bail out.
3690 * This is a catastrophic error.
3692 err = bnad_ioceth_enable(bnad);
3694 pr_err("BNA: Initialization failed err=%d\n",
3699 spin_lock_irqsave(&bnad->bna_lock, flags);
3700 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3701 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3702 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3703 bna_attr(bna)->num_rxp - 1);
3704 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3705 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3708 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3710 goto disable_ioceth;
3712 spin_lock_irqsave(&bnad->bna_lock, flags);
3713 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3714 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3716 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3719 goto disable_ioceth;
3722 spin_lock_irqsave(&bnad->bna_lock, flags);
3723 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3724 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3726 /* Get the burnt-in MAC address */
3727 spin_lock_irqsave(&bnad->bna_lock, flags);
3728 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3729 bnad_set_netdev_perm_addr(bnad);
3730 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3732 mutex_unlock(&bnad->conf_mutex);
3734 /* Finally, register with the net_device layer */
3735 err = register_netdev(netdev);
3737 pr_err("BNA : Registering with netdev failed\n");
3740 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3745 mutex_unlock(&bnad->conf_mutex);
3749 mutex_lock(&bnad->conf_mutex);
3750 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3752 bnad_ioceth_disable(bnad);
3753 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3754 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3755 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3756 spin_lock_irqsave(&bnad->bna_lock, flags);
3758 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3759 bnad_mbox_irq_free(bnad);
3760 bnad_disable_msix(bnad);
3762 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3764 /* Remove the debugfs node for this bnad */
3765 kfree(bnad->regdata);
3766 bnad_debugfs_uninit(bnad);
3769 bnad_pci_uninit(pdev);
3771 mutex_unlock(&bnad->conf_mutex);
3772 bnad_remove_from_list(bnad);
3773 bnad_lock_uninit(bnad);
3774 free_netdev(netdev);
3779 bnad_pci_remove(struct pci_dev *pdev)
3781 struct net_device *netdev = pci_get_drvdata(pdev);
3784 unsigned long flags;
3789 pr_info("%s bnad_pci_remove\n", netdev->name);
3790 bnad = netdev_priv(netdev);
3793 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3794 unregister_netdev(netdev);
3796 mutex_lock(&bnad->conf_mutex);
3797 bnad_ioceth_disable(bnad);
3798 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3799 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3800 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3801 spin_lock_irqsave(&bnad->bna_lock, flags);
3803 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3805 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3806 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3807 bnad_mbox_irq_free(bnad);
3808 bnad_disable_msix(bnad);
3809 bnad_pci_uninit(pdev);
3810 mutex_unlock(&bnad->conf_mutex);
3811 bnad_remove_from_list(bnad);
3812 bnad_lock_uninit(bnad);
3813 /* Remove the debugfs node for this bnad */
3814 kfree(bnad->regdata);
3815 bnad_debugfs_uninit(bnad);
3817 free_netdev(netdev);
3820 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3822 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3823 PCI_DEVICE_ID_BROCADE_CT),
3824 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3825 .class_mask = 0xffff00
3828 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3829 BFA_PCI_DEVICE_ID_CT2),
3830 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3831 .class_mask = 0xffff00
3836 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3838 static struct pci_driver bnad_pci_driver = {
3840 .id_table = bnad_pci_id_table,
3841 .probe = bnad_pci_probe,
3842 .remove = bnad_pci_remove,
3846 bnad_module_init(void)
3850 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3853 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3855 err = pci_register_driver(&bnad_pci_driver);
3857 pr_err("bna : PCI registration failed in module init "
3866 bnad_module_exit(void)
3868 pci_unregister_driver(&bnad_pci_driver);
3869 release_firmware(bfi_fw);
3872 module_init(bnad_module_init);
3873 module_exit(bnad_module_exit);
3875 MODULE_AUTHOR("Brocade");
3876 MODULE_LICENSE("GPL");
3877 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3878 MODULE_VERSION(BNAD_VERSION);
3879 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3880 MODULE_FIRMWARE(CNA_FW_FILE_CT2);