1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/etherdevice.h>
21 #include <net/ip6_checksum.h>
22 #include <linux/firmware.h>
23 #include "bnx2x_cmn.h"
26 #include <linux/if_vlan.h>
29 #include "bnx2x_init.h"
32 /* free skb in the packet ring at pos idx
33 * return idx of last bd freed
35 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
45 /* prefetch skb end pointer to speed up dev_kfree_skb() */
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58 #ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
64 new_cons = nbd + tx_buf->first_bd;
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69 /* Skip a parse bd... */
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
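/* Illustrative sketch (assumption, not driver code): BD and packet indices
 * in this file are free-running u16 counters that are only masked into the
 * ring (TX_BD()/NEXT_TX_IDX()) when they are used to address a descriptor,
 * so "space left" arithmetic can rely on natural u16 wrap-around.  A minimal
 * helper over a hypothetical power-of-two ring:
 */
static inline u16 example_tx_ring_avail(u16 prod, u16 cons, u16 ring_size)
{
	u16 used = prod - cons;		/* correct even across 0xffff -> 0 */

	return ring_size - 1 - used;	/* keep one slot as a guard entry */
}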
99 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
105 #ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
114 while (sw_cons != hw_cons) {
117 pkt_cons = TX_BD(sw_cons);
119 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
121 fp->index, hw_cons, sw_cons, pkt_cons);
123 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
127 fp->tx_pkt_cons = sw_cons;
128 fp->tx_bd_cons = bd_cons;
130 /* Need to make the tx_bd_cons update visible to start_xmit()
131 * before checking for netif_tx_queue_stopped(). Without the
132 * memory barrier, there is a small possibility that
133 * start_xmit() will miss it and cause the queue to be stopped forever. */
138 if (unlikely(netif_tx_queue_stopped(txq))) {
139 /* Taking tx_lock() is needed to prevent re-enabling the queue
140 * while it's empty. This could have happened if rx_action() gets
141 * suspended in bnx2x_tx_int() after the condition before
142 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
144 * stops the queue->sees fresh tx_bd_cons->releases the queue->
145 * sends some packets consuming the whole queue again->stops the queue; see the sketch after this function. */
149 __netif_tx_lock(txq, smp_processor_id());
151 if ((netif_tx_queue_stopped(txq)) &&
152 (bp->state == BNX2X_STATE_OPEN) &&
153 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
154 netif_tx_wake_queue(txq);
156 __netif_tx_unlock(txq);
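/* Illustrative sketch (assumption, not driver code): the recheck under the
 * tx lock above pairs with the stop path in bnx2x_start_xmit().  The two
 * sides must order their ring update against their queue-state check with
 * smp_mb(), otherwise a wake-up can be lost and the queue stays stopped.
 * Hypothetical example_*() helpers showing the minimal pattern:
 */
static inline void example_tx_done_wake(struct bnx2x_fastpath *fp,
					struct netdev_queue *txq, u16 new_cons)
{
	fp->tx_bd_cons = new_cons;
	/* publish the new consumer before looking at the queue state */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* recheck under the tx lock so an empty queue is never woken */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static inline void example_xmit_stop_if_full(struct bnx2x_fastpath *fp,
					     struct netdev_queue *txq)
{
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* pairs with the smp_mb() on the completion side */
		smp_mb();
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
}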
161 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
164 u16 last_max = fp->last_max_sge;
166 if (SUB_S16(idx, last_max) > 0)
167 fp->last_max_sge = idx;
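/* Illustrative sketch (assumption): SUB_S16() above is the usual
 * sequence-number trick - interpreting the u16 difference as s16 keeps the
 * "is newer" test correct across the 0xffff -> 0 rollover.  A hypothetical
 * stand-alone equivalent:
 */
static inline bool example_idx_is_newer(u16 idx, u16 last)
{
	return (s16)(idx - last) > 0;	/* true if idx is ahead of last */
}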
170 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
171 struct eth_fast_path_rx_cqe *fp_cqe)
173 struct bnx2x *bp = fp->bp;
174 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
175 le16_to_cpu(fp_cqe->len_on_bd)) >>
177 u16 last_max, last_elem, first_elem;
184 /* First mark all used pages */
185 for (i = 0; i < sge_len; i++)
186 SGE_MASK_CLEAR_BIT(fp,
187 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
189 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
190 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
192 /* Here we assume that the last SGE index is the biggest */
193 prefetch((void *)(fp->sge_mask));
194 bnx2x_update_last_max_sge(fp,
195 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
197 last_max = RX_SGE(fp->last_max_sge);
198 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
199 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
201 /* If ring is not full */
202 if (last_elem + 1 != first_elem)
205 /* Now update the prod */
206 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
207 if (likely(fp->sge_mask[i]))
210 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
211 delta += RX_SGE_MASK_ELEM_SZ;
215 fp->rx_sge_prod += delta;
216 /* clear page-end entries */
217 bnx2x_clear_sge_mask_next_elems(fp);
220 DP(NETIF_MSG_RX_STATUS,
221 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
222 fp->last_max_sge, fp->rx_sge_prod);
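/* Illustrative sketch (assumption, not driver code): the loop above only
 * advances the SGE producer in whole 64-bit mask elements - an element is
 * re-armed (and its SGEs credited to the producer) only once every bit in it
 * has been cleared, i.e. every page it tracks was consumed and replaced.
 * A hypothetical stand-alone version of that scan:
 */
static inline u16 example_sge_mask_advance(u64 *mask, u16 first_elem,
					   u16 last_elem, u16 nelems)
{
	u16 i, delta = 0;

	for (i = first_elem; i != last_elem; i = (i + 1) % nelems) {
		if (mask[i])		/* element still has outstanding pages */
			break;
		mask[i] = ~0ULL;	/* re-arm the element ... */
		delta += 64;		/* ... and credit its SGEs to the producer */
	}
	return delta;
}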
225 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
226 struct sk_buff *skb, u16 cons, u16 prod)
228 struct bnx2x *bp = fp->bp;
229 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
230 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
231 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
234 /* move empty skb from pool to prod and map it */
235 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
236 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
237 bp->rx_buf_size, DMA_FROM_DEVICE);
238 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
240 /* move partial skb from cons to pool (don't unmap yet) */
241 fp->tpa_pool[queue] = *cons_rx_buf;
243 /* mark bin state as start - print error if current state != stop */
244 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
245 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
247 fp->tpa_state[queue] = BNX2X_TPA_START;
249 /* point prod_bd to new skb */
250 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
251 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
253 #ifdef BNX2X_STOP_ON_ERROR
254 fp->tpa_queue_used |= (1 << queue);
255 #ifdef _ASM_GENERIC_INT_L64_H
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
264 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
266 struct eth_fast_path_rx_cqe *fp_cqe,
269 struct sw_rx_page *rx_pg, old_rx_pg;
270 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
271 u32 i, frag_len, frag_size, pages;
275 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
276 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
278 /* This is needed in order to enable forwarding support */
280 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
281 max(frag_size, (u32)len_on_bd));
283 #ifdef BNX2X_STOP_ON_ERROR
284 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
285 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
287 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
288 fp_cqe->pkt_len, len_on_bd);
294 /* Run through the SGL and compose the fragmented skb */
295 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
297 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
299 /* FW gives the indices of the SGE as if the ring is an array
300 (meaning that "next" element will consume 2 indices) */
301 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
302 rx_pg = &fp->rx_page_ring[sge_idx];
305 /* If we fail to allocate a substitute page, we simply stop
306 where we are and drop the whole packet */
307 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
309 fp->eth_q_stats.rx_skb_alloc_failed++;
313 /* Unmap the page as we're going to pass it to the stack */
314 dma_unmap_page(&bp->pdev->dev,
315 dma_unmap_addr(&old_rx_pg, mapping),
316 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
318 /* Add one frag and update the appropriate fields in the skb */
319 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
321 skb->data_len += frag_len;
322 skb->truesize += frag_len;
323 skb->len += frag_len;
325 frag_size -= frag_len;
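/* Illustrative sketch (assumption): attaching an already unmapped page to
 * the aggregated skb, as done per SGE above, is the standard three-counter
 * pattern - add the frag descriptor, then grow len, data_len and truesize.
 * Hypothetical minimal helper:
 */
static inline void example_skb_add_rx_page(struct sk_buff *skb, int frag_idx,
					   struct page *page, unsigned int len)
{
	skb_fill_page_desc(skb, frag_idx, page, 0, len);
	skb->len += len;		/* total bytes carried by the skb */
	skb->data_len += len;		/* bytes held in frags (non-linear) */
	skb->truesize += len;		/* memory accounted to the socket */
}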
331 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
332 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
335 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
336 struct sk_buff *skb = rx_buf->skb;
338 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
340 /* Unmap skb in the pool anyway, as we are going to change
341 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
343 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
344 bp->rx_buf_size, DMA_FROM_DEVICE);
346 if (likely(new_skb)) {
347 /* fix ip xsum and give it to the stack */
348 /* (no need to map the new skb) */
351 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
353 int is_not_hwaccel_vlan_cqe =
354 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
358 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
360 #ifdef BNX2X_STOP_ON_ERROR
361 if (pad + len > bp->rx_buf_size) {
362 BNX2X_ERR("skb_put is about to fail... "
363 "pad %d len %d rx_buf_size %d\n",
364 pad, len, bp->rx_buf_size);
370 skb_reserve(skb, pad);
373 skb->protocol = eth_type_trans(skb, bp->dev);
374 skb->ip_summed = CHECKSUM_UNNECESSARY;
379 iph = (struct iphdr *)skb->data;
381 /* If there is no Rx VLAN offloading -
382 take the VLAN tag into account */
383 if (unlikely(is_not_hwaccel_vlan_cqe))
384 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
387 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
390 if (!bnx2x_fill_frag_skb(bp, fp, skb,
391 &cqe->fast_path_cqe, cqe_idx)) {
393 if ((bp->vlgrp != NULL) &&
394 (le16_to_cpu(cqe->fast_path_cqe.
395 pars_flags.flags) & PARSING_FLAGS_VLAN))
396 vlan_gro_receive(&fp->napi, bp->vlgrp,
397 le16_to_cpu(cqe->fast_path_cqe.
401 napi_gro_receive(&fp->napi, skb);
403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
404 " - dropping packet!\n");
409 /* put new skb in bin */
410 fp->tpa_pool[queue].skb = new_skb;
413 /* else drop the packet and keep the buffer in the bin */
414 DP(NETIF_MSG_RX_STATUS,
415 "Failed to allocate new skb - dropping packet!\n");
416 fp->eth_q_stats.rx_skb_alloc_failed++;
419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
422 /* Set Toeplitz hash value in the skb using the value from the
423 * CQE (calculated by HW).
425 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
428 /* Set Toeplitz hash from CQE */
429 if ((bp->dev->features & NETIF_F_RXHASH) &&
430 (cqe->fast_path_cqe.status_flags &
431 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
433 skb->rxhash = le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
436 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
438 struct bnx2x *bp = fp->bp;
439 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
440 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
443 #ifdef BNX2X_STOP_ON_ERROR
444 if (unlikely(bp->panic))
448 /* CQ "next element" is of the same size as a regular element,
449 that's why it's ok here */
450 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
451 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
454 bd_cons = fp->rx_bd_cons;
455 bd_prod = fp->rx_bd_prod;
456 bd_prod_fw = bd_prod;
457 sw_comp_cons = fp->rx_comp_cons;
458 sw_comp_prod = fp->rx_comp_prod;
460 /* Memory barrier necessary as speculative reads of the rx
461 * buffer can be ahead of the index in the status block
465 DP(NETIF_MSG_RX_STATUS,
466 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
467 fp->index, hw_comp_cons, sw_comp_cons);
469 while (sw_comp_cons != hw_comp_cons) {
470 struct sw_rx_bd *rx_buf = NULL;
472 union eth_rx_cqe *cqe;
476 comp_ring_cons = RCQ_BD(sw_comp_cons);
477 bd_prod = RX_BD(bd_prod);
478 bd_cons = RX_BD(bd_cons);
480 /* Prefetch the page containing the BD descriptor
481 at producer's index. It will be needed when a new skb is allocated */
483 prefetch((void *)(PAGE_ALIGN((unsigned long)
484 (&fp->rx_desc_ring[bd_prod])) -
487 cqe = &fp->rx_comp_ring[comp_ring_cons];
488 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
490 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
491 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
492 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
493 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
494 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
495 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
497 /* is this a slowpath msg? */
498 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
499 bnx2x_sp_event(fp, cqe);
502 /* this is an rx packet */
504 rx_buf = &fp->rx_buf_ring[bd_cons];
507 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
508 pad = cqe->fast_path_cqe.placement_offset;
510 /* - If CQE is marked both TPA_START and TPA_END it is a non-TPA CQE.
512 * - FP CQE will always have the TPA_START and/or
513 * TPA_STOP flags set.
515 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) {
518 u16 queue = cqe->fast_path_cqe.queue_index;
520 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521 DP(NETIF_MSG_RX_STATUS,
522 "calling tpa_start on queue %d\n",
525 bnx2x_tpa_start(fp, queue, skb,
528 /* Set Toeplitz hash for an LRO skb */
529 bnx2x_set_skb_rxhash(bp, cqe, skb);
532 } else { /* TPA_STOP */
533 DP(NETIF_MSG_RX_STATUS,
534 "calling tpa_stop on queue %d\n",
537 if (!BNX2X_RX_SUM_FIX(cqe))
538 BNX2X_ERR("STOP on non-TCP "
541 /* This is the size of the linear data on this skb */
543 len = le16_to_cpu(cqe->fast_path_cqe.
545 bnx2x_tpa_stop(bp, fp, queue, pad,
546 len, cqe, comp_ring_cons);
547 #ifdef BNX2X_STOP_ON_ERROR
552 bnx2x_update_sge_prod(fp,
553 &cqe->fast_path_cqe);
558 dma_sync_single_for_device(&bp->pdev->dev,
559 dma_unmap_addr(rx_buf, mapping),
560 pad + RX_COPY_THRESH,
562 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
564 /* is this an error packet? */
565 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
567 "ERROR flags %x rx packet %u\n",
568 cqe_fp_flags, sw_comp_cons);
569 fp->eth_q_stats.rx_err_discard_pkt++;
573 /* Since we don't have a jumbo ring
574 * copy small packets if mtu > 1500
576 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577 (len <= RX_COPY_THRESH)) {
578 struct sk_buff *new_skb;
580 new_skb = netdev_alloc_skb(bp->dev,
582 if (new_skb == NULL) {
584 "ERROR packet dropped "
585 "because of alloc failure\n");
586 fp->eth_q_stats.rx_skb_alloc_failed++;
591 skb_copy_from_linear_data_offset(skb, pad,
592 new_skb->data + pad, len);
593 skb_reserve(new_skb, pad);
594 skb_put(new_skb, len);
596 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
601 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602 dma_unmap_single(&bp->pdev->dev,
603 dma_unmap_addr(rx_buf, mapping),
606 skb_reserve(skb, pad);
611 "ERROR packet dropped because "
612 "of alloc failure\n");
613 fp->eth_q_stats.rx_skb_alloc_failed++;
615 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
619 skb->protocol = eth_type_trans(skb, bp->dev);
621 /* Set Toeplitz hash for a non-LRO skb */
622 bnx2x_set_skb_rxhash(bp, cqe, skb);
624 skb_checksum_none_assert(skb);
627 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY;
630 fp->eth_q_stats.hw_csum_err++;
634 skb_record_rx_queue(skb, fp->index);
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
640 vlan_gro_receive(&fp->napi, bp->vlgrp,
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
644 napi_gro_receive(&fp->napi, skb);
650 bd_cons = NEXT_RX_IDX(bd_cons);
651 bd_prod = NEXT_RX_IDX(bd_prod);
652 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
655 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
658 if (rx_pkt == budget)
662 fp->rx_bd_cons = bd_cons;
663 fp->rx_bd_prod = bd_prod_fw;
664 fp->rx_comp_cons = sw_comp_cons;
665 fp->rx_comp_prod = sw_comp_prod;
667 /* Update producers */
668 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
671 fp->rx_pkt += rx_pkt;
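/* Illustrative sketch (assumption, not driver code): the skeleton of the
 * completion loop above - read the hardware consumer from the status block,
 * issue a read barrier so CQE contents are not speculatively fetched ahead
 * of that index, then walk the completion ring up to the NAPI budget.
 * The helper's name is hypothetical:
 */
static inline int example_poll_completions(struct bnx2x_fastpath *fp,
					   int budget)
{
	u16 hw_cons, sw_cons = fp->rx_comp_cons;
	int done = 0;

	hw_cons = le16_to_cpu(*fp->rx_cons_sb);
	rmb();			/* order the index read before CQE reads */

	while (sw_cons != hw_cons && done < budget) {
		/* ... process fp->rx_comp_ring[RCQ_BD(sw_cons)] here ... */
		sw_cons = NEXT_RCQ_IDX(sw_cons);
		done++;
	}
	fp->rx_comp_cons = sw_cons;
	return done;
}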
677 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
679 struct bnx2x_fastpath *fp = fp_cookie;
680 struct bnx2x *bp = fp->bp;
682 /* Return here if interrupt is disabled */
683 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
684 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
688 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
689 "[fp %d fw_sd %d igusb %d]\n",
690 fp->index, fp->fw_sb_id, fp->igu_sb_id);
691 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
693 #ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
707 /* HW Lock for shared dual port PHYs */
708 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
710 mutex_lock(&bp->port.phy_mutex);
712 if (bp->port.need_hw_lock)
713 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
716 void bnx2x_release_phy_lock(struct bnx2x *bp)
718 if (bp->port.need_hw_lock)
719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
721 mutex_unlock(&bp->port.phy_mutex);
724 void bnx2x_link_report(struct bnx2x *bp)
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
732 if (bp->link_vars.link_up) {
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
739 line_speed = bp->link_vars.line_speed;
744 ((bp->mf_config[BP_VN(bp)] &
745 FUNC_MF_CFG_MAX_BW_MASK) >>
746 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
747 if (vn_max_rate < line_speed)
748 line_speed = vn_max_rate;
750 pr_cont("%d Mbps ", line_speed);
752 if (bp->link_vars.duplex == DUPLEX_FULL)
753 pr_cont("full duplex");
755 pr_cont("half duplex");
757 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
758 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
759 pr_cont(", receive ");
760 if (bp->link_vars.flow_ctrl &
762 pr_cont("& transmit ");
764 pr_cont(", transmit ");
766 pr_cont("flow control ON");
770 } else { /* link_down */
771 netif_carrier_off(bp->dev);
772 netdev_err(bp->dev, "NIC Link is Down\n");
776 /* Returns the number of actually allocated BDs */
777 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
780 struct bnx2x *bp = fp->bp;
781 u16 ring_prod, cqe_ring_prod;
784 fp->rx_comp_cons = 0;
785 cqe_ring_prod = ring_prod = 0;
786 for (i = 0; i < rx_ring_size; i++) {
787 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
788 BNX2X_ERR("was only able to allocate "
789 "%d rx skbs on queue[%d]\n", i, fp->index);
790 fp->eth_q_stats.rx_skb_alloc_failed++;
793 ring_prod = NEXT_RX_IDX(ring_prod);
794 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
795 WARN_ON(ring_prod <= i);
798 fp->rx_bd_prod = ring_prod;
799 /* Limit the CQE producer by the CQE ring size */
800 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
802 fp->rx_pkt = fp->rx_calls = 0;
807 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
809 struct bnx2x *bp = fp->bp;
810 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
811 MAX_RX_AVAIL/bp->num_queues;
813 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
815 bnx2x_alloc_rx_bds(fp, rx_ring_size);
818 * this will generate an interrupt (to the TSTORM)
819 * must only be done after chip is initialized
821 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
825 void bnx2x_init_rx_rings(struct bnx2x *bp)
827 int func = BP_FUNC(bp);
828 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
829 ETH_MAX_AGGREGATION_QUEUES_E1H;
833 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
834 BNX2X_FW_IP_HDR_ALIGN_PAD;
837 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
839 for_each_queue(bp, j) {
840 struct bnx2x_fastpath *fp = &bp->fp[j];
842 if (!fp->disable_tpa) {
843 for (i = 0; i < max_agg_queues; i++) {
844 fp->tpa_pool[i].skb =
845 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
846 if (!fp->tpa_pool[i].skb) {
847 BNX2X_ERR("Failed to allocate TPA "
848 "skb pool for queue[%d] - "
849 "disabling TPA on this "
851 bnx2x_free_tpa_pool(bp, fp, i);
855 dma_unmap_addr_set((struct sw_rx_bd *)
856 &bp->fp->tpa_pool[i],
858 fp->tpa_state[i] = BNX2X_TPA_STOP;
861 /* "next page" elements initialization */
862 bnx2x_set_next_page_sgl(fp);
864 /* set SGEs bit mask */
865 bnx2x_init_sge_ring_bit_mask(fp);
867 /* Allocate SGEs and initialize the ring elements */
868 for (i = 0, ring_prod = 0;
869 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
871 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
872 BNX2X_ERR("was only able to allocate "
874 BNX2X_ERR("disabling TPA for"
876 /* Cleanup already allocated elements */
877 bnx2x_free_rx_sge_range(bp,
879 bnx2x_free_tpa_pool(bp,
885 ring_prod = NEXT_SGE_IDX(ring_prod);
888 fp->rx_sge_prod = ring_prod;
892 for_each_queue(bp, j) {
893 struct bnx2x_fastpath *fp = &bp->fp[j];
897 bnx2x_set_next_page_rx_bd(fp);
900 bnx2x_set_next_page_rx_cq(fp);
902 /* Allocate BDs and initialize BD ring */
903 bnx2x_alloc_rx_bd_ring(fp);
908 if (!CHIP_IS_E2(bp)) {
909 REG_WR(bp, BAR_USTRORM_INTMEM +
910 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
911 U64_LO(fp->rx_comp_mapping));
912 REG_WR(bp, BAR_USTRORM_INTMEM +
913 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
914 U64_HI(fp->rx_comp_mapping));
919 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
937 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
965 void bnx2x_free_skbs(struct bnx2x *bp)
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
971 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
991 void bnx2x_free_irq(struct bnx2x *bp)
993 if (bp->flags & USING_MSIX_FLAG)
994 bnx2x_free_msix_irqs(bp);
995 else if (bp->flags & USING_MSI_FLAG)
996 free_irq(bp->pdev->irq, bp->dev);
998 free_irq(bp->pdev->irq, bp->dev);
1001 int bnx2x_enable_msix(struct bnx2x *bp)
1003 int msix_vec = 0, i, rc, req_cnt;
1005 bp->msix_table[msix_vec].entry = msix_vec;
1006 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1007 bp->msix_table[0].entry);
1011 bp->msix_table[msix_vec].entry = msix_vec;
1012 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1013 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1016 for_each_queue(bp, i) {
1017 bp->msix_table[msix_vec].entry = msix_vec;
1018 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1019 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1023 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1025 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1028 * reconfigure number of tx/rx queues according to available MSI-X vectors */
1031 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1032 /* how many fewer vectors will we have? */
1033 int diff = req_cnt - rc;
1036 "Trying to use less MSI-X vectors: %d\n", rc);
1038 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1042 "MSI-X is not attainable rc %d\n", rc);
1046 * decrease number of queues by number of unallocated entries
1048 bp->num_queues -= diff;
1050 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1053 /* fall back to INTx if not enough memory */
1055 bp->flags |= DISABLE_MSI_FLAG;
1056 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1060 bp->flags |= USING_MSIX_FLAG;
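/* Illustrative sketch (assumption): the retry above follows the
 * pci_enable_msix() contract of this kernel generation - a positive return
 * value means "only this many vectors are available", so the caller shrinks
 * its request and tries once more before falling back to MSI/INTx.
 * Hypothetical condensed form:
 */
static inline int example_enable_msix(struct pci_dev *pdev,
				      struct msix_entry *tbl, int want)
{
	int rc = pci_enable_msix(pdev, tbl, want);

	if (rc > 0)		/* fewer vectors available - retry with rc */
		rc = pci_enable_msix(pdev, tbl, rc);
	return rc;		/* 0 on success, negative errno on failure */
}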
1065 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1067 int i, rc, offset = 1;
1069 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1070 bp->dev->name, bp->dev);
1072 BNX2X_ERR("request sp irq failed\n");
1079 for_each_queue(bp, i) {
1080 struct bnx2x_fastpath *fp = &bp->fp[i];
1081 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1084 rc = request_irq(bp->msix_table[offset].vector,
1085 bnx2x_msix_fp_int, 0, fp->name, fp);
1087 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1088 bnx2x_free_msix_irqs(bp);
1093 fp->state = BNX2X_FP_STATE_IRQ;
1096 i = BNX2X_NUM_QUEUES(bp);
1097 offset = 1 + CNIC_CONTEXT_USE;
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1107 int bnx2x_enable_msi(struct bnx2x *bp)
1111 rc = pci_enable_msi(bp->pdev);
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1116 bp->flags |= USING_MSI_FLAG;
1121 static int bnx2x_req_irq(struct bnx2x *bp)
1123 unsigned long flags;
1126 if (bp->flags & USING_MSI_FLAG)
1129 flags = IRQF_SHARED;
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1139 static void bnx2x_napi_enable(struct bnx2x *bp)
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1147 static void bnx2x_napi_disable(struct bnx2x *bp)
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1155 void bnx2x_netif_start(struct bnx2x *bp)
1159 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1163 if (netif_running(bp->dev)) {
1164 bnx2x_napi_enable(bp);
1165 bnx2x_int_enable(bp);
1166 if (bp->state == BNX2X_STATE_OPEN)
1167 netif_tx_wake_all_queues(bp->dev);
1172 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1174 bnx2x_int_disable_sync(bp, disable_hw);
1175 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev);
1179 void bnx2x_set_num_queues(struct bnx2x *bp)
1181 switch (bp->multi_mode) {
1182 case ETH_RSS_MODE_DISABLED:
1185 case ETH_RSS_MODE_REGULAR:
1186 bp->num_queues = bnx2x_calc_num_queues(bp);
1195 static void bnx2x_release_firmware(struct bnx2x *bp)
1197 kfree(bp->init_ops_offsets);
1198 kfree(bp->init_ops);
1199 kfree(bp->init_data);
1200 release_firmware(bp->firmware);
1203 /* must be called with rtnl_lock */
1204 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1209 /* Set init arrays */
1210 rc = bnx2x_init_firmware(bp);
1212 BNX2X_ERR("Error loading firmware\n");
1216 #ifdef BNX2X_STOP_ON_ERROR
1217 if (unlikely(bp->panic))
1221 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1223 /* must be called before memory allocation and HW init */
1224 bnx2x_ilt_set_info(bp);
1226 if (bnx2x_alloc_mem(bp))
1229 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1230 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1232 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1236 for_each_queue(bp, i)
1237 bnx2x_fp(bp, i, disable_tpa) =
1238 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1240 bnx2x_napi_enable(bp);
1242 /* Send LOAD_REQUEST command to MCP
1243 Returns the type of LOAD command:
1244 if it is the first port to be initialized
1245 common blocks should be initialized, otherwise - not
1247 if (!BP_NOMCP(bp)) {
1248 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1250 BNX2X_ERR("MCP response failure, aborting\n");
1254 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1255 rc = -EBUSY; /* other port in diagnostic mode */
1260 int path = BP_PATH(bp);
1261 int port = BP_PORT(bp);
1263 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1264 path, load_count[path][0], load_count[path][1],
1265 load_count[path][2]);
1266 load_count[path][0]++;
1267 load_count[path][1 + port]++;
1268 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1269 path, load_count[path][0], load_count[path][1],
1270 load_count[path][2]);
1271 if (load_count[path][0] == 1)
1272 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1273 else if (load_count[path][1 + port] == 1)
1274 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1276 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1279 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1280 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1281 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1285 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1288 rc = bnx2x_init_hw(bp, load_code);
1290 BNX2X_ERR("HW init failed, aborting\n");
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1295 /* Connect to IRQs */
1296 rc = bnx2x_setup_irqs(bp);
1298 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1302 /* Setup NIC internals and enable interrupts */
1303 bnx2x_nic_init(bp, load_code);
1305 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1306 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1307 (bp->common.shmem2_base))
1308 SHMEM2_WR(bp, dcc_support,
1309 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1310 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1312 /* Send LOAD_DONE command to MCP */
1313 if (!BP_NOMCP(bp)) {
1314 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1316 BNX2X_ERR("MCP response failure, aborting\n");
1322 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1324 rc = bnx2x_func_start(bp);
1326 BNX2X_ERR("Function start failed!\n");
1327 #ifndef BNX2X_STOP_ON_ERROR
1335 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1337 BNX2X_ERR("Setup leading failed!\n");
1338 #ifndef BNX2X_STOP_ON_ERROR
1346 if (!CHIP_IS_E1(bp) &&
1347 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1348 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1349 bp->flags |= MF_FUNC_DIS;
1353 /* Enable Timer scan */
1354 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1367 /* Now that Clients are configured we are ready to work */
1368 bp->state = BNX2X_STATE_OPEN;
1370 bnx2x_set_eth_mac(bp, 1);
1373 bnx2x_initial_phy_init(bp, load_mode);
1375 /* Start fast path */
1376 switch (load_mode) {
1378 /* Tx queue should only be re-enabled */
1379 netif_tx_wake_all_queues(bp->dev);
1380 /* Initialize the receive filter. */
1381 bnx2x_set_rx_mode(bp->dev);
1385 netif_tx_start_all_queues(bp->dev);
1386 smp_mb__after_clear_bit();
1387 /* Initialize the receive filter. */
1388 bnx2x_set_rx_mode(bp->dev);
1392 /* Initialize the receive filter. */
1393 bnx2x_set_rx_mode(bp->dev);
1394 bp->state = BNX2X_STATE_DIAG;
1402 bnx2x__link_status_update(bp);
1404 /* start the timer */
1405 mod_timer(&bp->timer, jiffies + bp->current_interval);
1408 bnx2x_setup_cnic_irq_info(bp);
1409 if (bp->state == BNX2X_STATE_OPEN)
1410 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1412 bnx2x_inc_load_cnt(bp);
1414 bnx2x_release_firmware(bp);
1420 /* Disable Timer scan */
1421 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1424 bnx2x_int_disable_sync(bp, 1);
1426 /* Free SKBs, SGEs, TPA pool and driver internals */
1427 bnx2x_free_skbs(bp);
1428 for_each_queue(bp, i)
1429 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1434 if (!BP_NOMCP(bp)) {
1435 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1436 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1441 bnx2x_napi_disable(bp);
1445 bnx2x_release_firmware(bp);
1450 /* must be called with rtnl_lock */
1451 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1455 if (bp->state == BNX2X_STATE_CLOSED) {
1456 /* Interface has been removed - nothing to recover */
1457 bp->recovery_state = BNX2X_RECOVERY_DONE;
1459 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1466 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1468 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1470 /* Set "drop all" */
1471 bp->rx_mode = BNX2X_RX_MODE_NONE;
1472 bnx2x_set_storm_rx_mode(bp);
1475 bnx2x_tx_disable(bp);
1477 del_timer_sync(&bp->timer);
1479 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1480 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1482 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1484 /* Cleanup the chip if needed */
1485 if (unload_mode != UNLOAD_RECOVERY)
1486 bnx2x_chip_cleanup(bp, unload_mode);
1488 /* Disable HW interrupts, NAPI and Tx */
1489 bnx2x_netif_stop(bp, 1);
1497 /* Free SKBs, SGEs, TPA pool and driver internals */
1498 bnx2x_free_skbs(bp);
1499 for_each_queue(bp, i)
1500 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1504 bp->state = BNX2X_STATE_CLOSED;
1506 /* The last driver must disable a "close the gate" if there is no
1507 * parity attention or "process kill" pending.
1509 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1510 bnx2x_reset_is_done(bp))
1511 bnx2x_disable_close_the_gate(bp);
1513 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1514 if (unload_mode == UNLOAD_RECOVERY)
1520 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1524 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1528 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1529 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1530 PCI_PM_CTRL_PME_STATUS));
1532 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1533 /* delay required during transition out of D3hot */
1538 /* If there are other clients above don't
1539 shut down the power */
1540 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1542 /* Don't shut down the power for emulation and FPGA */
1543 if (CHIP_REV_IS_SLOW(bp))
1546 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1550 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1552 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1555 /* No more memory access after this point until
1556 * device is brought back to D0.
1567 * net_device service functions
1569 int bnx2x_poll(struct napi_struct *napi, int budget)
1572 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1574 struct bnx2x *bp = fp->bp;
1577 #ifdef BNX2X_STOP_ON_ERROR
1578 if (unlikely(bp->panic)) {
1579 napi_complete(napi);
1584 if (bnx2x_has_tx_work(fp))
1587 if (bnx2x_has_rx_work(fp)) {
1588 work_done += bnx2x_rx_int(fp, budget - work_done);
1590 /* must not complete if we consumed full budget */
1591 if (work_done >= budget)
1595 /* Fall out from the NAPI loop if needed */
1596 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1597 bnx2x_update_fpsb_idx(fp);
1598 /* bnx2x_has_rx_work() reads the status block,
1599 * thus we need to ensure that status block indices
1600 * have been actually read (bnx2x_update_fpsb_idx)
1601 * prior to this check (bnx2x_has_rx_work) so that
1602 * we won't write the "newer" value of the status block
1603 * to IGU (if there was a DMA right after
1604 * bnx2x_has_rx_work and if there is no rmb, the memory
1605 * reading (bnx2x_update_fpsb_idx) may be postponed
1606 * to right before bnx2x_ack_sb). In this case there
1607 * will never be another interrupt until there is
1608 * another update of the status block, while there
1609 * is still unhandled work.
1613 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1614 napi_complete(napi);
1615 /* Re-enable interrupts */
1617 "Update index to %d\n", fp->fp_hc_idx);
1618 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1619 le16_to_cpu(fp->fp_hc_idx),
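/* Illustrative sketch (assumption, not driver code): the ordering the long
 * comment above insists on - refresh the fastpath status-block index, issue
 * a read barrier, re-check for work, and only then complete NAPI and
 * re-enable the interrupt via the IGU ack.  Hypothetical condensed form:
 */
static inline bool example_napi_try_complete(struct bnx2x *bp,
					     struct bnx2x_fastpath *fp,
					     struct napi_struct *napi)
{
	bnx2x_update_fpsb_idx(fp);
	rmb();			/* index update must precede the work check */

	if (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))
		return false;	/* more work arrived - stay in the poll loop */

	napi_complete(napi);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
		     le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
	return true;
}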
1629 /* we split the first BD into header and data BDs
1630 * to ease the pain of our fellow microcode engineers;
1631 * we use one mapping for both BDs.
1632 * So far this has only been observed to happen
1633 * in Other Operating Systems(TM).
1635 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1636 struct bnx2x_fastpath *fp,
1637 struct sw_tx_bd *tx_buf,
1638 struct eth_tx_start_bd **tx_bd, u16 hlen,
1639 u16 bd_prod, int nbd)
1641 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1642 struct eth_tx_bd *d_tx_bd;
1644 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1646 /* first fix first BD */
1647 h_tx_bd->nbd = cpu_to_le16(nbd);
1648 h_tx_bd->nbytes = cpu_to_le16(hlen);
1650 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1651 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1652 h_tx_bd->addr_lo, h_tx_bd->nbd);
1654 /* now get a new data BD
1655 * (after the pbd) and fill it */
1656 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1657 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1659 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1660 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1662 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1663 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1664 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1666 /* this marks the BD as one that has no individual mapping */
1667 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1669 DP(NETIF_MSG_TX_QUEUED,
1670 "TSO split data size is %d (%x:%x)\n",
1671 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1674 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1679 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1682 csum = (u16) ~csum_fold(csum_sub(csum,
1683 csum_partial(t_header - fix, fix, 0)));
1686 csum = (u16) ~csum_fold(csum_add(csum,
1687 csum_partial(t_header, -fix, 0)));
1689 return swab16(csum);
1692 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1696 if (skb->ip_summed != CHECKSUM_PARTIAL)
1700 if (skb->protocol == htons(ETH_P_IPV6)) {
1702 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1703 rc |= XMIT_CSUM_TCP;
1707 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1708 rc |= XMIT_CSUM_TCP;
1712 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1713 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1715 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1716 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1721 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1722 /* check if packet requires linearization (packet is too fragmented)
1723 no need to check fragmentation if page size > 8K (there will be no
1724 violation of FW restrictions) */
1725 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1730 int first_bd_sz = 0;
1732 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1733 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1735 if (xmit_type & XMIT_GSO) {
1736 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1737 /* Check if LSO packet needs to be copied:
1738 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1739 int wnd_size = MAX_FETCH_BD - 3;
1740 /* Number of windows to check */
1741 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1746 /* Headers length */
1747 hlen = (int)(skb_transport_header(skb) - skb->data) +
1750 /* Amount of data (w/o headers) on linear part of SKB*/
1751 first_bd_sz = skb_headlen(skb) - hlen;
1753 wnd_sum = first_bd_sz;
1755 /* Calculate the first sum - it's special */
1756 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1758 skb_shinfo(skb)->frags[frag_idx].size;
1760 /* If there was data in the linear part of the skb - check it */
1761 if (first_bd_sz > 0) {
1762 if (unlikely(wnd_sum < lso_mss)) {
1767 wnd_sum -= first_bd_sz;
1770 /* Others are easier: run through the frag list and
1771 check all windows */
1772 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1774 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1776 if (unlikely(wnd_sum < lso_mss)) {
1781 skb_shinfo(skb)->frags[wnd_idx].size;
1784 /* in the non-LSO case, a too fragmented packet should always be linearized */
1791 if (unlikely(to_copy))
1792 DP(NETIF_MSG_TX_QUEUED,
1793 "Linearization IS REQUIRED for %s packet. "
1794 "num_frags %d hlen %d first_bd_sz %d\n",
1795 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1796 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
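/* Illustrative sketch (assumption): the check above enforces a FW rule that
 * any window of (MAX_FETCH_BD - 3) consecutive descriptors of an LSO packet
 * must cover at least one full MSS.  A hypothetical stand-alone version of
 * the sliding-window test over an array of fragment sizes:
 */
static inline bool example_lso_windows_ok(const unsigned int *frag_sz,
					  int nfrags, int wnd_size,
					  unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];  /* slide the window */
		if (i >= wnd_size - 1 && wnd_sum < mss)
			return false;	/* this window cannot hold one MSS */
	}
	return true;
}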
1802 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1803 struct eth_tx_parse_bd_e2 *pbd,
1806 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1807 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1808 if ((xmit_type & XMIT_GSO_V6) &&
1809 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1810 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1814 * Update PBD in GSO case.
1817 * @param tx_start_bd
1821 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1822 struct eth_tx_parse_bd_e1x *pbd,
1825 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1826 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1827 pbd->tcp_flags = pbd_tcp_flags(skb);
1829 if (xmit_type & XMIT_GSO_V4) {
1830 pbd->ip_id = swab16(ip_hdr(skb)->id);
1831 pbd->tcp_pseudo_csum =
1832 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1834 0, IPPROTO_TCP, 0));
1837 pbd->tcp_pseudo_csum =
1838 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1839 &ipv6_hdr(skb)->daddr,
1840 0, IPPROTO_TCP, 0));
1842 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
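/* Illustrative sketch (assumption): for LSO the pseudo-header checksum above
 * is seeded with a zero length (PSEUDO_CS_WITHOUT_LEN), since the chip fills
 * in the per-segment length when it cuts the packet into MSS-sized frames.
 * Hypothetical helper for the IPv4 case:
 */
static inline u16 example_lso_ipv4_pseudo_csum(const struct iphdr *iph)
{
	/* length 0: hardware adds the real length of every segment */
	return swab16(~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 0, IPPROTO_TCP, 0));
}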
1848 * @param tx_start_bd
1852 * @return header len
1854 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1855 struct eth_tx_parse_bd_e2 *pbd,
1858 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1859 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1861 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1863 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1865 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1871 * @param tx_start_bd
1875 * @return Header length
1877 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1878 struct eth_tx_parse_bd_e1x *pbd,
1881 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1883 /* for now NS flag is not used in Linux */
1885 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1886 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1888 pbd->ip_hlen_w = (skb_transport_header(skb) -
1889 skb_network_header(skb)) / 2;
1891 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1893 pbd->total_hlen_w = cpu_to_le16(hlen);
1896 if (xmit_type & XMIT_CSUM_TCP) {
1897 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1900 s8 fix = SKB_CS_OFF(skb); /* signed! */
1902 DP(NETIF_MSG_TX_QUEUED,
1903 "hlen %d fix %d csum before fix %x\n",
1904 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1906 /* HW bug: fixup the CSUM */
1907 pbd->tcp_pseudo_csum =
1908 bnx2x_csum_fix(skb_transport_header(skb),
1911 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1912 pbd->tcp_pseudo_csum);
1918 /* called with netif_tx_lock
1919 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1920 * netif_wake_queue()
1922 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1924 struct bnx2x *bp = netdev_priv(dev);
1925 struct bnx2x_fastpath *fp;
1926 struct netdev_queue *txq;
1927 struct sw_tx_bd *tx_buf;
1928 struct eth_tx_start_bd *tx_start_bd;
1929 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1930 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1931 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1932 u16 pkt_prod, bd_prod;
1935 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1938 __le16 pkt_size = 0;
1940 u8 mac_type = UNICAST_ADDRESS;
1942 #ifdef BNX2X_STOP_ON_ERROR
1943 if (unlikely(bp->panic))
1944 return NETDEV_TX_BUSY;
1947 fp_index = skb_get_queue_mapping(skb);
1948 txq = netdev_get_tx_queue(dev, fp_index);
1950 fp = &bp->fp[fp_index];
1952 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1953 fp->eth_q_stats.driver_xoff++;
1954 netif_tx_stop_queue(txq);
1955 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1956 return NETDEV_TX_BUSY;
1959 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1960 "protocol(%x,%x) gso type %x xmit_type %x\n",
1961 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1962 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1964 eth = (struct ethhdr *)skb->data;
1966 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1967 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1968 if (is_broadcast_ether_addr(eth->h_dest))
1969 mac_type = BROADCAST_ADDRESS;
1971 mac_type = MULTICAST_ADDRESS;
1974 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1975 /* First, check if we need to linearize the skb (due to FW
1976 restrictions). No need to check fragmentation if page size > 8K
1977 (there will be no violation of FW restrictions) */
1978 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1979 /* Statistics of linearization */
1981 if (skb_linearize(skb) != 0) {
1982 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1983 "silently dropping this SKB\n");
1984 dev_kfree_skb_any(skb);
1985 return NETDEV_TX_OK;
1991 Please read carefully. First we use one BD which we mark as start,
1992 then we have a parsing info BD (used for TSO or xsum),
1993 and only then we have the rest of the TSO BDs.
1994 (don't forget to mark the last one as last,
1995 and to unmap only AFTER you write to the BD ...)
1996 And above all, all pbd sizes are in words - NOT DWORDS!
1999 pkt_prod = fp->tx_pkt_prod++;
2000 bd_prod = TX_BD(fp->tx_bd_prod);
2002 /* get a tx_buf and first BD */
2003 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2004 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2006 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2007 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2011 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2013 /* remember the first BD of the packet */
2014 tx_buf->first_bd = fp->tx_bd_prod;
2018 DP(NETIF_MSG_TX_QUEUED,
2019 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2020 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2023 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2024 (bp->flags & HW_VLAN_TX_FLAG)) {
2025 tx_start_bd->vlan_or_ethertype =
2026 cpu_to_le16(vlan_tx_tag_get(skb));
2027 tx_start_bd->bd_flags.as_bitfield |=
2028 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2031 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2033 /* turn on parsing and get a BD */
2034 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2036 if (xmit_type & XMIT_CSUM) {
2037 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2039 if (xmit_type & XMIT_CSUM_V4)
2040 tx_start_bd->bd_flags.as_bitfield |=
2041 ETH_TX_BD_FLAGS_IP_CSUM;
2043 tx_start_bd->bd_flags.as_bitfield |=
2044 ETH_TX_BD_FLAGS_IPV6;
2046 if (!(xmit_type & XMIT_CSUM_TCP))
2047 tx_start_bd->bd_flags.as_bitfield |=
2048 ETH_TX_BD_FLAGS_IS_UDP;
2051 if (CHIP_IS_E2(bp)) {
2052 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2053 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2054 /* Set PBD in checksum offload case */
2055 if (xmit_type & XMIT_CSUM)
2056 hlen = bnx2x_set_pbd_csum_e2(bp,
2057 skb, pbd_e2, xmit_type);
2059 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2060 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2061 /* Set PBD in checksum offload case */
2062 if (xmit_type & XMIT_CSUM)
2063 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2067 /* Map skb linear data for DMA */
2068 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2069 skb_headlen(skb), DMA_TO_DEVICE);
2071 /* Setup the data pointer of the first BD of the packet */
2072 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2073 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2074 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2075 tx_start_bd->nbd = cpu_to_le16(nbd);
2076 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2077 pkt_size = tx_start_bd->nbytes;
2079 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2080 " nbytes %d flags %x vlan %x\n",
2081 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2082 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2083 tx_start_bd->bd_flags.as_bitfield,
2084 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2086 if (xmit_type & XMIT_GSO) {
2088 DP(NETIF_MSG_TX_QUEUED,
2089 "TSO packet len %d hlen %d total len %d tso size %d\n",
2090 skb->len, hlen, skb_headlen(skb),
2091 skb_shinfo(skb)->gso_size);
2093 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2095 if (unlikely(skb_headlen(skb) > hlen))
2096 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2097 hlen, bd_prod, ++nbd);
2099 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2101 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2103 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2105 /* Handle fragmented skb */
2106 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2110 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2111 if (total_pkt_bd == NULL)
2112 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2114 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2116 frag->size, DMA_TO_DEVICE);
2118 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2119 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2120 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2121 le16_add_cpu(&pkt_size, frag->size);
2123 DP(NETIF_MSG_TX_QUEUED,
2124 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2125 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2126 le16_to_cpu(tx_data_bd->nbytes));
2129 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2131 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2133 /* now send a tx doorbell, counting the next BD
2134 * if the packet contains or ends with it
2136 if (TX_BD_POFF(bd_prod) < nbd)
2139 if (total_pkt_bd != NULL)
2140 total_pkt_bd->total_pkt_bytes = pkt_size;
2143 DP(NETIF_MSG_TX_QUEUED,
2144 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2145 " tcp_flags %x xsum %x seq %u hlen %u\n",
2146 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2147 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2148 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2149 le16_to_cpu(pbd_e1x->total_hlen_w));
2151 DP(NETIF_MSG_TX_QUEUED,
2152 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2153 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2154 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2155 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2156 pbd_e2->parsing_data);
2157 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2160 * Make sure that the BD data is updated before updating the producer
2161 * since FW might read the BD right after the producer is updated.
2162 * This is only applicable for weak-ordered memory model archs such
2163 * as IA-64. The following barrier is also mandatory since FW
2164 * assumes packets must have BDs.
2168 fp->tx_db.data.prod += nbd;
2171 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2175 fp->tx_bd_prod += nbd;
2177 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2178 netif_tx_stop_queue(txq);
2180 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2181 * ordering of set_bit() in netif_tx_stop_queue() and read of fp->tx_bd_cons */
2185 fp->eth_q_stats.driver_xoff++;
2186 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2187 netif_tx_wake_queue(txq);
2191 return NETDEV_TX_OK;
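/* Illustrative sketch (assumption, not driver code): the tail of the xmit
 * path above follows the usual "publish, then ring" ordering - all BD writes
 * must be visible before the producer is exposed to the chip, and the
 * doorbell write must not be reordered before the producer update.
 * Hypothetical condensed form using the same DOORBELL() macro:
 */
static inline void example_tx_ring_doorbell(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp, int nbd)
{
	wmb();				/* BD writes before the producer update */
	fp->tx_db.data.prod += nbd;	/* publish the new producer */
	barrier();			/* keep the doorbell write last */
	DOORBELL(bp, fp->cid, fp->tx_db.raw);
	mmiowb();			/* order the MMIO before the queue unlock */
}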
2194 /* called with rtnl_lock */
2195 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2197 struct sockaddr *addr = p;
2198 struct bnx2x *bp = netdev_priv(dev);
2200 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2203 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2204 if (netif_running(dev))
2205 bnx2x_set_eth_mac(bp, 1);
2211 int bnx2x_setup_irqs(struct bnx2x *bp)
2214 if (bp->flags & USING_MSIX_FLAG) {
2215 rc = bnx2x_req_msix_irqs(bp);
2220 rc = bnx2x_req_irq(bp);
2222 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2225 if (bp->flags & USING_MSI_FLAG) {
2226 bp->dev->irq = bp->pdev->irq;
2227 netdev_info(bp->dev, "using MSI IRQ %d\n",
2235 void bnx2x_free_mem_bp(struct bnx2x *bp)
2238 kfree(bp->msix_table);
2242 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2244 struct bnx2x_fastpath *fp;
2245 struct msix_entry *tbl;
2246 struct bnx2x_ilt *ilt;
2249 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2255 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2259 bp->msix_table = tbl;
2262 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2269 bnx2x_free_mem_bp(bp);
2274 /* called with rtnl_lock */
2275 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2277 struct bnx2x *bp = netdev_priv(dev);
2280 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2281 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2285 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2286 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2289 /* This does not race with packet allocation
2290 * because the actual alloc size is
2291 * only updated as part of load
2295 if (netif_running(dev)) {
2296 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2297 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2303 void bnx2x_tx_timeout(struct net_device *dev)
2305 struct bnx2x *bp = netdev_priv(dev);
2307 #ifdef BNX2X_STOP_ON_ERROR
2311 /* This allows the netif to be shut down gracefully before resetting */
2312 schedule_delayed_work(&bp->reset_task, 0);
2316 /* called with rtnl_lock */
2317 void bnx2x_vlan_rx_register(struct net_device *dev,
2318 struct vlan_group *vlgrp)
2320 struct bnx2x *bp = netdev_priv(dev);
2327 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2329 struct net_device *dev = pci_get_drvdata(pdev);
2333 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2336 bp = netdev_priv(dev);
2340 pci_save_state(pdev);
2342 if (!netif_running(dev)) {
2347 netif_device_detach(dev);
2349 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2351 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2358 int bnx2x_resume(struct pci_dev *pdev)
2360 struct net_device *dev = pci_get_drvdata(pdev);
2365 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2368 bp = netdev_priv(dev);
2370 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2371 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2377 pci_restore_state(pdev);
2379 if (!netif_running(dev)) {
2384 bnx2x_set_power_state(bp, PCI_D0);
2385 netif_device_attach(dev);
2387 /* Since the chip was reset, clear the FW sequence number */
2389 rc = bnx2x_nic_load(bp, LOAD_OPEN);