1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2012 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then copying the entire
43 * source onto the target. Update txdata pointers and related content. */
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[old_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
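/* With no MCP present, these per-path counters decide which init stage
 * a load performs: the first load on a path runs COMMON init, the first
 * on a port runs PORT init, any other load runs FUNCTION init only (see
 * the BP_NOMCP() branch in bnx2x_nic_load()).
 */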
91 /* free skb in the packet ring at pos idx
92 * return idx of last bd freed
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
105 /* prefetch skb end pointer to speed up dev_kfree_skb() */
108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
109 txdata->txq_index, idx, tx_buf, skb);
112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
124 new_cons = nbd + tx_buf->first_bd;
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
129 /* Skip a parse bd... */
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153 (*bytes_compl) += skb->len;
156 dev_kfree_skb_any(skb);
157 tx_buf->first_bd = 0;
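/* bnx2x_tx_int - service Tx completions for a single txdata ring.
 *
 * Walks from the driver's packet consumer up to the hardware consumer
 * taken from the status block, freeing every completed packet, then
 * updates BQL accounting via netdev_tx_completed_queue() and, under the
 * tx lock, re-wakes the queue if it was stopped and enough descriptors
 * have become available again.
 */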
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
165 struct netdev_queue *txq;
166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
169 #ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
178 while (sw_cons != hw_cons) {
181 pkt_cons = TX_BD(sw_cons);
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped forever.
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent re-enabling the queue
211 * while it's empty. This could have happened if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->stops the queue. */
220 __netif_tx_lock(txq, smp_processor_id());
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225 netif_tx_wake_queue(txq);
227 __netif_tx_unlock(txq);
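/* Track the highest SGE index seen so far. SUB_S16() yields a signed
 * 16-bit difference, so the "is idx newer?" test keeps working when
 * the index wraps around the ring.
 */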
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
235 u16 last_max = fp->last_max_sge;
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
243 struct eth_end_agg_rx_cqe *cqe)
245 struct bnx2x *bp = fp->bp;
246 u16 last_max, last_elem, first_elem;
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
263 bnx2x_update_last_max_sge(fp,
264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
266 last_max = RX_SGE(fp->last_max_sge);
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
274 /* Now update the prod */
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
294 /* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298 const struct eth_fast_path_rx_cqe *cqe,
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
309 return le32_to_cpu(cqe->rss_hash_result);
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
317 struct eth_fast_path_rx_cqe *cqe)
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
331 /* Try to map an empty data buffer from the aggregation info */
332 mapping = dma_map_single(&bp->pdev->dev,
333 first_buf->data + NET_SKB_PAD,
334 fp->rx_buf_size, DMA_FROM_DEVICE);
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
343 bnx2x_reuse_rx_data(fp, cons, prod);
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351 /* point prod_bd to new data */
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
373 #ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
384 /* Timestamp option length allowed for TPA aggregation:
386 * nop nop kind length echo val
388 #define TPA_TSTAMP_OPT_LEN 12
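/* The 12 bytes break down as nop(1) + nop(1) + kind(1) + length(1) +
 * timestamp value(4) + echo reply(4): the TCP timestamp option padded
 * with two NOPs to a 4-byte boundary.
 */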
390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the aggregation
397 * Approximate value of the MSS for this aggregation calculated using
398 * the first packet of it.
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
404 * TPA aggregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
413 hdrs_len += sizeof(struct iphdr);
416 /* Check if there was a TCP timestamp; if there is one, it will
417 * always be 12 bytes long: nop nop kind length echo val.
419 * Otherwise the FW would close the aggregation. */
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
424 return len_on_bd - hdrs_len;
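/* Worked example: an IPv4 aggregation with timestamps has
 * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (tstamp) = 66 bytes,
 * so len_on_bd = 1514 yields an MSS of 1514 - 66 = 1448.
 */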
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
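/* bnx2x_fill_frag_skb - attach the SGE pages of an aggregation to an skb.
 *
 * Runs through the SGL reported by the END CQE; for every SGE it first
 * allocates a substitute page for the ring and only then hands the old
 * page to the skb as a fragment (GRO mode splits it into gro_size
 * chunks). Returns 0 on success or an error if a substitute page could
 * not be allocated, in which case the caller drops the whole packet.
 */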
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458 struct bnx2x_agg_info *tpa_info,
461 struct eth_end_agg_rx_cqe *cqe,
464 struct sw_rx_page *rx_pg, old_rx_pg;
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
467 u16 len_on_bd = tpa_info->len_on_bd;
468 u16 full_page = 0, gro_size = 0;
470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
477 /* This is needed in order to enable forwarding support */
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
492 #ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that the "next" element will consume 2 indices) */
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
514 rx_pg = &fp->rx_page_ring[sge_idx];
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
525 /* Unmap the page as we are going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529 /* Add one frag and update the appropriate fields in the skb */
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
540 get_page(old_rx_pg.page);
545 skb->data_len += frag_len;
546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547 skb->len += frag_len;
549 frag_size -= frag_len;
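/* bnx2x_tpa_stop - close an aggregation and pass the skb up the stack.
 *
 * Builds an skb around the aggregated first buffer, attaches the SGE
 * pages via bnx2x_fill_frag_skb() and hands the result to
 * napi_gro_receive(). The aggregation bin always keeps a valid buffer:
 * the freshly allocated replacement on success, or the old one (and the
 * packet is dropped) on any allocation failure.
 */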
555 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556 struct bnx2x_agg_info *tpa_info,
558 struct eth_end_agg_rx_cqe *cqe,
561 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
562 u8 pad = tpa_info->placement_offset;
563 u16 len = tpa_info->len_on_bd;
564 struct sk_buff *skb = NULL;
565 u8 *new_data, *data = rx_buf->data;
566 u8 old_tpa_state = tpa_info->tpa_state;
568 tpa_info->tpa_state = BNX2X_TPA_STOP;
570 /* If there was an error during the handling of the TPA_START -
571 * drop this aggregation. */
573 if (old_tpa_state == BNX2X_TPA_ERROR)
576 /* Try to allocate the new data */
577 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
579 /* Unmap skb in the pool anyway, as we are going to change
580 pool entry status to BNX2X_TPA_STOP even if the new skb allocation fails. */
582 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583 fp->rx_buf_size, DMA_FROM_DEVICE);
584 if (likely(new_data))
585 skb = build_skb(data, 0);
588 #ifdef BNX2X_STOP_ON_ERROR
589 if (pad + len > fp->rx_buf_size) {
590 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
591 pad, len, fp->rx_buf_size);
597 skb_reserve(skb, pad + NET_SKB_PAD);
599 skb->rxhash = tpa_info->rxhash;
600 skb->l4_rxhash = tpa_info->l4_rxhash;
602 skb->protocol = eth_type_trans(skb, bp->dev);
603 skb->ip_summed = CHECKSUM_UNNECESSARY;
605 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
606 skb, cqe, cqe_idx)) {
607 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
608 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
609 napi_gro_receive(&fp->napi, skb);
611 DP(NETIF_MSG_RX_STATUS,
612 "Failed to allocate new pages - dropping packet!\n");
613 dev_kfree_skb_any(skb);
617 /* put new data in bin */
618 rx_buf->data = new_data;
624 /* drop the packet and keep the buffer in the bin */
625 DP(NETIF_MSG_RX_STATUS,
626 "Failed to allocate or map a new skb - dropping packet!\n");
627 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
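/* Allocate and DMA-map a fresh data buffer for an Rx BD. The buffer is
 * kmalloc'ed with NET_SKB_PAD headroom so that build_skb() can later
 * wrap it without copying.
 */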
630 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
631 struct bnx2x_fastpath *fp, u16 index)
634 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
635 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
638 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
639 if (unlikely(data == NULL))
642 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
645 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
647 BNX2X_ERR("Can't map rx data\n");
652 dma_unmap_addr_set(rx_buf, mapping, mapping);
654 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
655 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
661 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662 struct bnx2x_fastpath *fp,
663 struct bnx2x_eth_q_stats *qstats)
665 /* Do nothing if no L4 csum validation was done.
666 * We do not check whether IP csum was validated. For IPv4 we assume
667 * that if the card got as far as validating the L4 csum, it also
668 * validated the IP csum. IPv6 has no IP csum.
670 if (cqe->fast_path_cqe.status_flags &
671 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
674 /* If L4 validation was done, check if an error was found. */
676 if (cqe->fast_path_cqe.type_error_flags &
677 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
678 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
679 qstats->hw_csum_err++;
681 skb->ip_summed = CHECKSUM_UNNECESSARY;
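/* bnx2x_rx_int - Rx completion loop, run with a NAPI budget.
 *
 * Consumes up to @budget CQEs: slow-path CQEs are forwarded to
 * bnx2x_sp_event(), TPA start/stop CQEs drive the aggregation state
 * machine, and regular fast-path CQEs become skbs - copied for small
 * packets when the MTU exceeds ETH_MAX_PACKET_SIZE, wrapped zero-copy
 * with build_skb() otherwise. Returns the number of packets processed.
 */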
684 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
686 struct bnx2x *bp = fp->bp;
687 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
688 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
691 #ifdef BNX2X_STOP_ON_ERROR
692 if (unlikely(bp->panic))
696 /* The CQ "next element" is the same size as a regular element,
697 that's why it's ok here */
698 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
699 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
702 bd_cons = fp->rx_bd_cons;
703 bd_prod = fp->rx_bd_prod;
704 bd_prod_fw = bd_prod;
705 sw_comp_cons = fp->rx_comp_cons;
706 sw_comp_prod = fp->rx_comp_prod;
708 /* Memory barrier necessary as speculative reads of the rx
709 * buffer can be ahead of the index in the status block
713 DP(NETIF_MSG_RX_STATUS,
714 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
715 fp->index, hw_comp_cons, sw_comp_cons);
717 while (sw_comp_cons != hw_comp_cons) {
718 struct sw_rx_bd *rx_buf = NULL;
720 union eth_rx_cqe *cqe;
721 struct eth_fast_path_rx_cqe *cqe_fp;
723 enum eth_rx_cqe_type cqe_fp_type;
728 #ifdef BNX2X_STOP_ON_ERROR
729 if (unlikely(bp->panic))
733 comp_ring_cons = RCQ_BD(sw_comp_cons);
734 bd_prod = RX_BD(bd_prod);
735 bd_cons = RX_BD(bd_cons);
737 cqe = &fp->rx_comp_ring[comp_ring_cons];
738 cqe_fp = &cqe->fast_path_cqe;
739 cqe_fp_flags = cqe_fp->type_error_flags;
740 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
742 DP(NETIF_MSG_RX_STATUS,
743 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
744 CQE_TYPE(cqe_fp_flags),
745 cqe_fp_flags, cqe_fp->status_flags,
746 le32_to_cpu(cqe_fp->rss_hash_result),
747 le16_to_cpu(cqe_fp->vlan_tag),
748 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
750 /* is this a slowpath msg? */
751 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
752 bnx2x_sp_event(fp, cqe);
756 rx_buf = &fp->rx_buf_ring[bd_cons];
759 if (!CQE_TYPE_FAST(cqe_fp_type)) {
760 struct bnx2x_agg_info *tpa_info;
761 u16 frag_size, pages;
762 #ifdef BNX2X_STOP_ON_ERROR
764 if (fp->disable_tpa &&
765 (CQE_TYPE_START(cqe_fp_type) ||
766 CQE_TYPE_STOP(cqe_fp_type)))
767 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
768 CQE_TYPE(cqe_fp_type));
771 if (CQE_TYPE_START(cqe_fp_type)) {
772 u16 queue = cqe_fp->queue_index;
773 DP(NETIF_MSG_RX_STATUS,
774 "calling tpa_start on queue %d\n",
777 bnx2x_tpa_start(fp, queue,
784 queue = cqe->end_agg_cqe.queue_index;
785 tpa_info = &fp->tpa_info[queue];
786 DP(NETIF_MSG_RX_STATUS,
787 "calling tpa_stop on queue %d\n",
790 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
793 if (fp->mode == TPA_MODE_GRO)
794 pages = (frag_size + tpa_info->full_page - 1) /
797 pages = SGE_PAGE_ALIGN(frag_size) >>
800 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
801 &cqe->end_agg_cqe, comp_ring_cons);
802 #ifdef BNX2X_STOP_ON_ERROR
807 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
811 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
812 pad = cqe_fp->placement_offset;
813 dma_sync_single_for_cpu(&bp->pdev->dev,
814 dma_unmap_addr(rx_buf, mapping),
815 pad + RX_COPY_THRESH,
818 prefetch(data + pad); /* speedup eth_type_trans() */
819 /* is this an error packet? */
820 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
821 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
822 "ERROR flags %x rx packet %u\n",
823 cqe_fp_flags, sw_comp_cons);
824 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
828 /* Since we don't have a jumbo ring
829 * copy small packets if mtu > 1500
831 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
832 (len <= RX_COPY_THRESH)) {
833 skb = netdev_alloc_skb_ip_align(bp->dev, len);
835 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
836 "ERROR packet dropped because of alloc failure\n");
837 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
840 memcpy(skb->data, data + pad, len);
841 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
843 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
844 dma_unmap_single(&bp->pdev->dev,
845 dma_unmap_addr(rx_buf, mapping),
848 skb = build_skb(data, 0);
849 if (unlikely(!skb)) {
851 bnx2x_fp_qstats(bp, fp)->
852 rx_skb_alloc_failed++;
855 skb_reserve(skb, pad);
857 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
858 "ERROR packet dropped because of alloc failure\n");
859 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
861 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
867 skb->protocol = eth_type_trans(skb, bp->dev);
869 /* Set Toeplitz hash for a non-LRO skb */
870 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
871 skb->l4_rxhash = l4_rxhash;
873 skb_checksum_none_assert(skb);
875 if (bp->dev->features & NETIF_F_RXCSUM)
876 bnx2x_csum_validate(skb, cqe, fp,
877 bnx2x_fp_qstats(bp, fp));
879 skb_record_rx_queue(skb, fp->rx_queue);
881 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
883 __vlan_hwaccel_put_tag(skb,
884 le16_to_cpu(cqe_fp->vlan_tag));
885 napi_gro_receive(&fp->napi, skb);
891 bd_cons = NEXT_RX_IDX(bd_cons);
892 bd_prod = NEXT_RX_IDX(bd_prod);
893 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
896 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
897 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
899 if (rx_pkt == budget)
903 fp->rx_bd_cons = bd_cons;
904 fp->rx_bd_prod = bd_prod_fw;
905 fp->rx_comp_cons = sw_comp_cons;
906 fp->rx_comp_prod = sw_comp_prod;
908 /* Update producers */
909 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
912 fp->rx_pkt += rx_pkt;
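/* MSI-X fast-path interrupt: ack the status block with interrupts left
 * disabled and defer all Rx/Tx processing to the NAPI context scheduled
 * below.
 */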
918 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
920 struct bnx2x_fastpath *fp = fp_cookie;
921 struct bnx2x *bp = fp->bp;
925 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
926 fp->index, fp->fw_sb_id, fp->igu_sb_id);
927 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
929 #ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
934 /* Handle Rx and Tx according to MSI-X vector */
935 prefetch(fp->rx_cons_sb);
937 for_each_cos_in_tx_queue(fp, cos)
938 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
940 prefetch(&fp->sb_running_index[SM_RX_ID]);
941 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
946 /* HW Lock for shared dual port PHYs */
947 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
949 mutex_lock(&bp->port.phy_mutex);
951 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
954 void bnx2x_release_phy_lock(struct bnx2x *bp)
956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
958 mutex_unlock(&bp->port.phy_mutex);
961 /* calculates MF speed according to current linespeed and MF configuration */
962 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
964 u16 line_speed = bp->link_vars.line_speed;
966 u16 maxCfg = bnx2x_extract_max_cfg(bp,
967 bp->mf_config[BP_VN(bp)]);
969 /* Calculate the current MAX line speed limit for the MF devices */
973 line_speed = (line_speed * maxCfg) / 100;
975 u16 vn_max_rate = maxCfg * 100;
977 if (vn_max_rate < line_speed)
978 line_speed = vn_max_rate;
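/* Example with maxCfg = 30 on a 10000 Mbps link: the first branch above
 * reports 10000 * 30 / 100 = 3000 Mbps, while the second caps the speed
 * at vn_max_rate = 30 * 100 = 3000 Mbps.
 */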
986 * bnx2x_fill_report_data - fill link report data to report
989 * @data: link state to update
991 * It uses non-atomic bit operations because it is called under the mutex. */
993 static void bnx2x_fill_report_data(struct bnx2x *bp,
994 struct bnx2x_link_report_data *data)
996 u16 line_speed = bnx2x_get_mf_speed(bp);
998 memset(data, 0, sizeof(*data));
1000 /* Fill the report data: effective line speed */
1001 data->line_speed = line_speed;
1004 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1005 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1006 &data->link_report_flags);
1009 if (bp->link_vars.duplex == DUPLEX_FULL)
1010 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1012 /* Rx Flow Control is ON */
1013 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1014 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1016 /* Tx Flow Control is ON */
1017 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1018 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1022 * bnx2x_link_report - report link status to OS.
1024 * @bp: driver handle
1026 * Calls the __bnx2x_link_report() under the same locking scheme
1027 * as the link/PHY state managing code to ensure consistent link reporting. */
1031 void bnx2x_link_report(struct bnx2x *bp)
1033 bnx2x_acquire_phy_lock(bp);
1034 __bnx2x_link_report(bp);
1035 bnx2x_release_phy_lock(bp);
1039 * __bnx2x_link_report - report link status to OS.
1041 * @bp: driver handle
1043 * Non-atomic implementation.
1044 * Should be called under the phy_lock. */
1046 void __bnx2x_link_report(struct bnx2x *bp)
1048 struct bnx2x_link_report_data cur_data;
1051 if (!CHIP_IS_E1(bp))
1052 bnx2x_read_mf_cfg(bp);
1054 /* Read the current link report info */
1055 bnx2x_fill_report_data(bp, &cur_data);
1057 /* Don't report link down or exactly the same link status twice */
1058 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1059 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1060 &bp->last_reported_link.link_report_flags) &&
1061 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1062 &cur_data.link_report_flags)))
1067 /* We are going to report new link parameters now -
1068 * remember the current data for next time. */
1070 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1072 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1073 &cur_data.link_report_flags)) {
1074 netif_carrier_off(bp->dev);
1075 netdev_err(bp->dev, "NIC Link is Down\n");
1081 netif_carrier_on(bp->dev);
1083 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1084 &cur_data.link_report_flags))
1089 /* Handle the FC at the end so that only these flags could
1090 * possibly be set. This way we may easily check if there is no FC enabled. */
1093 if (cur_data.link_report_flags) {
1094 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1095 &cur_data.link_report_flags)) {
1096 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1097 &cur_data.link_report_flags))
1098 flow = "ON - receive & transmit";
1100 flow = "ON - receive";
1102 flow = "ON - transmit";
1107 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1108 cur_data.line_speed, duplex, flow);
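/* Chain the SGE ring pages together: the next-page element near the end
 * of each page is pointed at the start of the following page, wrapping
 * back to the first page after NUM_RX_SGE_PAGES.
 */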
1112 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1116 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1117 struct eth_rx_sge *sge;
1119 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1121 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1122 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1125 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1126 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1130 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1131 struct bnx2x_fastpath *fp, int last)
1135 for (i = 0; i < last; i++) {
1136 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1137 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1138 u8 *data = first_buf->data;
1141 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1144 if (tpa_info->tpa_state == BNX2X_TPA_START)
1145 dma_unmap_single(&bp->pdev->dev,
1146 dma_unmap_addr(first_buf, mapping),
1147 fp->rx_buf_size, DMA_FROM_DEVICE);
1149 first_buf->data = NULL;
1153 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1157 for_each_rx_queue_cnic(bp, j) {
1158 struct bnx2x_fastpath *fp = &bp->fp[j];
1162 /* Activate BD ring */
1164 * this will generate an interrupt (to the TSTORM)
1165 * must only be done after chip is initialized
1167 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1172 void bnx2x_init_rx_rings(struct bnx2x *bp)
1174 int func = BP_FUNC(bp);
1178 /* Allocate TPA resources */
1179 for_each_eth_queue(bp, j) {
1180 struct bnx2x_fastpath *fp = &bp->fp[j];
1183 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1185 if (!fp->disable_tpa) {
1186 /* Fill the per-aggregation pool */
1187 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1188 struct bnx2x_agg_info *tpa_info =
1190 struct sw_rx_bd *first_buf =
1191 &tpa_info->first_buf;
1193 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1195 if (!first_buf->data) {
1196 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1198 bnx2x_free_tpa_pool(bp, fp, i);
1199 fp->disable_tpa = 1;
1202 dma_unmap_addr_set(first_buf, mapping, 0);
1203 tpa_info->tpa_state = BNX2X_TPA_STOP;
1206 /* "next page" elements initialization */
1207 bnx2x_set_next_page_sgl(fp);
1209 /* set SGEs bit mask */
1210 bnx2x_init_sge_ring_bit_mask(fp);
1212 /* Allocate SGEs and initialize the ring elements */
1213 for (i = 0, ring_prod = 0;
1214 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1216 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1217 BNX2X_ERR("was only able to allocate %d rx sges\n",
1219 BNX2X_ERR("disabling TPA for queue[%d]\n",
1221 /* Cleanup already allocated elements */
1222 bnx2x_free_rx_sge_range(bp, fp,
1224 bnx2x_free_tpa_pool(bp, fp,
1226 fp->disable_tpa = 1;
1230 ring_prod = NEXT_SGE_IDX(ring_prod);
1233 fp->rx_sge_prod = ring_prod;
1237 for_each_eth_queue(bp, j) {
1238 struct bnx2x_fastpath *fp = &bp->fp[j];
1242 /* Activate BD ring */
1244 * this will generate an interrupt (to the TSTORM)
1245 * must only be done after chip is initialized
1247 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1253 if (CHIP_IS_E1(bp)) {
1254 REG_WR(bp, BAR_USTRORM_INTMEM +
1255 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1256 U64_LO(fp->rx_comp_mapping));
1257 REG_WR(bp, BAR_USTRORM_INTMEM +
1258 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1259 U64_HI(fp->rx_comp_mapping));
1264 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1267 struct bnx2x *bp = fp->bp;
1269 for_each_cos_in_tx_queue(fp, cos) {
1270 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1271 unsigned pkts_compl = 0, bytes_compl = 0;
1273 u16 sw_prod = txdata->tx_pkt_prod;
1274 u16 sw_cons = txdata->tx_pkt_cons;
1276 while (sw_cons != sw_prod) {
1277 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1278 &pkts_compl, &bytes_compl);
1282 netdev_tx_reset_queue(
1283 netdev_get_tx_queue(bp->dev,
1284 txdata->txq_index));
1288 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1292 for_each_tx_queue_cnic(bp, i) {
1293 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1297 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1301 for_each_eth_queue(bp, i) {
1302 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1306 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1308 struct bnx2x *bp = fp->bp;
1311 /* ring wasn't allocated */
1312 if (fp->rx_buf_ring == NULL)
1315 for (i = 0; i < NUM_RX_BD; i++) {
1316 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1317 u8 *data = rx_buf->data;
1321 dma_unmap_single(&bp->pdev->dev,
1322 dma_unmap_addr(rx_buf, mapping),
1323 fp->rx_buf_size, DMA_FROM_DEVICE);
1325 rx_buf->data = NULL;
1330 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1334 for_each_rx_queue_cnic(bp, j) {
1335 bnx2x_free_rx_bds(&bp->fp[j]);
1339 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1343 for_each_eth_queue(bp, j) {
1344 struct bnx2x_fastpath *fp = &bp->fp[j];
1346 bnx2x_free_rx_bds(fp);
1348 if (!fp->disable_tpa)
1349 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1353 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1355 bnx2x_free_tx_skbs_cnic(bp);
1356 bnx2x_free_rx_skbs_cnic(bp);
1359 void bnx2x_free_skbs(struct bnx2x *bp)
1361 bnx2x_free_tx_skbs(bp);
1362 bnx2x_free_rx_skbs(bp);
1365 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1367 /* load old values */
1368 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1370 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1371 /* leave all but MAX value */
1372 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1374 /* set new MAX value */
1375 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1376 & FUNC_MF_CFG_MAX_BW_MASK;
1378 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1383 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1385 * @bp: driver handle
1386 * @nvecs: number of vectors to be released
1388 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1392 if (nvecs == offset)
1394 free_irq(bp->msix_table[offset].vector, bp->dev);
1395 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1396 bp->msix_table[offset].vector);
1399 if (CNIC_SUPPORT(bp)) {
1400 if (nvecs == offset)
1405 for_each_eth_queue(bp, i) {
1406 if (nvecs == offset)
1408 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1409 i, bp->msix_table[offset].vector);
1411 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1415 void bnx2x_free_irq(struct bnx2x *bp)
1417 if (bp->flags & USING_MSIX_FLAG &&
1418 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1419 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1420 CNIC_SUPPORT(bp) + 1);
1422 free_irq(bp->dev->irq, bp->dev);
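/* bnx2x_enable_msix - request MSI-X vectors: one for the slow path, one
 * for CNIC when supported, and one per ETH queue. If the full set is
 * not granted, retry with fewer queues, then with a single vector,
 * before reporting failure so the caller can fall back to MSI/INTx.
 */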
1425 int bnx2x_enable_msix(struct bnx2x *bp)
1427 int msix_vec = 0, i, rc, req_cnt;
1429 bp->msix_table[msix_vec].entry = msix_vec;
1430 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1431 bp->msix_table[0].entry);
1434 /* CNIC requires an MSI-X vector for itself */
1435 if (CNIC_SUPPORT(bp)) {
1436 bp->msix_table[msix_vec].entry = msix_vec;
1437 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1438 msix_vec, bp->msix_table[msix_vec].entry);
1442 /* We need separate vectors for ETH queues only (not FCoE) */
1443 for_each_eth_queue(bp, i) {
1444 bp->msix_table[msix_vec].entry = msix_vec;
1445 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1446 msix_vec, msix_vec, i);
1450 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1452 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1455 * reconfigure number of tx/rx queues according to available MSI-X vectors */
1458 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1459 /* how many fewer vectors will we have? */
1460 int diff = req_cnt - rc;
1462 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1464 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1467 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1471 * decrease number of queues by number of unallocated entries
1473 bp->num_ethernet_queues -= diff;
1474 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1476 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1478 } else if (rc > 0) {
1479 /* Get by with single vector */
1480 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1482 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1487 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1488 bp->flags |= USING_SINGLE_MSIX_FLAG;
1490 BNX2X_DEV_INFO("set number of queues to 1\n");
1491 bp->num_ethernet_queues = 1;
1492 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1493 } else if (rc < 0) {
1494 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1498 bp->flags |= USING_MSIX_FLAG;
1503 /* fall back to INTx if not enough memory */
1505 bp->flags |= DISABLE_MSI_FLAG;
1510 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1512 int i, rc, offset = 0;
1514 rc = request_irq(bp->msix_table[offset++].vector,
1515 bnx2x_msix_sp_int, 0,
1516 bp->dev->name, bp->dev);
1518 BNX2X_ERR("request sp irq failed\n");
1522 if (CNIC_SUPPORT(bp))
1525 for_each_eth_queue(bp, i) {
1526 struct bnx2x_fastpath *fp = &bp->fp[i];
1527 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1530 rc = request_irq(bp->msix_table[offset].vector,
1531 bnx2x_msix_fp_int, 0, fp->name, fp);
1533 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1534 bp->msix_table[offset].vector, rc);
1535 bnx2x_free_msix_irqs(bp, offset);
1542 i = BNX2X_NUM_ETH_QUEUES(bp);
1543 offset = 1 + CNIC_SUPPORT(bp);
1544 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1545 bp->msix_table[0].vector,
1546 0, bp->msix_table[offset].vector,
1547 i - 1, bp->msix_table[offset + i - 1].vector);
1552 int bnx2x_enable_msi(struct bnx2x *bp)
1556 rc = pci_enable_msi(bp->pdev);
1558 BNX2X_DEV_INFO("MSI is not attainable\n");
1561 bp->flags |= USING_MSI_FLAG;
1566 static int bnx2x_req_irq(struct bnx2x *bp)
1568 unsigned long flags;
1571 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1574 flags = IRQF_SHARED;
1576 if (bp->flags & USING_MSIX_FLAG)
1577 irq = bp->msix_table[0].vector;
1579 irq = bp->pdev->irq;
1581 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1584 static int bnx2x_setup_irqs(struct bnx2x *bp)
1587 if (bp->flags & USING_MSIX_FLAG &&
1588 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1589 rc = bnx2x_req_msix_irqs(bp);
1594 rc = bnx2x_req_irq(bp);
1596 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1599 if (bp->flags & USING_MSI_FLAG) {
1600 bp->dev->irq = bp->pdev->irq;
1601 netdev_info(bp->dev, "using MSI IRQ %d\n",
1604 if (bp->flags & USING_MSIX_FLAG) {
1605 bp->dev->irq = bp->msix_table[0].vector;
1606 netdev_info(bp->dev, "using MSI-X IRQ %d\n",
1614 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1618 for_each_rx_queue_cnic(bp, i)
1619 napi_enable(&bnx2x_fp(bp, i, napi));
1622 static void bnx2x_napi_enable(struct bnx2x *bp)
1626 for_each_eth_queue(bp, i)
1627 napi_enable(&bnx2x_fp(bp, i, napi));
1630 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1634 for_each_rx_queue_cnic(bp, i)
1635 napi_disable(&bnx2x_fp(bp, i, napi));
1638 static void bnx2x_napi_disable(struct bnx2x *bp)
1642 for_each_eth_queue(bp, i)
1643 napi_disable(&bnx2x_fp(bp, i, napi));
1646 void bnx2x_netif_start(struct bnx2x *bp)
1648 if (netif_running(bp->dev)) {
1649 bnx2x_napi_enable(bp);
1650 if (CNIC_LOADED(bp))
1651 bnx2x_napi_enable_cnic(bp);
1652 bnx2x_int_enable(bp);
1653 if (bp->state == BNX2X_STATE_OPEN)
1654 netif_tx_wake_all_queues(bp->dev);
1658 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1660 bnx2x_int_disable_sync(bp, disable_hw);
1661 bnx2x_napi_disable(bp);
1662 if (CNIC_LOADED(bp))
1663 bnx2x_napi_disable_cnic(bp);
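/* Select the Tx queue for an outgoing frame: FCoE and FIP ethertypes
 * (looking through an optional VLAN header) are steered to the
 * dedicated FCoE ring, everything else goes through the regular
 * Tx hash over the ETH queues.
 */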
1666 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1668 struct bnx2x *bp = netdev_priv(dev);
1670 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1671 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1672 u16 ether_type = ntohs(hdr->h_proto);
1674 /* Skip VLAN tag if present */
1675 if (ether_type == ETH_P_8021Q) {
1676 struct vlan_ethhdr *vhdr =
1677 (struct vlan_ethhdr *)skb->data;
1679 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1682 /* If ethertype is FCoE or FIP - use FCoE ring */
1683 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1684 return bnx2x_fcoe_tx(bp, txq_index);
1687 /* select a non-FCoE queue */
1688 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1692 void bnx2x_set_num_queues(struct bnx2x *bp)
1695 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1697 /* override in STORAGE SD modes */
1698 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1699 bp->num_ethernet_queues = 1;
1701 /* Add special queues */
1702 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1703 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1705 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1709 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1711 * @bp: Driver handle
1713 * We currently support at most 16 Tx queues for each CoS, thus we will
1714 * allocate a multiple of 16 for ETH L2 rings, according to the value of bp->max_cos.
1717 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1718 * index after all ETH L2 indices.
1720 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1721 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1722 * 16..31,...) with indices that are not coupled with any real Tx queue.
1724 * The proper configuration of skb->queue_mapping is handled by
1725 * bnx2x_select_queue() and __skb_tx_hash().
1727 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1728 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1730 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1734 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1735 rx = BNX2X_NUM_ETH_QUEUES(bp);
1737 /* account for fcoe queue */
1738 if (include_cnic && !NO_FCOE(bp)) {
1743 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1745 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1748 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1750 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1754 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1760 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1764 for_each_queue(bp, i) {
1765 struct bnx2x_fastpath *fp = &bp->fp[i];
1768 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1771 * Although no IP frames are expected to arrive on
1772 * this ring, we still want to add an
1773 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun */
1776 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1779 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1780 IP_HEADER_ALIGNMENT_PADDING +
1783 BNX2X_FW_RX_ALIGN_END;
1784 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
1788 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1791 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1793 /* Prepare the initial contents of the indirection table if RSS is enabled */
1796 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1797 bp->rss_conf_obj.ind_table[i] =
1799 ethtool_rxfh_indir_default(i, num_eth_queues);
1802 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1803 * per-port, so if explicit configuration is needed, do it only for a PMF.
1806 * For 57712 and newer on the other hand it's a per-function configuration. */
1809 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1812 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1815 struct bnx2x_config_rss_params params = {NULL};
1818 /* Although RSS is meaningless when there is a single HW queue we
1819 * still need it enabled in order to have HW Rx hash generated.
1821 * if (!is_eth_multi(bp))
1822 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1825 params.rss_obj = rss_obj;
1827 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1829 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1831 /* RSS configuration */
1832 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1833 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1834 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1835 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1836 if (rss_obj->udp_rss_v4)
1837 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1838 if (rss_obj->udp_rss_v6)
1839 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1842 params.rss_result_mask = MULTI_MASK;
1844 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1848 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1849 params.rss_key[i] = random32();
1851 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1854 return bnx2x_config_rss(bp, ¶ms);
1857 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1859 struct bnx2x_func_state_params func_params = {NULL};
1861 /* Prepare parameters for function state transitions */
1862 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1864 func_params.f_obj = &bp->func_obj;
1865 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1867 func_params.params.hw_init.load_phase = load_code;
1869 return bnx2x_func_state_change(bp, &func_params);
1873 * Cleans the objects that have internal lists without sending
1874 * ramrods. Should be run when interrupts are disabled. */
1876 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1879 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1880 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1881 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1883 /***************** Cleanup MACs' object first *************************/
1885 /* Wait for completion of the requested commands */
1886 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1887 /* Perform a dry cleanup */
1888 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1890 /* Clean ETH primary MAC */
1891 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1892 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1895 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1897 /* Cleanup UC list */
1899 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1900 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1903 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1905 /***************** Now clean mcast object *****************************/
1906 rparam.mcast_obj = &bp->mcast_obj;
1907 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1909 /* Add a DEL command... */
1910 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1912 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1915 /* ...and wait until all pending commands are cleared */
1916 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1919 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1924 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1928 #ifndef BNX2X_STOP_ON_ERROR
1929 #define LOAD_ERROR_EXIT(bp, label) \
1931 (bp)->state = BNX2X_STATE_ERROR; \
1935 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1937 bp->cnic_loaded = false; \
1940 #else /*BNX2X_STOP_ON_ERROR*/
1941 #define LOAD_ERROR_EXIT(bp, label) \
1943 (bp)->state = BNX2X_STATE_ERROR; \
1947 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1949 bp->cnic_loaded = false; \
1953 #endif /*BNX2X_STOP_ON_ERROR*/
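/* Compare the FW version built into the driver with the FW already
 * loaded in the chip (read back through XSEM_REG_PRAM); a mismatch
 * aborts the load, as the error message below explains.
 */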
1955 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1957 /* build FW version dword */
1958 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1959 (BCM_5710_FW_MINOR_VERSION << 8) +
1960 (BCM_5710_FW_REVISION_VERSION << 16) +
1961 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1963 /* read loaded FW from chip */
1964 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1966 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1968 if (loaded_fw != my_fw) {
1970 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1979 * bnx2x_bz_fp - zero content of the fastpath structure.
1981 * @bp: driver handle
1982 * @index: fastpath index to be zeroed
1984 * Makes sure the contents of bp->fp[index].napi are kept intact. */
1987 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1989 struct bnx2x_fastpath *fp = &bp->fp[index];
1990 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1993 struct napi_struct orig_napi = fp->napi;
1994 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1995 /* bzero bnx2x_fastpath contents */
1996 if (bp->stats_init) {
1997 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1998 memset(fp, 0, sizeof(*fp));
2000 /* Keep Queue statistics */
2001 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2002 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2004 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2006 if (tmp_eth_q_stats)
2007 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2008 sizeof(struct bnx2x_eth_q_stats));
2010 tmp_eth_q_stats_old =
2011 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2013 if (tmp_eth_q_stats_old)
2014 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2015 sizeof(struct bnx2x_eth_q_stats_old));
2017 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2018 memset(fp, 0, sizeof(*fp));
2020 if (tmp_eth_q_stats) {
2021 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2022 sizeof(struct bnx2x_eth_q_stats));
2023 kfree(tmp_eth_q_stats);
2026 if (tmp_eth_q_stats_old) {
2027 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2028 sizeof(struct bnx2x_eth_q_stats_old));
2029 kfree(tmp_eth_q_stats_old);
2034 /* Restore the NAPI object as it has been already initialized */
2035 fp->napi = orig_napi;
2036 fp->tpa_info = orig_tpa_info;
2040 fp->max_cos = bp->max_cos;
2042 /* Special queues support only one CoS */
2045 /* Init txdata pointers */
2047 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2049 for_each_cos_in_tx_queue(fp, cos)
2050 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2051 BNX2X_NUM_ETH_QUEUES(bp) + index];
2054 * Set the tpa flag for each queue. The tpa flag determines the queue's
2055 * minimal size, so it must be set prior to queue memory allocation */
2057 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2058 (bp->flags & GRO_ENABLE_FLAG &&
2059 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2060 if (bp->flags & TPA_ENABLE_FLAG)
2061 fp->mode = TPA_MODE_LRO;
2062 else if (bp->flags & GRO_ENABLE_FLAG)
2063 fp->mode = TPA_MODE_GRO;
2065 /* We don't want TPA on an FCoE L2 ring */
2067 fp->disable_tpa = 1;
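/* bnx2x_load_cnic - bring up the CNIC (iSCSI/FCoE offload) side:
 * allocate its memory and fastpath resources, enable its NAPI contexts,
 * init the HW function for CNIC, enable the timer scan, set up the CNIC
 * queues and finally notify the CNIC driver if the device is open.
 */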
2070 int bnx2x_load_cnic(struct bnx2x *bp)
2072 int i, rc, port = BP_PORT(bp);
2074 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2076 mutex_init(&bp->cnic_mutex);
2078 rc = bnx2x_alloc_mem_cnic(bp);
2080 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2081 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2084 rc = bnx2x_alloc_fp_mem_cnic(bp);
2086 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2087 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2090 /* Update the number of queues with the cnic queues */
2091 rc = bnx2x_set_real_num_queues(bp, 1);
2093 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2094 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2097 /* Add all CNIC NAPI objects */
2098 bnx2x_add_all_napi_cnic(bp);
2099 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2100 bnx2x_napi_enable_cnic(bp);
2102 rc = bnx2x_init_hw_func_cnic(bp);
2104 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2106 bnx2x_nic_init_cnic(bp);
2108 /* Enable Timer scan */
2109 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2111 for_each_cnic_queue(bp, i) {
2112 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2114 BNX2X_ERR("Queue setup failed\n");
2115 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2119 /* Initialize Rx filter. */
2120 netif_addr_lock_bh(bp->dev);
2121 bnx2x_set_rx_mode(bp->dev);
2122 netif_addr_unlock_bh(bp->dev);
2124 /* re-read iscsi info */
2125 bnx2x_get_iscsi_info(bp);
2126 bnx2x_setup_cnic_irq_info(bp);
2127 bnx2x_setup_cnic_info(bp);
2128 bp->cnic_loaded = true;
2129 if (bp->state == BNX2X_STATE_OPEN)
2130 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2133 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2137 #ifndef BNX2X_STOP_ON_ERROR
2139 /* Disable Timer scan */
2140 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2143 bnx2x_napi_disable_cnic(bp);
2144 /* Update the number of queues without the cnic queues */
2145 rc = bnx2x_set_real_num_queues(bp, 0);
2147 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2149 BNX2X_ERR("CNIC-related load failed\n");
2150 bnx2x_free_fp_mem_cnic(bp);
2151 bnx2x_free_mem_cnic(bp);
2153 #endif /* ! BNX2X_STOP_ON_ERROR */
2157 /* must be called with rtnl_lock */
2158 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2160 int port = BP_PORT(bp);
2164 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2166 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2168 #ifdef BNX2X_STOP_ON_ERROR
2169 if (unlikely(bp->panic)) {
2170 BNX2X_ERR("Can't load NIC when there is panic\n");
2175 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2177 /* Set the initial link reported state to link down */
2178 bnx2x_acquire_phy_lock(bp);
2179 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2180 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2181 &bp->last_reported_link.link_report_flags);
2182 bnx2x_release_phy_lock(bp);
2184 /* must be called before memory allocation and HW init */
2185 bnx2x_ilt_set_info(bp);
2188 * Zero fastpath structures preserving invariants like napi, which are
2189 * allocated only once, fp index, max_cos, bp pointer.
2190 * Also set fp->disable_tpa and txdata_ptr.
2192 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2193 for_each_queue(bp, i)
2195 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2196 bp->num_cnic_queues) *
2197 sizeof(struct bnx2x_fp_txdata));
2199 bp->fcoe_init = false;
2201 /* Set the receive queues buffer size */
2202 bnx2x_set_rx_buf_size(bp);
2204 if (bnx2x_alloc_mem(bp))
2207 /* Since bnx2x_alloc_mem() may possibly update
2208 * bp->num_queues, bnx2x_set_real_num_queues() should always
2209 * come after it. At this stage cnic queues are not counted. */
2211 rc = bnx2x_set_real_num_queues(bp, 0);
2213 BNX2X_ERR("Unable to set real_num_queues\n");
2214 LOAD_ERROR_EXIT(bp, load_error0);
2217 /* Configure multi-CoS mappings in the kernel.
2218 * This configuration may be overridden by a multi-class queue discipline
2219 * or by a DCBX negotiation result. */
2221 bnx2x_setup_tc(bp->dev, bp->max_cos);
2223 /* Add all NAPI objects */
2224 bnx2x_add_all_napi(bp);
2225 DP(NETIF_MSG_IFUP, "napi added\n");
2226 bnx2x_napi_enable(bp);
2228 /* set pf load just before approaching the MCP */
2229 bnx2x_set_pf_load(bp);
2231 /* Send LOAD_REQUEST command to MCP.
2232 * Returns the type of LOAD command:
2233 * if it is the first port to be initialized,
2234 * common blocks should be initialized, otherwise - not. */
2236 if (!BP_NOMCP(bp)) {
2239 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2240 DRV_MSG_SEQ_NUMBER_MASK);
2241 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2243 /* Get current FW pulse sequence */
2244 bp->fw_drv_pulse_wr_seq =
2245 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2246 DRV_PULSE_SEQ_MASK);
2247 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2249 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2250 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2252 BNX2X_ERR("MCP response failure, aborting\n");
2254 LOAD_ERROR_EXIT(bp, load_error1);
2256 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2257 BNX2X_ERR("Driver load refused\n");
2258 rc = -EBUSY; /* other port in diagnostic mode */
2259 LOAD_ERROR_EXIT(bp, load_error1);
2261 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2262 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2263 /* abort nic load if version mismatch */
2264 if (!bnx2x_test_firmware_version(bp, true)) {
2266 LOAD_ERROR_EXIT(bp, load_error2);
2271 int path = BP_PATH(bp);
2273 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2274 path, load_count[path][0], load_count[path][1],
2275 load_count[path][2]);
2276 load_count[path][0]++;
2277 load_count[path][1 + port]++;
2278 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2279 path, load_count[path][0], load_count[path][1],
2280 load_count[path][2]);
2281 if (load_count[path][0] == 1)
2282 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2283 else if (load_count[path][1 + port] == 1)
2284 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2286 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2289 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2290 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2291 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2294 * We need the barrier to ensure the ordering between the
2295 * writing to bp->port.pmf here and reading it from the
2296 * bnx2x_periodic_task().
2302 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2304 /* Init Function state controlling object */
2305 bnx2x__init_func_obj(bp);
2308 rc = bnx2x_init_hw(bp, load_code);
2310 BNX2X_ERR("HW init failed, aborting\n");
2311 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2312 LOAD_ERROR_EXIT(bp, load_error2);
2315 /* Connect to IRQs */
2316 rc = bnx2x_setup_irqs(bp);
2318 BNX2X_ERR("IRQs setup failed\n");
2319 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2320 LOAD_ERROR_EXIT(bp, load_error2);
2323 /* Setup NIC internals and enable interrupts */
2324 bnx2x_nic_init(bp, load_code);
2326 /* Init per-function objects */
2327 bnx2x_init_bp_objs(bp);
2329 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2330 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2331 (bp->common.shmem2_base)) {
2332 if (SHMEM2_HAS(bp, dcc_support))
2333 SHMEM2_WR(bp, dcc_support,
2334 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2335 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2336 if (SHMEM2_HAS(bp, afex_driver_support))
2337 SHMEM2_WR(bp, afex_driver_support,
2338 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2341 /* Set AFEX default VLAN tag to an invalid value */
2342 bp->afex_def_vlan_tag = -1;
2344 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2345 rc = bnx2x_func_start(bp);
2347 BNX2X_ERR("Function start failed!\n");
2348 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2349 LOAD_ERROR_EXIT(bp, load_error3);
2352 /* Send LOAD_DONE command to MCP */
2353 if (!BP_NOMCP(bp)) {
2354 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2356 BNX2X_ERR("MCP response failure, aborting\n");
2358 LOAD_ERROR_EXIT(bp, load_error3);
2362 rc = bnx2x_setup_leading(bp);
2364 BNX2X_ERR("Setup leading failed!\n");
2365 LOAD_ERROR_EXIT(bp, load_error3);
2368 for_each_nondefault_eth_queue(bp, i) {
2369 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2371 BNX2X_ERR("Queue setup failed\n");
2372 LOAD_ERROR_EXIT(bp, load_error3);
2376 rc = bnx2x_init_rss_pf(bp);
2378 BNX2X_ERR("PF RSS init failed\n");
2379 LOAD_ERROR_EXIT(bp, load_error3);
	/* Now that the clients are configured we are ready to work */
2383 bp->state = BNX2X_STATE_OPEN;
2385 /* Configure a ucast MAC */
2386 rc = bnx2x_set_eth_mac(bp, true);
2388 BNX2X_ERR("Setting Ethernet MAC failed\n");
2389 LOAD_ERROR_EXIT(bp, load_error3);
2392 if (bp->pending_max) {
2393 bnx2x_update_max_mf_config(bp, bp->pending_max);
2394 bp->pending_max = 0;
2398 bnx2x_initial_phy_init(bp, load_mode);
2400 /* Start fast path */
2402 /* Initialize Rx filter. */
2403 netif_addr_lock_bh(bp->dev);
2404 bnx2x_set_rx_mode(bp->dev);
2405 netif_addr_unlock_bh(bp->dev);
2408 switch (load_mode) {
		/* Tx queues should only be re-enabled */
2411 netif_tx_wake_all_queues(bp->dev);
2415 netif_tx_start_all_queues(bp->dev);
2416 smp_mb__after_clear_bit();
2420 case LOAD_LOOPBACK_EXT:
2421 bp->state = BNX2X_STATE_DIAG;
2429 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2431 bnx2x__link_status_update(bp);
2433 /* start the timer */
2434 mod_timer(&bp->timer, jiffies + bp->current_interval);
2436 if (CNIC_ENABLED(bp))
2437 bnx2x_load_cnic(bp);
	/* mark driver as loaded in shmem2 */
2440 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2442 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2443 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2444 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2445 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2448 /* Wait for all pending SP commands to complete */
2449 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2450 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2451 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2455 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2456 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2457 bnx2x_dcbx_init(bp, false);
	DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2463 #ifndef BNX2X_STOP_ON_ERROR
2465 bnx2x_int_disable_sync(bp, 1);
2467 /* Clean queueable objects */
2468 bnx2x_squeeze_objects(bp);
2470 /* Free SKBs, SGEs, TPA pool and driver internals */
2471 bnx2x_free_skbs(bp);
2472 for_each_rx_queue(bp, i)
2473 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2478 if (!BP_NOMCP(bp)) {
2479 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2480 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2485 bnx2x_napi_disable(bp);
2486 /* clear pf_load status, as it was already set */
2487 bnx2x_clear_pf_load(bp);
2492 #endif /* ! BNX2X_STOP_ON_ERROR */
2495 /* must be called with rtnl_lock */
2496 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2499 bool global = false;
	DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
	/* mark driver as unloaded in shmem2 */
2504 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2506 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2507 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2508 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2511 if ((bp->state == BNX2X_STATE_CLOSED) ||
2512 (bp->state == BNX2X_STATE_ERROR)) {
2513 /* We can get here if the driver has been unloaded
2514 * during parity error recovery and is either waiting for a
2515 * leader to complete or for other functions to unload and
2516 * then ifdown has been issued. In this case we want to
	 * unload and let other functions complete the recovery process.
	 */
2520 bp->recovery_state = BNX2X_RECOVERY_DONE;
2522 bnx2x_release_leader_lock(bp);
2525 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2526 BNX2X_ERR("Can't unload in closed or error state\n");
	 * It's important to set bp->state to a value different from
2532 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2533 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2535 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2538 if (CNIC_LOADED(bp))
2539 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2542 bnx2x_tx_disable(bp);
2543 netdev_reset_tc(bp->dev);
2545 bp->rx_mode = BNX2X_RX_MODE_NONE;
2547 del_timer_sync(&bp->timer);
2549 /* Set ALWAYS_ALIVE bit in shmem */
2550 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2552 bnx2x_drv_pulse(bp);
2554 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2555 bnx2x_save_statistics(bp);
2557 /* Cleanup the chip if needed */
2558 if (unload_mode != UNLOAD_RECOVERY)
2559 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2561 /* Send the UNLOAD_REQUEST to the MCP */
2562 bnx2x_send_unload_req(bp, unload_mode);
	/* Prevent transactions to the host from the functions on the
	 * engine that doesn't reset global blocks in case of a global
	 * attention once global blocks are reset and gates are opened
	 * (the engine whose leader will perform the recovery last).
	 */
2571 if (!CHIP_IS_E1x(bp))
2572 bnx2x_pf_disable(bp);
2574 /* Disable HW interrupts, NAPI */
2575 bnx2x_netif_stop(bp, 1);
2576 /* Delete all NAPI objects */
2577 bnx2x_del_all_napi(bp);
2578 if (CNIC_LOADED(bp))
2579 bnx2x_del_all_napi_cnic(bp);
2583 /* Report UNLOAD_DONE to MCP */
2584 bnx2x_send_unload_done(bp, false);
	/* At this stage no more interrupts will arrive, so we may safely
	 * clean the queueable objects here in case they failed to get
	 * cleaned so far.
	 */
2591 bnx2x_squeeze_objects(bp);
2593 /* There should be no more pending SP commands at this stage */
2598 /* Free SKBs, SGEs, TPA pool and driver internals */
2599 bnx2x_free_skbs(bp);
2600 if (CNIC_LOADED(bp))
2601 bnx2x_free_skbs_cnic(bp);
2602 for_each_rx_queue(bp, i)
2603 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2605 if (CNIC_LOADED(bp)) {
2606 bnx2x_free_fp_mem_cnic(bp);
2607 bnx2x_free_mem_cnic(bp);
2611 bp->state = BNX2X_STATE_CLOSED;
2612 bp->cnic_loaded = false;
2614 /* Check if there are pending parity attentions. If there are - set
2615 * RECOVERY_IN_PROGRESS.
2617 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2618 bnx2x_set_reset_in_progress(bp);
2620 /* Set RESET_IS_GLOBAL if needed */
2622 bnx2x_set_reset_global(bp);
	/* The last driver must disable the "close the gate" functionality
	 * if there is no parity attention or "process kill" pending.
	 */
2629 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2630 bnx2x_disable_close_the_gate(bp);
	DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
2637 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2641 /* If there is no power capability, silently succeed */
2643 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2647 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2651 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2652 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2653 PCI_PM_CTRL_PME_STATUS));
2655 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2656 /* delay required during transition out of D3hot */
	/* If there are other clients above, don't shut down the power */
2663 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2665 /* Don't shut down the power for emulation and FPGA */
2666 if (CHIP_REV_IS_SLOW(bp))
2669 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2673 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2675 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2678 /* No more memory access after this point until
2679 * device is brought back to D0.
2684 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2691 * net_device service functions
2693 int bnx2x_poll(struct napi_struct *napi, int budget)
2697 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2699 struct bnx2x *bp = fp->bp;
2702 #ifdef BNX2X_STOP_ON_ERROR
2703 if (unlikely(bp->panic)) {
2704 napi_complete(napi);
2709 for_each_cos_in_tx_queue(fp, cos)
2710 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2711 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2714 if (bnx2x_has_rx_work(fp)) {
2715 work_done += bnx2x_rx_int(fp, budget - work_done);
2717 /* must not complete if we consumed full budget */
2718 if (work_done >= budget)
2722 /* Fall out from the NAPI loop if needed */
2723 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2725 /* No need to update SB for FCoE L2 ring as long as
2726 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
2729 if (IS_FCOE_FP(fp)) {
2730 napi_complete(napi);
2733 bnx2x_update_fpsb_idx(fp);
2734 /* bnx2x_has_rx_work() reads the status block,
2735 * thus we need to ensure that status block indices
2736 * have been actually read (bnx2x_update_fpsb_idx)
2737 * prior to this check (bnx2x_has_rx_work) so that
2738 * we won't write the "newer" value of the status block
2739 * to IGU (if there was a DMA right after
2740 * bnx2x_has_rx_work and if there is no rmb, the memory
2741 * reading (bnx2x_update_fpsb_idx) may be postponed
2742 * to right before bnx2x_ack_sb). In this case there
2743 * will never be another interrupt until there is
2744 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();
2749 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2750 napi_complete(napi);
2751 /* Re-enable interrupts */
2752 DP(NETIF_MSG_RX_STATUS,
2753 "Update index to %d\n", fp->fp_hc_idx);
2754 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2755 le16_to_cpu(fp->fp_hc_idx),
/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs
 */
2769 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2770 struct bnx2x_fp_txdata *txdata,
2771 struct sw_tx_bd *tx_buf,
2772 struct eth_tx_start_bd **tx_bd, u16 hlen,
2773 u16 bd_prod, int nbd)
2775 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2776 struct eth_tx_bd *d_tx_bd;
2778 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2780 /* first fix first BD */
2781 h_tx_bd->nbd = cpu_to_le16(nbd);
2782 h_tx_bd->nbytes = cpu_to_le16(hlen);
2784 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2785 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2787 /* now get a new data BD
2788 * (after the pbd) and fill it */
2789 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2790 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2792 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2793 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2795 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2796 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2797 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2799 /* this marks the BD as one that has no individual mapping */
2800 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2802 DP(NETIF_MSG_TX_QUEUED,
2803 "TSO split data size is %d (%x:%x)\n",
2804 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2807 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2812 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2815 csum = (u16) ~csum_fold(csum_sub(csum,
2816 csum_partial(t_header - fix, fix, 0)));
2819 csum = (u16) ~csum_fold(csum_add(csum,
2820 csum_partial(t_header, -fix, 0)));
2822 return swab16(csum);
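/* A sketch of what bnx2x_csum_fix() compensates for: when the stack's
 * checksum start sits 'fix' bytes before the transport header (fix > 0)
 * the partial sum over those extra bytes is subtracted out, and when it
 * sits after it (fix < 0) the missing bytes are summed back in; the
 * result is then folded and byte-swapped into the FW's expected format.
 */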
2825 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2829 if (skb->ip_summed != CHECKSUM_PARTIAL)
2833 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2835 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2836 rc |= XMIT_CSUM_TCP;
2840 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2841 rc |= XMIT_CSUM_TCP;
2845 if (skb_is_gso_v6(skb))
2846 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2847 else if (skb_is_gso(skb))
2848 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2853 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (packet is too fragmented).
   No need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
2857 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2862 int first_bd_sz = 0;
2864 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2865 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2867 if (xmit_type & XMIT_GSO) {
2868 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2869 /* Check if LSO packet needs to be copied:
2870 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2871 int wnd_size = MAX_FETCH_BD - 3;
2872 /* Number of windows to check */
2873 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2878 /* Headers length */
2879 hlen = (int)(skb_transport_header(skb) - skb->data) +
2882 /* Amount of data (w/o headers) on linear part of SKB*/
2883 first_bd_sz = skb_headlen(skb) - hlen;
2885 wnd_sum = first_bd_sz;
2887 /* Calculate the first sum - it's special */
2888 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2890 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
			/* If there was data in the linear part of the skb - check it */
2893 if (first_bd_sz > 0) {
2894 if (unlikely(wnd_sum < lso_mss)) {
2899 wnd_sum -= first_bd_sz;
2902 /* Others are easier: run through the frag list and
2903 check all windows */
2904 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2906 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2908 if (unlikely(wnd_sum < lso_mss)) {
2913 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
		/* a non-LSO packet that is too fragmented should always
		 * be linearized */
2923 if (unlikely(to_copy))
2924 DP(NETIF_MSG_TX_QUEUED,
2925 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2926 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2927 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
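/* Worked example (sketch, assuming MAX_FETCH_BD is 13, so wnd_size is
 * 10): an LSO skb with a linear part and 12 frags is checked with a
 * sliding 10-BD window over {linear, frag0..frag11}; if any window
 * carries less than one gso_size worth of payload, to_copy is set and
 * the caller linearizes the skb before transmission.
 */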
2933 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2936 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2937 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2938 ETH_TX_PARSE_BD_E2_LSO_MSS;
2939 if ((xmit_type & XMIT_GSO_V6) &&
2940 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2941 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2945 * bnx2x_set_pbd_gso - update PBD in GSO case.
2949 * @xmit_type: xmit flags
2951 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2952 struct eth_tx_parse_bd_e1x *pbd,
2955 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2956 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2957 pbd->tcp_flags = pbd_tcp_flags(skb);
2959 if (xmit_type & XMIT_GSO_V4) {
2960 pbd->ip_id = swab16(ip_hdr(skb)->id);
2961 pbd->tcp_pseudo_csum =
2962 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2964 0, IPPROTO_TCP, 0));
2967 pbd->tcp_pseudo_csum =
2968 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2969 &ipv6_hdr(skb)->daddr,
2970 0, IPPROTO_TCP, 0));
2972 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2976 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2978 * @bp: driver handle
2980 * @parsing_data: data to be updated
2981 * @xmit_type: xmit flags
2985 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2986 u32 *parsing_data, u32 xmit_type)
2989 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2990 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2991 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2993 if (xmit_type & XMIT_CSUM_TCP) {
2994 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2995 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2996 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2998 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3000 /* We support checksum offload for TCP and UDP only.
3001 * No need to pass the UDP header length - it's a constant.
3003 return skb_transport_header(skb) +
3004 sizeof(struct udphdr) - skb->data;
3007 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3008 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3010 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3012 if (xmit_type & XMIT_CSUM_V4)
3013 tx_start_bd->bd_flags.as_bitfield |=
3014 ETH_TX_BD_FLAGS_IP_CSUM;
3016 tx_start_bd->bd_flags.as_bitfield |=
3017 ETH_TX_BD_FLAGS_IPV6;
3019 if (!(xmit_type & XMIT_CSUM_TCP))
3020 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3024 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3026 * @bp: driver handle
3028 * @pbd: parse BD to be updated
3029 * @xmit_type: xmit flags
3031 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3032 struct eth_tx_parse_bd_e1x *pbd,
3035 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3037 /* for now NS flag is not used in Linux */
3039 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3040 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3042 pbd->ip_hlen_w = (skb_transport_header(skb) -
3043 skb_network_header(skb)) >> 1;
3045 hlen += pbd->ip_hlen_w;
3047 /* We support checksum offload for TCP and UDP only */
3048 if (xmit_type & XMIT_CSUM_TCP)
3049 hlen += tcp_hdrlen(skb) / 2;
3051 hlen += sizeof(struct udphdr) / 2;
3053 pbd->total_hlen_w = cpu_to_le16(hlen);
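	/* e.g. (sketch): an untagged IPv4/TCP packet with no options has a
	 * 14 byte L2 header (7 words), a 20 byte IP header (ip_hlen_w = 10)
	 * and a 20 byte TCP header (10 words), so total_hlen_w = 27 words,
	 * i.e. 54 bytes.
	 */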
3056 if (xmit_type & XMIT_CSUM_TCP) {
3057 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3060 s8 fix = SKB_CS_OFF(skb); /* signed! */
3062 DP(NETIF_MSG_TX_QUEUED,
3063 "hlen %d fix %d csum before fix %x\n",
3064 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3066 /* HW bug: fixup the CSUM */
3067 pbd->tcp_pseudo_csum =
3068 bnx2x_csum_fix(skb_transport_header(skb),
3071 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3072 pbd->tcp_pseudo_csum);
3078 /* called with netif_tx_lock
3079 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
3082 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3084 struct bnx2x *bp = netdev_priv(dev);
3086 struct netdev_queue *txq;
3087 struct bnx2x_fp_txdata *txdata;
3088 struct sw_tx_bd *tx_buf;
3089 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3090 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3091 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3092 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3093 u32 pbd_e2_parsing_data = 0;
3094 u16 pkt_prod, bd_prod;
3097 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3100 __le16 pkt_size = 0;
3102 u8 mac_type = UNICAST_ADDRESS;
3104 #ifdef BNX2X_STOP_ON_ERROR
3105 if (unlikely(bp->panic))
3106 return NETDEV_TX_BUSY;
3109 txq_index = skb_get_queue_mapping(skb);
3110 txq = netdev_get_tx_queue(dev, txq_index);
3112 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3114 txdata = &bp->bnx2x_txq[txq_index];
3116 /* enable this debug print to view the transmission queue being used
3117 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3118 txq_index, fp_index, txdata_index); */
	/* enable this debug print to view the transmission details
3121 DP(NETIF_MSG_TX_QUEUED,
3122 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3123 txdata->cid, fp_index, txdata_index, txdata, fp); */
3125 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3126 skb_shinfo(skb)->nr_frags +
3128 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3129 /* Handle special storage cases separately */
3130 if (txdata->tx_ring_size != 0) {
3131 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3132 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3133 netif_tx_stop_queue(txq);
3136 return NETDEV_TX_BUSY;
3139 DP(NETIF_MSG_TX_QUEUED,
3140 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
3141 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3142 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3144 eth = (struct ethhdr *)skb->data;
3146 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3147 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3148 if (is_broadcast_ether_addr(eth->h_dest))
3149 mac_type = BROADCAST_ADDRESS;
3151 mac_type = MULTICAST_ADDRESS;
3154 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
3158 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3159 /* Statistics of linearization */
3161 if (skb_linearize(skb) != 0) {
3162 DP(NETIF_MSG_TX_QUEUED,
3163 "SKB linearization failed - silently dropping this SKB\n");
3164 dev_kfree_skb_any(skb);
3165 return NETDEV_TX_OK;
3169 /* Map skb linear data for DMA */
3170 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3171 skb_headlen(skb), DMA_TO_DEVICE);
3172 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3173 DP(NETIF_MSG_TX_QUEUED,
3174 "SKB mapping failed - silently dropping this SKB\n");
3175 dev_kfree_skb_any(skb);
3176 return NETDEV_TX_OK;
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
3187 /* get current pkt produced now - advance it just before sending packet
3188 * since mapping of pages may fail and cause packet to be dropped
3190 pkt_prod = txdata->tx_pkt_prod;
3191 bd_prod = TX_BD(txdata->tx_bd_prod);
3193 /* get a tx_buf and first BD
3194 * tx_start_bd may be changed during SPLIT,
3195 * but first_bd will always stay first
3197 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3198 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3199 first_bd = tx_start_bd;
3201 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3202 SET_FLAG(tx_start_bd->general_data,
3203 ETH_TX_START_BD_PARSE_NBDS,
3207 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3209 /* remember the first BD of the packet */
3210 tx_buf->first_bd = txdata->tx_bd_prod;
3214 DP(NETIF_MSG_TX_QUEUED,
3215 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3216 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3218 if (vlan_tx_tag_present(skb)) {
3219 tx_start_bd->vlan_or_ethertype =
3220 cpu_to_le16(vlan_tx_tag_get(skb));
3221 tx_start_bd->bd_flags.as_bitfield |=
3222 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3224 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3226 /* turn on parsing and get a BD */
3227 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3229 if (xmit_type & XMIT_CSUM)
3230 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3232 if (!CHIP_IS_E1x(bp)) {
3233 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3234 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3235 /* Set PBD in checksum offload case */
3236 if (xmit_type & XMIT_CSUM)
3237 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3238 &pbd_e2_parsing_data,
		/* fill in the MAC addresses in the PBD - for local
		 * switching
		 */
3245 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3246 &pbd_e2->src_mac_addr_mid,
3247 &pbd_e2->src_mac_addr_lo,
3249 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3250 &pbd_e2->dst_mac_addr_mid,
3251 &pbd_e2->dst_mac_addr_lo,
3255 SET_FLAG(pbd_e2_parsing_data,
3256 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3258 u16 global_data = 0;
3259 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3260 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3261 /* Set PBD in checksum offload case */
3262 if (xmit_type & XMIT_CSUM)
3263 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3265 SET_FLAG(global_data,
3266 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3267 pbd_e1x->global_data |= cpu_to_le16(global_data);
3270 /* Setup the data pointer of the first BD of the packet */
3271 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3272 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3273 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3274 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3275 pkt_size = tx_start_bd->nbytes;
3277 DP(NETIF_MSG_TX_QUEUED,
3278 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3279 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3280 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3281 tx_start_bd->bd_flags.as_bitfield,
3282 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3284 if (xmit_type & XMIT_GSO) {
3286 DP(NETIF_MSG_TX_QUEUED,
3287 "TSO packet len %d hlen %d total len %d tso size %d\n",
3288 skb->len, hlen, skb_headlen(skb),
3289 skb_shinfo(skb)->gso_size);
3291 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3293 if (unlikely(skb_headlen(skb) > hlen))
3294 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3297 if (!CHIP_IS_E1x(bp))
3298 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3301 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3304 /* Set the PBD's parsing_data field if not zero
3305 * (for the chips newer than 57711).
3307 if (pbd_e2_parsing_data)
3308 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3310 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3312 /* Handle fragmented skb */
3313 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3314 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3316 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3317 skb_frag_size(frag), DMA_TO_DEVICE);
3318 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3319 unsigned int pkts_compl = 0, bytes_compl = 0;
3321 DP(NETIF_MSG_TX_QUEUED,
3322 "Unable to map page - dropping packet...\n");
			/* we need to unmap all buffers already mapped
			 * for this SKB; first_bd->nbd needs to be properly
			 * updated before the call to bnx2x_free_tx_pkt
			 */
3329 first_bd->nbd = cpu_to_le16(nbd);
3330 bnx2x_free_tx_pkt(bp, txdata,
3331 TX_BD(txdata->tx_pkt_prod),
3332 &pkts_compl, &bytes_compl);
3333 return NETDEV_TX_OK;
3336 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3337 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3338 if (total_pkt_bd == NULL)
3339 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3341 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3342 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3343 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3344 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3347 DP(NETIF_MSG_TX_QUEUED,
3348 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3349 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3350 le16_to_cpu(tx_data_bd->nbytes));
3353 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3355 /* update with actual num BDs */
3356 first_bd->nbd = cpu_to_le16(nbd);
3358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3360 /* now send a tx doorbell, counting the next BD
3361 * if the packet contains or ends with it
3363 if (TX_BD_POFF(bd_prod) < nbd)
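	/* Sketch of the accounting: the last entry of every BD page is a
	 * "next page" pointer rather than a real BD, so if this packet's
	 * BDs wrapped past a page boundary the FW has one extra BD to
	 * fetch and nbd is incremented accordingly.
	 */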
	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, which we care about much more.
	 */
3373 if (total_pkt_bd != NULL)
3374 total_pkt_bd->total_pkt_bytes = pkt_size;
3377 DP(NETIF_MSG_TX_QUEUED,
3378 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3379 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3380 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3381 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3382 le16_to_cpu(pbd_e1x->total_hlen_w));
3384 DP(NETIF_MSG_TX_QUEUED,
3385 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3386 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3387 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3388 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3389 pbd_e2->parsing_data);
3390 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3392 netdev_tx_sent_queue(txq, skb->len);
3394 skb_tx_timestamp(skb);
3396 txdata->tx_pkt_prod++;
3398 * Make sure that the BD data is updated before updating the producer
3399 * since FW might read the BD right after the producer is updated.
3400 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();
3406 txdata->tx_db.data.prod += nbd;
3409 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3413 txdata->tx_bd_prod += nbd;
3415 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3416 netif_tx_stop_queue(txq);
3418 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3419 * ordering of set_bit() in netif_tx_stop_queue() and read of
3423 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3424 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3425 netif_tx_wake_queue(txq);
3429 return NETDEV_TX_OK;
/**
 * bnx2x_setup_tc - routine to configure the net_device for multi tc
 *
 * @dev:	net device to configure
 * @num_tc:	number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
3440 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3442 int cos, prio, count, offset;
3443 struct bnx2x *bp = netdev_priv(dev);
3445 /* setup tc must be called under rtnl lock */
3448 /* no traffic classes requested. aborting */
3450 netdev_reset_tc(dev);
3454 /* requested to support too many traffic classes */
3455 if (num_tc > bp->max_cos) {
3456 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3457 num_tc, bp->max_cos);
3461 /* declare amount of supported traffic classes */
3462 if (netdev_set_num_tc(dev, num_tc)) {
3463 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3467 /* configure priority to traffic class mapping */
3468 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3469 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3470 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3471 "mapping priority %d to tc %d\n",
3472 prio, bp->prio_to_cos[prio]);
	/* Use this configuration to differentiate tc0 from other COSes.
	   This can be used for ETS or PFC, and saves the effort of setting
	   up a multi-class queue disc or negotiating DCBX with a switch:
3479 netdev_set_prio_tc_map(dev, 0, 0);
3480 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3481 for (prio = 1; prio < 16; prio++) {
3482 netdev_set_prio_tc_map(dev, prio, 1);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	}
	*/
3486 /* configure traffic class to transmission queue mapping */
3487 for (cos = 0; cos < bp->max_cos; cos++) {
3488 count = BNX2X_NUM_ETH_QUEUES(bp);
3489 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3490 netdev_set_tc_queue(dev, cos, count, offset);
3491 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3492 "mapping tc %d to offset %d count %d\n",
3493 cos, offset, count);
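	/* e.g. (sketch, assuming 4 ETH RSS queues, no extra non-CNIC queues
	 * and max_cos == 3): tc0 maps to txqs 0..3, tc1 to 4..7 and tc2 to
	 * 8..11 (offset = cos * 4, count = 4 for each tc).
	 */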
3499 /* called with rtnl_lock */
3500 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3502 struct sockaddr *addr = p;
3503 struct bnx2x *bp = netdev_priv(dev);
3506 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3507 BNX2X_ERR("Requested MAC address is not valid\n");
3511 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3512 !is_zero_ether_addr(addr->sa_data)) {
3513 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3517 if (netif_running(dev)) {
3518 rc = bnx2x_set_eth_mac(bp, false);
3523 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3524 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3526 if (netif_running(dev))
3527 rc = bnx2x_set_eth_mac(bp, true);
3532 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3534 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3535 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3540 if (IS_FCOE_IDX(fp_index)) {
3541 memset(sb, 0, sizeof(union host_hc_status_block));
3542 fp->status_blk_mapping = 0;
3545 if (!CHIP_IS_E1x(bp))
3546 BNX2X_PCI_FREE(sb->e2_sb,
3547 bnx2x_fp(bp, fp_index,
3548 status_blk_mapping),
3549 sizeof(struct host_hc_status_block_e2));
3551 BNX2X_PCI_FREE(sb->e1x_sb,
3552 bnx2x_fp(bp, fp_index,
3553 status_blk_mapping),
3554 sizeof(struct host_hc_status_block_e1x));
3558 if (!skip_rx_queue(bp, fp_index)) {
3559 bnx2x_free_rx_bds(fp);
3561 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3562 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3563 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3564 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3565 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3567 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3568 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3569 sizeof(struct eth_fast_path_rx_cqe) *
3573 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3574 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3575 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3576 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3580 if (!skip_tx_queue(bp, fp_index)) {
3581 /* fastpath tx rings: tx_buf tx_desc */
3582 for_each_cos_in_tx_queue(fp, cos) {
3583 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3585 DP(NETIF_MSG_IFDOWN,
3586 "freeing tx memory of fp %d cos %d cid %d\n",
3587 fp_index, cos, txdata->cid);
3589 BNX2X_FREE(txdata->tx_buf_ring);
3590 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3591 txdata->tx_desc_mapping,
3592 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3595 /* end of fastpath */
3598 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3601 for_each_cnic_queue(bp, i)
3602 bnx2x_free_fp_mem_at(bp, i);
3605 void bnx2x_free_fp_mem(struct bnx2x *bp)
3608 for_each_eth_queue(bp, i)
3609 bnx2x_free_fp_mem_at(bp, i);
3612 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3614 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3615 if (!CHIP_IS_E1x(bp)) {
3616 bnx2x_fp(bp, index, sb_index_values) =
3617 (__le16 *)status_blk.e2_sb->sb.index_values;
3618 bnx2x_fp(bp, index, sb_running_index) =
3619 (__le16 *)status_blk.e2_sb->sb.running_index;
3621 bnx2x_fp(bp, index, sb_index_values) =
3622 (__le16 *)status_blk.e1x_sb->sb.index_values;
3623 bnx2x_fp(bp, index, sb_running_index) =
3624 (__le16 *)status_blk.e1x_sb->sb.running_index;
3628 /* Returns the number of actually allocated BDs */
3629 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3632 struct bnx2x *bp = fp->bp;
3633 u16 ring_prod, cqe_ring_prod;
3634 int i, failure_cnt = 0;
3636 fp->rx_comp_cons = 0;
3637 cqe_ring_prod = ring_prod = 0;
	/* This routine is called only during fp init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
3642 for (i = 0; i < rx_ring_size; i++) {
3643 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3647 ring_prod = NEXT_RX_IDX(ring_prod);
3648 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3649 WARN_ON(ring_prod <= (i - failure_cnt));
3653 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3654 i - failure_cnt, fp->index);
3656 fp->rx_bd_prod = ring_prod;
3657 /* Limit the CQE producer by the CQE ring size */
3658 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3660 fp->rx_pkt = fp->rx_calls = 0;
3662 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3664 return i - failure_cnt;
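/* e.g. (sketch): asked for rx_ring_size == 4096 with 3 allocation
 * failures, the ring is produced with 4093 BDs, rx_skb_alloc_failed is
 * bumped by 3 and 4093 is returned, so the caller can decide whether
 * the queue ended up too small to enable.
 */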
3667 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3671 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3672 struct eth_rx_cqe_next_page *nextpg;
3674 nextpg = (struct eth_rx_cqe_next_page *)
3675 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3677 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3678 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3680 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3681 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
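/* Sketch of the resulting layout: the last CQE of page k holds the DMA
 * address of page k+1, and the final page (i == NUM_RCQ_RINGS, where
 * i % NUM_RCQ_RINGS == 0) points back at page 0, so the completion
 * queue forms a circular chain the HW can follow across pages.
 */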
3685 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3687 union host_hc_status_block *sb;
3688 struct bnx2x_fastpath *fp = &bp->fp[index];
3691 int rx_ring_size = 0;
3693 if (!bp->rx_ring_size &&
3694 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3695 rx_ring_size = MIN_RX_SIZE_NONTPA;
3696 bp->rx_ring_size = rx_ring_size;
3697 } else if (!bp->rx_ring_size) {
3698 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3700 if (CHIP_IS_E3(bp)) {
3701 u32 cfg = SHMEM_RD(bp,
3702 dev_info.port_hw_config[BP_PORT(bp)].
3705 /* Decrease ring size for 1G functions */
3706 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3707 PORT_HW_CFG_NET_SERDES_IF_SGMII)
		/* allocate at least the number of buffers required by FW */
3712 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3713 MIN_RX_SIZE_TPA, rx_ring_size);
3715 bp->rx_ring_size = rx_ring_size;
3716 } else /* if rx_ring_size specified - use it */
3717 rx_ring_size = bp->rx_ring_size;
3720 sb = &bnx2x_fp(bp, index, status_blk);
3722 if (!IS_FCOE_IDX(index)) {
3724 if (!CHIP_IS_E1x(bp))
3725 BNX2X_PCI_ALLOC(sb->e2_sb,
3726 &bnx2x_fp(bp, index, status_blk_mapping),
3727 sizeof(struct host_hc_status_block_e2));
3729 BNX2X_PCI_ALLOC(sb->e1x_sb,
3730 &bnx2x_fp(bp, index, status_blk_mapping),
3731 sizeof(struct host_hc_status_block_e1x));
3734 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
3737 if (!IS_FCOE_IDX(index))
3738 set_sb_shortcuts(bp, index);
3741 if (!skip_tx_queue(bp, index)) {
3742 /* fastpath tx rings: tx_buf tx_desc */
3743 for_each_cos_in_tx_queue(fp, cos) {
3744 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3747 "allocating tx memory of fp %d cos %d\n",
3750 BNX2X_ALLOC(txdata->tx_buf_ring,
3751 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3752 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3753 &txdata->tx_desc_mapping,
3754 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3759 if (!skip_rx_queue(bp, index)) {
3760 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3761 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3762 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3763 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3764 &bnx2x_fp(bp, index, rx_desc_mapping),
3765 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3767 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3768 &bnx2x_fp(bp, index, rx_comp_mapping),
3769 sizeof(struct eth_fast_path_rx_cqe) *
3773 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3774 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3775 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3776 &bnx2x_fp(bp, index, rx_sge_mapping),
3777 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3779 bnx2x_set_next_page_rx_bd(fp);
3782 bnx2x_set_next_page_rx_cq(fp);
3785 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3786 if (ring_size < rx_ring_size)
3792 /* handles low memory cases */
3794 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
3800 if (ring_size < (fp->disable_tpa ?
3801 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3802 /* release memory allocated for this queue */
3803 bnx2x_free_fp_mem_at(bp, index);
3809 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3813 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
		/* we will fail the load process instead of marking
		 * NO_FCOE_FLAG
		 */
3822 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3826 /* 1. Allocate FP for leading - fatal if error
3827 * 2. Allocate RSS - fix number of queues if error
3831 if (bnx2x_alloc_fp_mem_at(bp, 0))
3835 for_each_nondefault_eth_queue(bp, i)
3836 if (bnx2x_alloc_fp_mem_at(bp, i))
3839 /* handle memory failures */
3840 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3841 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3844 if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP;
			 * must be done in this order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
3851 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3852 bp->num_ethernet_queues -= delta;
3853 bp->num_queues = bp->num_ethernet_queues +
3854 bp->num_cnic_queues;
3855 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3856 bp->num_queues + delta, bp->num_queues);
3862 void bnx2x_free_mem_bp(struct bnx2x *bp)
3864 kfree(bp->fp->tpa_info);
3867 kfree(bp->fp_stats);
3868 kfree(bp->bnx2x_txq);
3869 kfree(bp->msix_table);
3873 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3875 struct bnx2x_fastpath *fp;
3876 struct msix_entry *tbl;
3877 struct bnx2x_ilt *ilt;
3878 int msix_table_size = 0;
3879 int fp_array_size, txq_array_size;
	/* The biggest MSI-X table we might need is the maximum number of
	 * fastpath IGU SBs plus the default SB (for the PF).
	 */
3886 msix_table_size = bp->igu_sb_cnt + 1;
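	/* e.g. (sketch): a PF with 16 fastpath IGU status blocks sizes the
	 * table at 17 entries - one vector per SB plus the default SB.
	 */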
3888 /* fp array: RSS plus CNIC related L2 queues */
3889 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3890 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3892 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3895 for (i = 0; i < fp_array_size; i++) {
3897 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3898 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3899 if (!(fp[i].tpa_info))
3905 /* allocate sp objs */
3906 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3911 /* allocate fp_stats */
3912 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3917 /* Allocate memory for the transmission queues array */
3919 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3920 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3922 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3928 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3931 bp->msix_table = tbl;
3934 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3941 bnx2x_free_mem_bp(bp);
3946 int bnx2x_reload_if_running(struct net_device *dev)
3948 struct bnx2x *bp = netdev_priv(dev);
3950 if (unlikely(!netif_running(dev)))
3953 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3954 return bnx2x_nic_load(bp, LOAD_NORMAL);
3957 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3959 u32 sel_phy_idx = 0;
3960 if (bp->link_params.num_phys <= 1)
3963 if (bp->link_vars.link_up) {
3964 sel_phy_idx = EXT_PHY1;
3965 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3966 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3967 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3968 sel_phy_idx = EXT_PHY2;
3971 switch (bnx2x_phy_selection(&bp->link_params)) {
3972 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3973 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3974 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3975 sel_phy_idx = EXT_PHY1;
3977 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3978 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3979 sel_phy_idx = EXT_PHY2;
3987 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3989 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/* The selected active PHY is always the post-swap one (in case PHY
	 * swapping is enabled), so when swapping is enabled we need to
	 * reverse the configuration.
	 */
3996 if (bp->link_params.multi_phy_config &
3997 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3998 if (sel_phy_idx == EXT_PHY1)
3999 sel_phy_idx = EXT_PHY2;
4000 else if (sel_phy_idx == EXT_PHY2)
4001 sel_phy_idx = EXT_PHY1;
4003 return LINK_CONFIG_IDX(sel_phy_idx);
4006 #ifdef NETDEV_FCOE_WWNN
4007 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4009 struct bnx2x *bp = netdev_priv(dev);
4010 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4013 case NETDEV_FCOE_WWNN:
4014 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4015 cp->fcoe_wwn_node_name_lo);
4017 case NETDEV_FCOE_WWPN:
4018 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4019 cp->fcoe_wwn_port_name_lo);
4022 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4030 /* called with rtnl_lock */
4031 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4033 struct bnx2x *bp = netdev_priv(dev);
4035 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't change MTU during parity recovery\n");
4040 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4041 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4042 BNX2X_ERR("Can't support requested MTU size\n");
4046 /* This does not race with packet allocation
4047 * because the actual alloc size is
4048 * only updated as part of load
4052 return bnx2x_reload_if_running(dev);
4055 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4056 netdev_features_t features)
4058 struct bnx2x *bp = netdev_priv(dev);
4060 /* TPA requires Rx CSUM offloading */
4061 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4062 features &= ~NETIF_F_LRO;
4063 features &= ~NETIF_F_GRO;
4069 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4071 struct bnx2x *bp = netdev_priv(dev);
4072 u32 flags = bp->flags;
4073 bool bnx2x_reload = false;
4075 if (features & NETIF_F_LRO)
4076 flags |= TPA_ENABLE_FLAG;
4078 flags &= ~TPA_ENABLE_FLAG;
4080 if (features & NETIF_F_GRO)
4081 flags |= GRO_ENABLE_FLAG;
4083 flags &= ~GRO_ENABLE_FLAG;
4085 if (features & NETIF_F_LOOPBACK) {
4086 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4087 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4088 bnx2x_reload = true;
4091 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4092 bp->link_params.loopback_mode = LOOPBACK_NONE;
4093 bnx2x_reload = true;
4097 if (flags ^ bp->flags) {
4099 bnx2x_reload = true;
4103 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4104 return bnx2x_reload_if_running(dev);
4105 /* else: bnx2x_nic_load() will be called at end of recovery */
4111 void bnx2x_tx_timeout(struct net_device *dev)
4113 struct bnx2x *bp = netdev_priv(dev);
4115 #ifdef BNX2X_STOP_ON_ERROR
4120 smp_mb__before_clear_bit();
4121 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4122 smp_mb__after_clear_bit();
	/* This allows the netif to be shut down gracefully before resetting */
4125 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4128 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4130 struct net_device *dev = pci_get_drvdata(pdev);
4134 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4137 bp = netdev_priv(dev);
4141 pci_save_state(pdev);
4143 if (!netif_running(dev)) {
4148 netif_device_detach(dev);
4150 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4152 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4159 int bnx2x_resume(struct pci_dev *pdev)
4161 struct net_device *dev = pci_get_drvdata(pdev);
4166 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4169 bp = netdev_priv(dev);
4171 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4172 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4178 pci_restore_state(pdev);
4180 if (!netif_running(dev)) {
4185 bnx2x_set_power_state(bp, PCI_D0);
4186 netif_device_attach(dev);
4188 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4196 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4199 /* ustorm cxt validation */
4200 cxt->ustorm_ag_context.cdu_usage =
4201 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4202 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4203 /* xcontext validation */
4204 cxt->xstorm_ag_context.cdu_reserved =
4205 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4206 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4209 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4210 u8 fw_sb_id, u8 sb_index,
4214 u32 addr = BAR_CSTRORM_INTMEM +
4215 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4216 REG_WR8(bp, addr, ticks);
4218 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4219 port, fw_sb_id, sb_index, ticks);
4222 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4223 u16 fw_sb_id, u8 sb_index,
4226 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4227 u32 addr = BAR_CSTRORM_INTMEM +
4228 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4229 u16 flags = REG_RD16(bp, addr);
4231 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4232 flags |= enable_flag;
4233 REG_WR16(bp, addr, flags);
4235 "port %x fw_sb_id %d sb_index %d disable %d\n",
4236 port, fw_sb_id, sb_index, disable);
4239 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4240 u8 sb_index, u8 disable, u16 usec)
4242 int port = BP_PORT(bp);
4243 u8 ticks = usec / BNX2X_BTR;
4245 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4247 disable = disable ? 1 : (usec ? 0 : 1);
4248 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
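/* Worked example (sketch, assuming BNX2X_BTR is 4): usec == 48 yields
 * ticks == 12; passing usec == 0 forces disable to 1, so the index
 * stops generating interrupts even if the caller asked for it to stay
 * enabled.
 */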