1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
33 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36 static int bnx2x_poll(struct napi_struct *napi, int budget);
38 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 /* Add NAPI objects */
43 for_each_rx_queue_cnic(bp, i) {
44 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45 bnx2x_poll, NAPI_POLL_WEIGHT);
46 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 static void bnx2x_add_all_napi(struct bnx2x *bp)
54 /* Add NAPI objects */
55 for_each_eth_queue(bp, i) {
56 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57 bnx2x_poll, NAPI_POLL_WEIGHT);
58 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 return bnx2x_num_queues ?
65 min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
66 min_t(int, netif_get_num_default_rss_queues(),
67 BNX2X_MAX_QUEUES(bp));
71 * bnx2x_move_fp - move content of the fastpath structure.
74 * @from: source FP index
75 * @to: destination FP index
77 * Makes sure the contents of bp->fp[to].napi are kept
78 * intact. This is done by first copying the napi struct from
79 * the target to the source, and then memcpying the entire
80 * source onto the target. Update txdata pointers and related
83 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
85 struct bnx2x_fastpath *from_fp = &bp->fp[from];
86 struct bnx2x_fastpath *to_fp = &bp->fp[to];
87 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
88 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
89 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
90 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
91 int old_max_eth_txqs, new_max_eth_txqs;
92 int old_txdata_index = 0, new_txdata_index = 0;
93 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
95 /* Copy the NAPI object as it has been already initialized */
96 from_fp->napi = to_fp->napi;
98 /* Move bnx2x_fastpath contents */
99 memcpy(to_fp, from_fp, sizeof(*to_fp));
102 /* Retain the tpa_info of the original `to' version as we don't want
103 * 2 FPs to contain the same tpa_info pointer.
105 to_fp->tpa_info = old_tpa_info;
107 /* move sp_objs contents as well, as their indices match fp ones */
108 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
110 /* move fp_stats contents as well, as their indices match fp ones */
111 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
113 /* Update txdata pointers in fp and move txdata content accordingly:
114 * Each fp consumes 'max_cos' txdata structures, so the index should be
115 * decremented by max_cos x delta.
118 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
119 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
121 if (from == FCOE_IDX(bp)) {
122 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
123 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
126 memcpy(&bp->bnx2x_txq[new_txdata_index],
127 &bp->bnx2x_txq[old_txdata_index],
128 sizeof(struct bnx2x_fp_txdata));
129 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
133 * bnx2x_fill_fw_str - Fill buffer with FW version string.
136 * @buf: character buffer to fill with the fw name
137 * @buf_len: length of the above buffer
140 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
143 u8 phy_fw_ver[PHY_FW_VER_LEN];
145 phy_fw_ver[0] = '\0';
146 bnx2x_get_ext_phy_fw_version(&bp->link_params,
147 phy_fw_ver, PHY_FW_VER_LEN);
148 strlcpy(buf, bp->fw_ver, buf_len);
149 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
151 (bp->common.bc_ver & 0xff0000) >> 16,
152 (bp->common.bc_ver & 0xff00) >> 8,
153 (bp->common.bc_ver & 0xff),
154 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
156 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
161 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
164 * @delta: number of eth queues which were not allocated
166 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
168 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
170 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
171 * backward along the array could cause memory to be overwritten
173 for (cos = 1; cos < bp->max_cos; cos++) {
174 for (i = 0; i < old_eth_num - delta; i++) {
175 struct bnx2x_fastpath *fp = &bp->fp[i];
176 int new_idx = cos * (old_eth_num - delta) + i;
178 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
179 sizeof(struct bnx2x_fp_txdata));
180 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
185 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
187 /* free skb in the packet ring at pos idx
188 * return idx of last bd freed
190 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
191 u16 idx, unsigned int *pkts_compl,
192 unsigned int *bytes_compl)
194 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
195 struct eth_tx_start_bd *tx_start_bd;
196 struct eth_tx_bd *tx_data_bd;
197 struct sk_buff *skb = tx_buf->skb;
198 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
201 /* prefetch skb end pointer to speed up dev_kfree_skb() */
204 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
205 txdata->txq_index, idx, tx_buf, skb);
208 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
209 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
210 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
212 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
213 #ifdef BNX2X_STOP_ON_ERROR
214 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
215 BNX2X_ERR("BAD nbd!\n");
219 new_cons = nbd + tx_buf->first_bd;
221 /* Get the next bd */
222 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
224 /* Skip a parse bd... */
226 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
228 /* ...and the TSO split header bd since they have no mapping */
229 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
237 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
238 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
239 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
241 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
248 (*bytes_compl) += skb->len;
251 dev_kfree_skb_any(skb);
252 tx_buf->first_bd = 0;
258 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
260 struct netdev_queue *txq;
261 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
262 unsigned int pkts_compl = 0, bytes_compl = 0;
264 #ifdef BNX2X_STOP_ON_ERROR
265 if (unlikely(bp->panic))
269 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
270 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
271 sw_cons = txdata->tx_pkt_cons;
273 while (sw_cons != hw_cons) {
276 pkt_cons = TX_BD(sw_cons);
278 DP(NETIF_MSG_TX_DONE,
279 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
280 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
282 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
283 &pkts_compl, &bytes_compl);
288 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
290 txdata->tx_pkt_cons = sw_cons;
291 txdata->tx_bd_cons = bd_cons;
293 /* Need to make the tx_bd_cons update visible to start_xmit()
294 * before checking for netif_tx_queue_stopped(). Without the
295 * memory barrier, there is a small possibility that
296 * start_xmit() will miss it and cause the queue to be stopped
298 * On the other hand we need an rmb() here to ensure the proper
299 * ordering of bit testing in the following
300 * netif_tx_queue_stopped(txq) call.
304 if (unlikely(netif_tx_queue_stopped(txq))) {
305 /* Taking tx_lock() is needed to prevent re-enabling the queue
306 * while it's empty. This could have happened if rx_action() gets
307 * suspended in bnx2x_tx_int() after the condition before
308 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
310 * stops the queue->sees fresh tx_bd_cons->releases the queue->
311 * sends some packets consuming the whole queue again->
315 __netif_tx_lock(txq, smp_processor_id());
317 if ((netif_tx_queue_stopped(txq)) &&
318 (bp->state == BNX2X_STATE_OPEN) &&
319 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
320 netif_tx_wake_queue(txq);
322 __netif_tx_unlock(txq);
327 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
330 u16 last_max = fp->last_max_sge;
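/* SUB_S16() is a signed 16-bit subtraction, so the comparison stays correct across SGE index wrap-around */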
332 if (SUB_S16(idx, last_max) > 0)
333 fp->last_max_sge = idx;
336 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
338 struct eth_end_agg_rx_cqe *cqe)
340 struct bnx2x *bp = fp->bp;
341 u16 last_max, last_elem, first_elem;
348 /* First mark all used pages */
349 for (i = 0; i < sge_len; i++)
350 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
351 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
353 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
354 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
356 /* Here we assume that the last SGE index is the biggest */
357 prefetch((void *)(fp->sge_mask));
358 bnx2x_update_last_max_sge(fp,
359 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
361 last_max = RX_SGE(fp->last_max_sge);
362 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
363 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
365 /* If ring is not full */
366 if (last_elem + 1 != first_elem)
369 /* Now update the prod */
370 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
371 if (likely(fp->sge_mask[i]))
374 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
375 delta += BIT_VEC64_ELEM_SZ;
379 fp->rx_sge_prod += delta;
380 /* clear page-end entries */
381 bnx2x_clear_sge_mask_next_elems(fp);
384 DP(NETIF_MSG_RX_STATUS,
385 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
386 fp->last_max_sge, fp->rx_sge_prod);
389 /* Get Toeplitz hash value in the skb using the value from the
390 * CQE (calculated by HW).
392 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
393 const struct eth_fast_path_rx_cqe *cqe,
394 enum pkt_hash_types *rxhash_type)
396 /* Get Toeplitz hash from CQE */
397 if ((bp->dev->features & NETIF_F_RXHASH) &&
398 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
399 enum eth_rss_hash_type htype;
401 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
402 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
403 (htype == TCP_IPV6_HASH_TYPE)) ?
404 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
406 return le32_to_cpu(cqe->rss_hash_result);
408 *rxhash_type = PKT_HASH_TYPE_NONE;
412 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
414 struct eth_fast_path_rx_cqe *cqe)
416 struct bnx2x *bp = fp->bp;
417 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
418 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
419 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
421 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
422 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
424 /* print error if current state != stop */
425 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
426 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
428 /* Try to map an empty data buffer from the aggregation info */
429 mapping = dma_map_single(&bp->pdev->dev,
430 first_buf->data + NET_SKB_PAD,
431 fp->rx_buf_size, DMA_FROM_DEVICE);
433 * ...if it fails - move the skb from the consumer to the producer
434 * and set the current aggregation state as ERROR to drop it
435 * when TPA_STOP arrives.
438 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
439 /* Move the BD from the consumer to the producer */
440 bnx2x_reuse_rx_data(fp, cons, prod);
441 tpa_info->tpa_state = BNX2X_TPA_ERROR;
445 /* move empty data from pool to prod */
446 prod_rx_buf->data = first_buf->data;
447 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
448 /* point prod_bd to new data */
449 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
450 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
452 /* move partial skb from cons to pool (don't unmap yet) */
453 *first_buf = *cons_rx_buf;
455 /* mark bin state as START */
456 tpa_info->parsing_flags =
457 le16_to_cpu(cqe->pars_flags.flags);
458 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
459 tpa_info->tpa_state = BNX2X_TPA_START;
460 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
461 tpa_info->placement_offset = cqe->placement_offset;
462 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
463 if (fp->mode == TPA_MODE_GRO) {
464 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
465 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
466 tpa_info->gro_size = gro_size;
469 #ifdef BNX2X_STOP_ON_ERROR
470 fp->tpa_queue_used |= (1 << queue);
471 #ifdef _ASM_GENERIC_INT_L64_H
472 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
474 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
480 /* Timestamp option length allowed for TPA aggregation:
482 * nop nop kind length echo val
484 #define TPA_TSTAMP_OPT_LEN 12
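/* nop(1) + nop(1) + kind(1) + length(1) + echo(4) + echo reply(4) = 12 bytes */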
486 * bnx2x_set_gro_params - compute GRO values
489 * @parsing_flags: parsing flags from the START CQE
490 * @len_on_bd: total length of the first packet for the
492 * @pkt_len: length of all segments
494 * Approximate value of the MSS for this aggregation, calculated using
495 * its first packet.
496 * Compute the number of aggregated segments and the gso_type.
498 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
499 u16 len_on_bd, unsigned int pkt_len,
500 u16 num_of_coalesced_segs)
502 /* TPA aggregation won't have either IP options or TCP options
503 * other than timestamp or IPv6 extension headers.
505 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
507 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
508 PRS_FLAG_OVERETH_IPV6) {
509 hdrs_len += sizeof(struct ipv6hdr);
510 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
512 hdrs_len += sizeof(struct iphdr);
513 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
516 /* Check if there was a TCP timestamp; if there is, it will
517 * always be 12 bytes long: nop nop kind length echo val.
519 * Otherwise FW would close the aggregation.
521 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
522 hdrs_len += TPA_TSTAMP_OPT_LEN;
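/* gso_size approximates the MSS as the first packet's payload: length on BD minus all headers */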
524 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
526 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
527 * to skb_shinfo(skb)->gso_segs
529 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
532 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
533 u16 index, gfp_t gfp_mask)
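/* Each SGE entry maps one compound allocation of PAGES_PER_SGE pages (SGE_PAGES bytes) */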
535 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
536 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
537 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
540 if (unlikely(page == NULL)) {
541 BNX2X_ERR("Can't alloc sge\n");
545 mapping = dma_map_page(&bp->pdev->dev, page, 0,
546 SGE_PAGES, DMA_FROM_DEVICE);
547 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
548 __free_pages(page, PAGES_PER_SGE_SHIFT);
549 BNX2X_ERR("Can't map sge\n");
554 dma_unmap_addr_set(sw_buf, mapping, mapping);
556 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
557 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
562 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
563 struct bnx2x_agg_info *tpa_info,
566 struct eth_end_agg_rx_cqe *cqe,
569 struct sw_rx_page *rx_pg, old_rx_pg;
570 u32 i, frag_len, frag_size;
571 int err, j, frag_id = 0;
572 u16 len_on_bd = tpa_info->len_on_bd;
573 u16 full_page = 0, gro_size = 0;
575 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
577 if (fp->mode == TPA_MODE_GRO) {
578 gro_size = tpa_info->gro_size;
579 full_page = tpa_info->full_page;
582 /* This is needed in order to enable forwarding support */
584 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
585 le16_to_cpu(cqe->pkt_len),
586 le16_to_cpu(cqe->num_of_coalesced_segs));
588 #ifdef BNX2X_STOP_ON_ERROR
589 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
590 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
592 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
598 /* Run through the SGL and compose the fragmented skb */
599 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
600 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
602 /* FW gives the indices of the SGE as if the ring is an array
603 (meaning that "next" element will consume 2 indices) */
604 if (fp->mode == TPA_MODE_GRO)
605 frag_len = min_t(u32, frag_size, (u32)full_page);
607 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
609 rx_pg = &fp->rx_page_ring[sge_idx];
612 /* If we fail to allocate a substitute page, we simply stop
613 where we are and drop the whole packet */
614 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
616 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
620 /* Unmap the page as we're going to pass it to the stack */
621 dma_unmap_page(&bp->pdev->dev,
622 dma_unmap_addr(&old_rx_pg, mapping),
623 SGE_PAGES, DMA_FROM_DEVICE);
624 /* Add one frag and update the appropriate fields in the skb */
625 if (fp->mode == TPA_MODE_LRO)
626 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
630 for (rem = frag_len; rem > 0; rem -= gro_size) {
631 int len = rem > gro_size ? gro_size : rem;
632 skb_fill_page_desc(skb, frag_id++,
633 old_rx_pg.page, offset, len);
635 get_page(old_rx_pg.page);
640 skb->data_len += frag_len;
641 skb->truesize += SGE_PAGES;
642 skb->len += frag_len;
644 frag_size -= frag_len;
650 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
652 if (fp->rx_frag_size)
653 put_page(virt_to_head_page(data));
658 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
660 if (fp->rx_frag_size) {
661 /* GFP_KERNEL allocations are used only during initialization */
662 if (unlikely(gfp_mask & __GFP_WAIT))
663 return (void *)__get_free_page(gfp_mask);
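/* atomic path: use the per-CPU page fragment allocator */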
665 return netdev_alloc_frag(fp->rx_frag_size);
668 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
672 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
674 const struct iphdr *iph = ip_hdr(skb);
677 skb_set_transport_header(skb, sizeof(struct iphdr));
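/* seed the TCP pseudo-header checksum so the stack can complete it after GRO */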
680 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
681 iph->saddr, iph->daddr, 0);
684 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
686 struct ipv6hdr *iph = ipv6_hdr(skb);
689 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
692 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
693 &iph->saddr, &iph->daddr, 0);
696 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
697 void (*gro_func)(struct bnx2x*, struct sk_buff*))
699 skb_set_network_header(skb, 0);
701 tcp_gro_complete(skb);
705 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
709 if (skb_shinfo(skb)->gso_size) {
710 switch (be16_to_cpu(skb->protocol)) {
712 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
715 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
718 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
719 be16_to_cpu(skb->protocol));
723 skb_record_rx_queue(skb, fp->rx_queue);
724 napi_gro_receive(&fp->napi, skb);
727 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
728 struct bnx2x_agg_info *tpa_info,
730 struct eth_end_agg_rx_cqe *cqe,
733 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
734 u8 pad = tpa_info->placement_offset;
735 u16 len = tpa_info->len_on_bd;
736 struct sk_buff *skb = NULL;
737 u8 *new_data, *data = rx_buf->data;
738 u8 old_tpa_state = tpa_info->tpa_state;
740 tpa_info->tpa_state = BNX2X_TPA_STOP;
742 /* If there was an error during the handling of the TPA_START -
743 * drop this aggregation.
745 if (old_tpa_state == BNX2X_TPA_ERROR)
748 /* Try to allocate the new data */
749 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
750 /* Unmap skb in the pool anyway, as we are going to change
751 pool entry status to BNX2X_TPA_STOP even if new skb allocation
753 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
754 fp->rx_buf_size, DMA_FROM_DEVICE);
755 if (likely(new_data))
756 skb = build_skb(data, fp->rx_frag_size);
759 #ifdef BNX2X_STOP_ON_ERROR
760 if (pad + len > fp->rx_buf_size) {
761 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
762 pad, len, fp->rx_buf_size);
768 skb_reserve(skb, pad + NET_SKB_PAD);
770 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
772 skb->protocol = eth_type_trans(skb, bp->dev);
773 skb->ip_summed = CHECKSUM_UNNECESSARY;
775 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
776 skb, cqe, cqe_idx)) {
777 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
778 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
779 bnx2x_gro_receive(bp, fp, skb);
781 DP(NETIF_MSG_RX_STATUS,
782 "Failed to allocate new pages - dropping packet!\n");
783 dev_kfree_skb_any(skb);
786 /* put new data in bin */
787 rx_buf->data = new_data;
791 bnx2x_frag_free(fp, new_data);
793 /* drop the packet and keep the buffer in the bin */
794 DP(NETIF_MSG_RX_STATUS,
795 "Failed to allocate or map a new skb - dropping packet!\n");
796 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
799 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
800 u16 index, gfp_t gfp_mask)
803 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
804 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
807 data = bnx2x_frag_alloc(fp, gfp_mask);
808 if (unlikely(data == NULL))
811 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
814 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
815 bnx2x_frag_free(fp, data);
816 BNX2X_ERR("Can't map rx data\n");
821 dma_unmap_addr_set(rx_buf, mapping, mapping);
823 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
824 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
830 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
831 struct bnx2x_fastpath *fp,
832 struct bnx2x_eth_q_stats *qstats)
834 /* Do nothing if no L4 csum validation was done.
835 * We do not check whether IP csum was validated. For IPv4 we assume
836 * that if the card got as far as validating the L4 csum, it also
837 * validated the IP csum. IPv6 has no IP csum.
839 if (cqe->fast_path_cqe.status_flags &
840 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
843 /* If L4 validation was done, check if an error was found. */
845 if (cqe->fast_path_cqe.type_error_flags &
846 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
847 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
848 qstats->hw_csum_err++;
850 skb->ip_summed = CHECKSUM_UNNECESSARY;
853 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
855 struct bnx2x *bp = fp->bp;
856 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
857 u16 sw_comp_cons, sw_comp_prod;
859 union eth_rx_cqe *cqe;
860 struct eth_fast_path_rx_cqe *cqe_fp;
862 #ifdef BNX2X_STOP_ON_ERROR
863 if (unlikely(bp->panic))
867 bd_cons = fp->rx_bd_cons;
868 bd_prod = fp->rx_bd_prod;
869 bd_prod_fw = bd_prod;
870 sw_comp_cons = fp->rx_comp_cons;
871 sw_comp_prod = fp->rx_comp_prod;
873 comp_ring_cons = RCQ_BD(sw_comp_cons);
874 cqe = &fp->rx_comp_ring[comp_ring_cons];
875 cqe_fp = &cqe->fast_path_cqe;
877 DP(NETIF_MSG_RX_STATUS,
878 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
880 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
881 struct sw_rx_bd *rx_buf = NULL;
884 enum eth_rx_cqe_type cqe_fp_type;
888 enum pkt_hash_types rxhash_type;
890 #ifdef BNX2X_STOP_ON_ERROR
891 if (unlikely(bp->panic))
895 bd_prod = RX_BD(bd_prod);
896 bd_cons = RX_BD(bd_cons);
898 cqe_fp_flags = cqe_fp->type_error_flags;
899 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
901 DP(NETIF_MSG_RX_STATUS,
902 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
903 CQE_TYPE(cqe_fp_flags),
904 cqe_fp_flags, cqe_fp->status_flags,
905 le32_to_cpu(cqe_fp->rss_hash_result),
906 le16_to_cpu(cqe_fp->vlan_tag),
907 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
909 /* is this a slowpath msg? */
910 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
911 bnx2x_sp_event(fp, cqe);
915 rx_buf = &fp->rx_buf_ring[bd_cons];
918 if (!CQE_TYPE_FAST(cqe_fp_type)) {
919 struct bnx2x_agg_info *tpa_info;
920 u16 frag_size, pages;
921 #ifdef BNX2X_STOP_ON_ERROR
923 if (fp->disable_tpa &&
924 (CQE_TYPE_START(cqe_fp_type) ||
925 CQE_TYPE_STOP(cqe_fp_type)))
926 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
927 CQE_TYPE(cqe_fp_type));
930 if (CQE_TYPE_START(cqe_fp_type)) {
931 u16 queue = cqe_fp->queue_index;
932 DP(NETIF_MSG_RX_STATUS,
933 "calling tpa_start on queue %d\n",
936 bnx2x_tpa_start(fp, queue,
942 queue = cqe->end_agg_cqe.queue_index;
943 tpa_info = &fp->tpa_info[queue];
944 DP(NETIF_MSG_RX_STATUS,
945 "calling tpa_stop on queue %d\n",
948 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
951 if (fp->mode == TPA_MODE_GRO)
952 pages = (frag_size + tpa_info->full_page - 1) /
955 pages = SGE_PAGE_ALIGN(frag_size) >>
958 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
959 &cqe->end_agg_cqe, comp_ring_cons);
960 #ifdef BNX2X_STOP_ON_ERROR
965 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
969 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
970 pad = cqe_fp->placement_offset;
971 dma_sync_single_for_cpu(&bp->pdev->dev,
972 dma_unmap_addr(rx_buf, mapping),
973 pad + RX_COPY_THRESH,
976 prefetch(data + pad); /* speed up eth_type_trans() */
977 /* is this an error packet? */
978 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
979 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
980 "ERROR flags %x rx packet %u\n",
981 cqe_fp_flags, sw_comp_cons);
982 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
986 /* Since we don't have a jumbo ring
987 * copy small packets if mtu > 1500
989 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
990 (len <= RX_COPY_THRESH)) {
991 skb = netdev_alloc_skb_ip_align(bp->dev, len);
993 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
994 "ERROR packet dropped because of alloc failure\n");
995 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
998 memcpy(skb->data, data + pad, len);
999 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1001 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1002 GFP_ATOMIC) == 0)) {
1003 dma_unmap_single(&bp->pdev->dev,
1004 dma_unmap_addr(rx_buf, mapping),
1007 skb = build_skb(data, fp->rx_frag_size);
1008 if (unlikely(!skb)) {
1009 bnx2x_frag_free(fp, data);
1010 bnx2x_fp_qstats(bp, fp)->
1011 rx_skb_alloc_failed++;
1014 skb_reserve(skb, pad);
1016 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1017 "ERROR packet dropped because of alloc failure\n");
1018 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1020 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1026 skb->protocol = eth_type_trans(skb, bp->dev);
1028 /* Set Toeplitz hash for a non-LRO skb */
1029 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1030 skb_set_hash(skb, rxhash, rxhash_type);
1032 skb_checksum_none_assert(skb);
1034 if (bp->dev->features & NETIF_F_RXCSUM)
1035 bnx2x_csum_validate(skb, cqe, fp,
1036 bnx2x_fp_qstats(bp, fp));
1038 skb_record_rx_queue(skb, fp->rx_queue);
1040 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1042 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1043 le16_to_cpu(cqe_fp->vlan_tag));
1045 skb_mark_napi_id(skb, &fp->napi);
1047 if (bnx2x_fp_ll_polling(fp))
1048 netif_receive_skb(skb);
1050 napi_gro_receive(&fp->napi, skb);
1052 rx_buf->data = NULL;
1054 bd_cons = NEXT_RX_IDX(bd_cons);
1055 bd_prod = NEXT_RX_IDX(bd_prod);
1056 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1059 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1060 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1062 /* mark CQE as free */
1063 BNX2X_SEED_CQE(cqe_fp);
1065 if (rx_pkt == budget)
1068 comp_ring_cons = RCQ_BD(sw_comp_cons);
1069 cqe = &fp->rx_comp_ring[comp_ring_cons];
1070 cqe_fp = &cqe->fast_path_cqe;
1073 fp->rx_bd_cons = bd_cons;
1074 fp->rx_bd_prod = bd_prod_fw;
1075 fp->rx_comp_cons = sw_comp_cons;
1076 fp->rx_comp_prod = sw_comp_prod;
1078 /* Update producers */
1079 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1082 fp->rx_pkt += rx_pkt;
1088 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1090 struct bnx2x_fastpath *fp = fp_cookie;
1091 struct bnx2x *bp = fp->bp;
1095 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1096 fp->index, fp->fw_sb_id, fp->igu_sb_id);
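/* ack the status block and disable further interrupts from it while NAPI polling runs */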
1098 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1100 #ifdef BNX2X_STOP_ON_ERROR
1101 if (unlikely(bp->panic))
1105 /* Handle Rx and Tx according to MSI-X vector */
1106 for_each_cos_in_tx_queue(fp, cos)
1107 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1109 prefetch(&fp->sb_running_index[SM_RX_ID]);
1110 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1115 /* HW Lock for shared dual port PHYs */
1116 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1118 mutex_lock(&bp->port.phy_mutex);
1120 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1123 void bnx2x_release_phy_lock(struct bnx2x *bp)
1125 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1127 mutex_unlock(&bp->port.phy_mutex);
1130 /* calculates MF speed according to the current line speed and MF configuration */
1131 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1133 u16 line_speed = bp->link_vars.line_speed;
1135 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1136 bp->mf_config[BP_VN(bp)]);
1138 /* Calculate the current MAX line speed limit for the MF
1142 line_speed = (line_speed * maxCfg) / 100;
1143 else { /* SD mode */
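/* maxCfg is in units of 100 Mbps, so this is the VN max rate in Mbps */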
1144 u16 vn_max_rate = maxCfg * 100;
1146 if (vn_max_rate < line_speed)
1147 line_speed = vn_max_rate;
1155 * bnx2x_fill_report_data - fill link report data to report
1157 * @bp: driver handle
1158 * @data: link state to update
1160 * It uses non-atomic bit operations because it is called under the mutex.
1162 static void bnx2x_fill_report_data(struct bnx2x *bp,
1163 struct bnx2x_link_report_data *data)
1165 u16 line_speed = bnx2x_get_mf_speed(bp);
1167 memset(data, 0, sizeof(*data));
1169 /* Fill the report data: effective line speed */
1170 data->line_speed = line_speed;
1173 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1174 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1175 &data->link_report_flags);
1178 if (bp->link_vars.duplex == DUPLEX_FULL)
1179 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1181 /* Rx Flow Control is ON */
1182 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1183 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1185 /* Tx Flow Control is ON */
1186 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1187 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1191 * bnx2x_link_report - report link status to OS.
1193 * @bp: driver handle
1195 * Calls the __bnx2x_link_report() under the same locking scheme
1196 * as a link/PHY state managing code to ensure a consistent link
1200 void bnx2x_link_report(struct bnx2x *bp)
1202 bnx2x_acquire_phy_lock(bp);
1203 __bnx2x_link_report(bp);
1204 bnx2x_release_phy_lock(bp);
1208 * __bnx2x_link_report - report link status to OS.
1210 * @bp: driver handle
1212 * Non-atomic implementation.
1213 * Should be called under the phy_lock.
1215 void __bnx2x_link_report(struct bnx2x *bp)
1217 struct bnx2x_link_report_data cur_data;
1220 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1221 bnx2x_read_mf_cfg(bp);
1223 /* Read the current link report info */
1224 bnx2x_fill_report_data(bp, &cur_data);
1226 /* Don't report link down or exactly the same link status twice */
1227 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1228 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1229 &bp->last_reported_link.link_report_flags) &&
1230 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1231 &cur_data.link_report_flags)))
1236 /* We are going to report new link parameters now -
1237 * remember the current data for the next time.
1239 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1241 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1242 &cur_data.link_report_flags)) {
1243 netif_carrier_off(bp->dev);
1244 netdev_err(bp->dev, "NIC Link is Down\n");
1250 netif_carrier_on(bp->dev);
1252 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1253 &cur_data.link_report_flags))
1258 /* Handle the FC at the end so that only these flags would be
1259 * possibly set. This way we may easily check if there is no FC
1262 if (cur_data.link_report_flags) {
1263 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1264 &cur_data.link_report_flags)) {
1265 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1266 &cur_data.link_report_flags))
1267 flow = "ON - receive & transmit";
1269 flow = "ON - receive";
1271 flow = "ON - transmit";
1276 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1277 cur_data.line_speed, duplex, flow);
1281 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
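/* Chain the SGE ring pages: the next-page element at the end of each page points to the following page, wrapping back to the first */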
1285 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1286 struct eth_rx_sge *sge;
1288 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1290 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1291 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1294 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1295 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1299 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1300 struct bnx2x_fastpath *fp, int last)
1304 for (i = 0; i < last; i++) {
1305 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1306 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1307 u8 *data = first_buf->data;
1310 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1313 if (tpa_info->tpa_state == BNX2X_TPA_START)
1314 dma_unmap_single(&bp->pdev->dev,
1315 dma_unmap_addr(first_buf, mapping),
1316 fp->rx_buf_size, DMA_FROM_DEVICE);
1317 bnx2x_frag_free(fp, data);
1318 first_buf->data = NULL;
1322 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1326 for_each_rx_queue_cnic(bp, j) {
1327 struct bnx2x_fastpath *fp = &bp->fp[j];
1331 /* Activate BD ring */
1333 * this will generate an interrupt (to the TSTORM)
1334 * must only be done after chip is initialized
1336 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1341 void bnx2x_init_rx_rings(struct bnx2x *bp)
1343 int func = BP_FUNC(bp);
1347 /* Allocate TPA resources */
1348 for_each_eth_queue(bp, j) {
1349 struct bnx2x_fastpath *fp = &bp->fp[j];
1352 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1354 if (!fp->disable_tpa) {
1355 /* Fill the per-aggregation pool */
1356 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1357 struct bnx2x_agg_info *tpa_info =
1359 struct sw_rx_bd *first_buf =
1360 &tpa_info->first_buf;
1363 bnx2x_frag_alloc(fp, GFP_KERNEL);
1364 if (!first_buf->data) {
1365 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1367 bnx2x_free_tpa_pool(bp, fp, i);
1368 fp->disable_tpa = 1;
1371 dma_unmap_addr_set(first_buf, mapping, 0);
1372 tpa_info->tpa_state = BNX2X_TPA_STOP;
1375 /* "next page" elements initialization */
1376 bnx2x_set_next_page_sgl(fp);
1378 /* set SGEs bit mask */
1379 bnx2x_init_sge_ring_bit_mask(fp);
1381 /* Allocate SGEs and initialize the ring elements */
1382 for (i = 0, ring_prod = 0;
1383 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1385 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1387 BNX2X_ERR("was only able to allocate %d rx sges\n",
1389 BNX2X_ERR("disabling TPA for queue[%d]\n",
1391 /* Cleanup already allocated elements */
1392 bnx2x_free_rx_sge_range(bp, fp,
1394 bnx2x_free_tpa_pool(bp, fp,
1396 fp->disable_tpa = 1;
1400 ring_prod = NEXT_SGE_IDX(ring_prod);
1403 fp->rx_sge_prod = ring_prod;
1407 for_each_eth_queue(bp, j) {
1408 struct bnx2x_fastpath *fp = &bp->fp[j];
1412 /* Activate BD ring */
1414 * this will generate an interrupt (to the TSTORM)
1415 * must only be done after chip is initialized
1417 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1423 if (CHIP_IS_E1(bp)) {
1424 REG_WR(bp, BAR_USTRORM_INTMEM +
1425 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1426 U64_LO(fp->rx_comp_mapping));
1427 REG_WR(bp, BAR_USTRORM_INTMEM +
1428 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1429 U64_HI(fp->rx_comp_mapping));
1434 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1437 struct bnx2x *bp = fp->bp;
1439 for_each_cos_in_tx_queue(fp, cos) {
1440 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1441 unsigned pkts_compl = 0, bytes_compl = 0;
1443 u16 sw_prod = txdata->tx_pkt_prod;
1444 u16 sw_cons = txdata->tx_pkt_cons;
1446 while (sw_cons != sw_prod) {
1447 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1448 &pkts_compl, &bytes_compl);
1452 netdev_tx_reset_queue(
1453 netdev_get_tx_queue(bp->dev,
1454 txdata->txq_index));
1458 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1462 for_each_tx_queue_cnic(bp, i) {
1463 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1467 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1471 for_each_eth_queue(bp, i) {
1472 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1476 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1478 struct bnx2x *bp = fp->bp;
1481 /* ring wasn't allocated */
1482 if (fp->rx_buf_ring == NULL)
1485 for (i = 0; i < NUM_RX_BD; i++) {
1486 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1487 u8 *data = rx_buf->data;
1491 dma_unmap_single(&bp->pdev->dev,
1492 dma_unmap_addr(rx_buf, mapping),
1493 fp->rx_buf_size, DMA_FROM_DEVICE);
1495 rx_buf->data = NULL;
1496 bnx2x_frag_free(fp, data);
1500 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1504 for_each_rx_queue_cnic(bp, j) {
1505 bnx2x_free_rx_bds(&bp->fp[j]);
1509 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1513 for_each_eth_queue(bp, j) {
1514 struct bnx2x_fastpath *fp = &bp->fp[j];
1516 bnx2x_free_rx_bds(fp);
1518 if (!fp->disable_tpa)
1519 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1523 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1525 bnx2x_free_tx_skbs_cnic(bp);
1526 bnx2x_free_rx_skbs_cnic(bp);
1529 void bnx2x_free_skbs(struct bnx2x *bp)
1531 bnx2x_free_tx_skbs(bp);
1532 bnx2x_free_rx_skbs(bp);
1535 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1537 /* load old values */
1538 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1540 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1541 /* leave all but MAX value */
1542 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1544 /* set new MAX value */
1545 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1546 & FUNC_MF_CFG_MAX_BW_MASK;
1548 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1553 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1555 * @bp: driver handle
1556 * @nvecs: number of vectors to be released
1558 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1562 if (nvecs == offset)
1565 /* VFs don't have a default SB */
1567 free_irq(bp->msix_table[offset].vector, bp->dev);
1568 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1569 bp->msix_table[offset].vector);
1573 if (CNIC_SUPPORT(bp)) {
1574 if (nvecs == offset)
1579 for_each_eth_queue(bp, i) {
1580 if (nvecs == offset)
1582 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1583 i, bp->msix_table[offset].vector);
1585 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1589 void bnx2x_free_irq(struct bnx2x *bp)
1591 if (bp->flags & USING_MSIX_FLAG &&
1592 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1593 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1595 /* vfs don't have a default status block */
1599 bnx2x_free_msix_irqs(bp, nvecs);
1601 free_irq(bp->dev->irq, bp->dev);
1605 int bnx2x_enable_msix(struct bnx2x *bp)
1607 int msix_vec = 0, i, rc;
1609 /* VFs don't have a default status block */
1611 bp->msix_table[msix_vec].entry = msix_vec;
1612 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1613 bp->msix_table[0].entry);
1617 /* Cnic requires an msix vector for itself */
1618 if (CNIC_SUPPORT(bp)) {
1619 bp->msix_table[msix_vec].entry = msix_vec;
1620 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1621 msix_vec, bp->msix_table[msix_vec].entry);
1625 /* We need separate vectors for ETH queues only (not FCoE) */
1626 for_each_eth_queue(bp, i) {
1627 bp->msix_table[msix_vec].entry = msix_vec;
1628 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1629 msix_vec, msix_vec, i);
1633 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1636 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1639 * reconfigure number of tx/rx queues according to available
1642 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1643 /* how many fewer vectors will we have? */
1644 int diff = msix_vec - rc;
1646 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1648 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1651 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1655 * decrease number of queues by number of unallocated entries
1657 bp->num_ethernet_queues -= diff;
1658 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1660 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1662 } else if (rc > 0) {
1663 /* Get by with single vector */
1664 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1666 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1671 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1672 bp->flags |= USING_SINGLE_MSIX_FLAG;
1674 BNX2X_DEV_INFO("set number of queues to 1\n");
1675 bp->num_ethernet_queues = 1;
1676 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1677 } else if (rc < 0) {
1678 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1682 bp->flags |= USING_MSIX_FLAG;
1687 /* fall back to INTx if not enough memory */
1689 bp->flags |= DISABLE_MSI_FLAG;
1694 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1696 int i, rc, offset = 0;
1698 /* no default status block for vf */
1700 rc = request_irq(bp->msix_table[offset++].vector,
1701 bnx2x_msix_sp_int, 0,
1702 bp->dev->name, bp->dev);
1704 BNX2X_ERR("request sp irq failed\n");
1709 if (CNIC_SUPPORT(bp))
1712 for_each_eth_queue(bp, i) {
1713 struct bnx2x_fastpath *fp = &bp->fp[i];
1714 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1717 rc = request_irq(bp->msix_table[offset].vector,
1718 bnx2x_msix_fp_int, 0, fp->name, fp);
1720 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1721 bp->msix_table[offset].vector, rc);
1722 bnx2x_free_msix_irqs(bp, offset);
1729 i = BNX2X_NUM_ETH_QUEUES(bp);
1731 offset = 1 + CNIC_SUPPORT(bp);
1732 netdev_info(bp->dev,
1733 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1734 bp->msix_table[0].vector,
1735 0, bp->msix_table[offset].vector,
1736 i - 1, bp->msix_table[offset + i - 1].vector);
1738 offset = CNIC_SUPPORT(bp);
1739 netdev_info(bp->dev,
1740 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1741 0, bp->msix_table[offset].vector,
1742 i - 1, bp->msix_table[offset + i - 1].vector);
1747 int bnx2x_enable_msi(struct bnx2x *bp)
1751 rc = pci_enable_msi(bp->pdev);
1753 BNX2X_DEV_INFO("MSI is not attainable\n");
1756 bp->flags |= USING_MSI_FLAG;
1761 static int bnx2x_req_irq(struct bnx2x *bp)
1763 unsigned long flags;
1766 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1769 flags = IRQF_SHARED;
1771 if (bp->flags & USING_MSIX_FLAG)
1772 irq = bp->msix_table[0].vector;
1774 irq = bp->pdev->irq;
1776 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1779 static int bnx2x_setup_irqs(struct bnx2x *bp)
1782 if (bp->flags & USING_MSIX_FLAG &&
1783 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1784 rc = bnx2x_req_msix_irqs(bp);
1788 rc = bnx2x_req_irq(bp);
1790 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1793 if (bp->flags & USING_MSI_FLAG) {
1794 bp->dev->irq = bp->pdev->irq;
1795 netdev_info(bp->dev, "using MSI IRQ %d\n",
1798 if (bp->flags & USING_MSIX_FLAG) {
1799 bp->dev->irq = bp->msix_table[0].vector;
1800 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1808 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1812 for_each_rx_queue_cnic(bp, i) {
1813 bnx2x_fp_init_lock(&bp->fp[i]);
1814 napi_enable(&bnx2x_fp(bp, i, napi));
1818 static void bnx2x_napi_enable(struct bnx2x *bp)
1822 for_each_eth_queue(bp, i) {
1823 bnx2x_fp_init_lock(&bp->fp[i]);
1824 napi_enable(&bnx2x_fp(bp, i, napi));
1828 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1833 for_each_rx_queue_cnic(bp, i) {
1834 napi_disable(&bnx2x_fp(bp, i, napi));
1835 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1841 static void bnx2x_napi_disable(struct bnx2x *bp)
1846 for_each_eth_queue(bp, i) {
1847 napi_disable(&bnx2x_fp(bp, i, napi));
1848 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1854 void bnx2x_netif_start(struct bnx2x *bp)
1856 if (netif_running(bp->dev)) {
1857 bnx2x_napi_enable(bp);
1858 if (CNIC_LOADED(bp))
1859 bnx2x_napi_enable_cnic(bp);
1860 bnx2x_int_enable(bp);
1861 if (bp->state == BNX2X_STATE_OPEN)
1862 netif_tx_wake_all_queues(bp->dev);
1866 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1868 bnx2x_int_disable_sync(bp, disable_hw);
1869 bnx2x_napi_disable(bp);
1870 if (CNIC_LOADED(bp))
1871 bnx2x_napi_disable_cnic(bp);
1874 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1876 struct bnx2x *bp = netdev_priv(dev);
1878 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1879 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1880 u16 ether_type = ntohs(hdr->h_proto);
1882 /* Skip VLAN tag if present */
1883 if (ether_type == ETH_P_8021Q) {
1884 struct vlan_ethhdr *vhdr =
1885 (struct vlan_ethhdr *)skb->data;
1887 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1890 /* If ethertype is FCoE or FIP - use FCoE ring */
1891 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1892 return bnx2x_fcoe_tx(bp, txq_index);
1895 /* select a non-FCoE queue */
1896 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1899 void bnx2x_set_num_queues(struct bnx2x *bp)
1902 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1904 /* override in STORAGE SD modes */
1905 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1906 bp->num_ethernet_queues = 1;
1908 /* Add special queues */
1909 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1910 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1912 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1916 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1918 * @bp: Driver handle
1920 * We currently support at most 16 Tx queues for each CoS, thus we will
1921 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1924 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1925 * index after all ETH L2 indices.
1927 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1928 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1929 * 16..31,...) with indices that are not coupled with any real Tx queue.
1931 * The proper configuration of skb->queue_mapping is handled by
1932 * bnx2x_select_queue() and __skb_tx_hash().
1934 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1935 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1937 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
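/* one netdev Tx queue per CoS for every ETH L2 ring, one Rx queue per ring */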
1941 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1942 rx = BNX2X_NUM_ETH_QUEUES(bp);
1944 /* account for fcoe queue */
1945 if (include_cnic && !NO_FCOE(bp)) {
1950 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1952 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1955 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1957 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1961 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1967 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1971 for_each_queue(bp, i) {
1972 struct bnx2x_fastpath *fp = &bp->fp[i];
1975 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1978 * Although there are no IP frames expected to arrive to
1979 * this ring we still want to add an
1980 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1983 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1986 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1987 IP_HEADER_ALIGNMENT_PADDING +
1990 BNX2X_FW_RX_ALIGN_END;
1991 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1992 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1993 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1995 fp->rx_frag_size = 0;
1999 static int bnx2x_init_rss(struct bnx2x *bp)
2002 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2004 /* Prepare the initial contents for the indirection table if RSS is
2007 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2008 bp->rss_conf_obj.ind_table[i] =
2010 ethtool_rxfh_indir_default(i, num_eth_queues);
2013 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2014 * per-port, so if explicit configuration is needed, do it only
2017 * For 57712 and newer on the other hand it's a per-function
2020 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2023 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2024 bool config_hash, bool enable)
2026 struct bnx2x_config_rss_params params = {NULL};
2028 /* Although RSS is meaningless when there is a single HW queue we
2029 * still need it enabled in order to have HW Rx hash generated.
2031 * if (!is_eth_multi(bp))
2032 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2035 params.rss_obj = rss_obj;
2037 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2040 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2042 /* RSS configuration */
2043 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2044 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2045 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2046 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2047 if (rss_obj->udp_rss_v4)
2048 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2049 if (rss_obj->udp_rss_v6)
2050 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2052 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2056 params.rss_result_mask = MULTI_MASK;
2058 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2062 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2063 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2067 return bnx2x_config_rss(bp, ¶ms);
2069 return bnx2x_vfpf_config_rss(bp, ¶ms);
2072 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2074 struct bnx2x_func_state_params func_params = {NULL};
2076 /* Prepare parameters for function state transitions */
2077 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2079 func_params.f_obj = &bp->func_obj;
2080 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2082 func_params.params.hw_init.load_phase = load_code;
2084 return bnx2x_func_state_change(bp, &func_params);
2088 * Cleans the objects that have internal lists without sending
2089 * ramrods. Should be run when interrupts are disabled.
2091 void bnx2x_squeeze_objects(struct bnx2x *bp)
2094 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2095 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2096 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2098 /***************** Cleanup MACs' object first *************************/
2100 /* Wait for completion of requested */
2101 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2102 /* Perform a dry cleanup */
2103 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2105 /* Clean ETH primary MAC */
2106 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2107 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2110 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2112 /* Cleanup UC list */
2114 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2115 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2118 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2120 /***************** Now clean mcast object *****************************/
2121 rparam.mcast_obj = &bp->mcast_obj;
2122 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2124 /* Add a DEL command... - Since we're doing a driver cleanup only,
2125 * we take a lock surrounding both the initial send and the CONTs,
2126 * as we don't want a true completion to disrupt us in the middle.
2128 netif_addr_lock_bh(bp->dev);
2129 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2131 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2134 /* ...and wait until all pending commands are cleared */
2135 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2138 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2140 netif_addr_unlock_bh(bp->dev);
2144 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2146 netif_addr_unlock_bh(bp->dev);
2149 #ifndef BNX2X_STOP_ON_ERROR
2150 #define LOAD_ERROR_EXIT(bp, label) \
2152 (bp)->state = BNX2X_STATE_ERROR; \
2156 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2158 bp->cnic_loaded = false; \
2161 #else /*BNX2X_STOP_ON_ERROR*/
2162 #define LOAD_ERROR_EXIT(bp, label) \
2164 (bp)->state = BNX2X_STATE_ERROR; \
2168 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2170 bp->cnic_loaded = false; \
2174 #endif /*BNX2X_STOP_ON_ERROR*/
2176 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2178 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2179 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2183 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2185 int num_groups, vf_headroom = 0;
2186 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2188 /* number of queues for statistics is number of eth queues + FCoE */
2189 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2191 /* Total number of FW statistics requests =
2192 * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2193 * and fcoe l2 queue) stats + num of queues (which includes another 1
2194 * for fcoe l2 queue if applicable)
2196 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2198 /* vf stats appear in the request list, but their data is allocated by
2199 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2200 * it is used to determine where to place the vf stats queries in the
2204 vf_headroom = bnx2x_vf_headroom(bp);
2206 /* Request is built from stats_query_header and an array of
2207 * stats_query_cmd_group each of which contains
2208 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2209 * configured in the stats_query_header.
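* i.e. num_groups = ceil((fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT)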
2212 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2213 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2216 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2217 bp->fw_stats_num, vf_headroom, num_groups);
2218 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2219 num_groups * sizeof(struct stats_query_cmd_group);
2221 /* Data for statistics requests + stats_counter
2222 * stats_counter holds per-STORM counters that are incremented
2223 * when STORM has finished with the current request.
2224 * memory for FCoE offloaded statistics is counted anyway,
2225 * even if they will not be sent.
2226 * VF stats are not accounted for here as the data of VF stats is stored
2227 * in memory allocated by the VF, not here.
2229 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2230 sizeof(struct per_pf_stats) +
2231 sizeof(struct fcoe_statistics_params) +
2232 sizeof(struct per_queue_stats) * num_queue_stats +
2233 sizeof(struct stats_counter);
2235 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2236 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2239 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2240 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2241 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2242 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2243 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2244 bp->fw_stats_req_sz;
2246 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2247 U64_HI(bp->fw_stats_req_mapping),
2248 U64_LO(bp->fw_stats_req_mapping));
2249 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2250 U64_HI(bp->fw_stats_data_mapping),
2251 U64_LO(bp->fw_stats_data_mapping));
2255 bnx2x_free_fw_stats_mem(bp);
2256 BNX2X_ERR("Can't allocate FW stats memory\n");
2260 /* send load request to mcp and analyze response */
2261 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2267 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2268 DRV_MSG_SEQ_NUMBER_MASK);
2269 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2271 /* Get current FW pulse sequence */
2272 bp->fw_drv_pulse_wr_seq =
2273 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2274 DRV_PULSE_SEQ_MASK);
2275 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2277 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2279 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2280 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2283 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2285 /* if mcp fails to respond we must abort */
2286 if (!(*load_code)) {
2287 BNX2X_ERR("MCP response failure, aborting\n");
2291 /* If mcp refused (e.g. other port is in diagnostic mode) we
* must abort
*/
2294 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2295 BNX2X_ERR("MCP refused load request, aborting\n");
2301 /* check whether another PF has already loaded FW to chip. In
2302 * virtualized environments a pf from another VM may have already
2303 * initialized the device, including loading FW.
*/
2305 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2307 /* is another pf loaded on this engine? */
2308 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2309 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2310 /* build my FW version dword */
2311 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2312 (BCM_5710_FW_MINOR_VERSION << 8) +
2313 (BCM_5710_FW_REVISION_VERSION << 16) +
2314 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2316 /* read loaded FW from chip */
2317 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
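/* The first word of XSEM PRAM is where the running firmware exposes its
 * version dword, so a single register read is all the comparison below
 * needs.
 */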
2319 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
loaded_fw, my_fw);
2322 /* abort nic load if version mismatch */
2323 if (my_fw != loaded_fw) {
if (print_err)
2325 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
loaded_fw, my_fw);
else
2328 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
loaded_fw, my_fw);
return -EBUSY;
2336 /* returns the "mcp load_code" according to global load_count array */
2337 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2339 int path = BP_PATH(bp);
2341 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2342 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2343 bnx2x_load_count[path][2]);
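/* bnx2x_load_count[path][0] counts every function loaded on this path,
 * while bnx2x_load_count[path][1 + port] counts the functions on this port.
 * The first function on the path reports COMMON, the first on the port
 * reports PORT, everyone else reports FUNCTION (see below).
 */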
2344 bnx2x_load_count[path][0]++;
2345 bnx2x_load_count[path][1 + port]++;
2346 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2347 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2348 bnx2x_load_count[path][2]);
2349 if (bnx2x_load_count[path][0] == 1)
2350 return FW_MSG_CODE_DRV_LOAD_COMMON;
2351 else if (bnx2x_load_count[path][1 + port] == 1)
2352 return FW_MSG_CODE_DRV_LOAD_PORT;
2354 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2357 /* mark PMF if applicable */
2358 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2360 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2361 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2362 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
bp->port.pmf = 1;
2364 /* We need the barrier to ensure the ordering between the
2365 * writing to bp->port.pmf here and reading it from the
2366 * bnx2x_periodic_task().
*/
smp_mb();
} else {
bp->port.pmf = 0;
}
2373 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2376 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2378 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2379 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2380 (bp->common.shmem2_base)) {
2381 if (SHMEM2_HAS(bp, dcc_support))
2382 SHMEM2_WR(bp, dcc_support,
2383 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2384 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2385 if (SHMEM2_HAS(bp, afex_driver_support))
2386 SHMEM2_WR(bp, afex_driver_support,
2387 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2390 /* Set AFEX default VLAN tag to an invalid value */
2391 bp->afex_def_vlan_tag = -1;
/**
2395 * bnx2x_bz_fp - zero content of the fastpath structure.
2397 * @bp: driver handle
2398 * @index: fastpath index to be zeroed
2400 * Makes sure the contents of the bp->fp[index].napi are kept
* intact.
*/
2403 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2405 struct bnx2x_fastpath *fp = &bp->fp[index];
2407 struct napi_struct orig_napi = fp->napi;
2408 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2410 /* bzero bnx2x_fastpath contents */
2412 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2413 sizeof(struct bnx2x_agg_info));
2414 memset(fp, 0, sizeof(*fp));
2416 /* Restore the NAPI object as it has been already initialized */
2417 fp->napi = orig_napi;
2418 fp->tpa_info = orig_tpa_info;
if (IS_ETH_FP(fp))
2422 fp->max_cos = bp->max_cos;
else
2424 /* Special queues support only one CoS */
fp->max_cos = 1;

2427 /* Init txdata pointers */
if (IS_FCOE_FP(fp))
2429 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
if (IS_ETH_FP(fp))
2431 for_each_cos_in_tx_queue(fp, cos)
2432 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2433 BNX2X_NUM_ETH_QUEUES(bp) + index];
2435 /* set the tpa flag for each queue. The tpa flag determines the queue
2436 * minimal size so it must be set prior to queue memory allocation
*/
2438 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2439 (bp->flags & GRO_ENABLE_FLAG &&
2440 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2441 if (bp->flags & TPA_ENABLE_FLAG)
2442 fp->mode = TPA_MODE_LRO;
2443 else if (bp->flags & GRO_ENABLE_FLAG)
2444 fp->mode = TPA_MODE_GRO;
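/* Note: when both TPA_ENABLE_FLAG and GRO_ENABLE_FLAG are set, the LRO
 * mode chosen above takes precedence for this queue.
 */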
2446 /* We don't want TPA on an FCoE L2 ring */
if (IS_FCOE_FP(fp))
2448 fp->disable_tpa = 1;
2451 int bnx2x_load_cnic(struct bnx2x *bp)
2453 int i, rc, port = BP_PORT(bp);
2455 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2457 mutex_init(&bp->cnic_mutex);
2460 rc = bnx2x_alloc_mem_cnic(bp);
2462 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2463 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2467 rc = bnx2x_alloc_fp_mem_cnic(bp);
2469 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2470 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2473 /* Update the number of queues with the cnic queues */
2474 rc = bnx2x_set_real_num_queues(bp, 1);
2476 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2477 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2480 /* Add all CNIC NAPI objects */
2481 bnx2x_add_all_napi_cnic(bp);
2482 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2483 bnx2x_napi_enable_cnic(bp);
2485 rc = bnx2x_init_hw_func_cnic(bp);
2487 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2489 bnx2x_nic_init_cnic(bp);
2492 /* Enable Timer scan */
2493 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2495 /* setup cnic queues */
2496 for_each_cnic_queue(bp, i) {
2497 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2499 BNX2X_ERR("Queue setup failed\n");
2500 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2505 /* Initialize Rx filter. */
2506 bnx2x_set_rx_mode_inner(bp);
2508 /* re-read iscsi info */
2509 bnx2x_get_iscsi_info(bp);
2510 bnx2x_setup_cnic_irq_info(bp);
2511 bnx2x_setup_cnic_info(bp);
2512 bp->cnic_loaded = true;
2513 if (bp->state == BNX2X_STATE_OPEN)
2514 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2516 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2520 #ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
2522 /* Disable Timer scan */
2523 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
2526 bnx2x_napi_disable_cnic(bp);
2527 /* Update the number of queues without the cnic queues */
2528 if (bnx2x_set_real_num_queues(bp, 0))
2529 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
2531 BNX2X_ERR("CNIC-related load failed\n");
2532 bnx2x_free_fp_mem_cnic(bp);
2533 bnx2x_free_mem_cnic(bp);
return rc;
2535 #endif /* ! BNX2X_STOP_ON_ERROR */
2538 /* must be called with rtnl_lock */
2539 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2541 int port = BP_PORT(bp);
2542 int i, rc = 0, load_code = 0;
2544 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2546 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2548 #ifdef BNX2X_STOP_ON_ERROR
2549 if (unlikely(bp->panic)) {
2550 BNX2X_ERR("Can't load NIC when there is panic\n");
2555 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2557 /* zero the structure w/o any lock, before SP handler is initialized */
2558 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2559 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2560 &bp->last_reported_link.link_report_flags);
2563 /* must be called before memory allocation and HW init */
2564 bnx2x_ilt_set_info(bp);
/*
2567 * Zero fastpath structures preserving invariants like napi, which are
2568 * allocated only once, fp index, max_cos, bp pointer.
2569 * Also set fp->disable_tpa and txdata_ptr.
*/
2571 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2572 for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
2574 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2575 bp->num_cnic_queues) *
2576 sizeof(struct bnx2x_fp_txdata));
2578 bp->fcoe_init = false;
2580 /* Set the receive queues buffer size */
2581 bnx2x_set_rx_buf_size(bp);
2584 rc = bnx2x_alloc_mem(bp);
2586 BNX2X_ERR("Unable to allocate bp memory\n");
2591 /* need to be done after alloc mem, since it's self adjusting to amount
2592 * of memory available for RSS queues
2594 rc = bnx2x_alloc_fp_mem(bp);
2596 BNX2X_ERR("Unable to allocate memory for fps\n");
2597 LOAD_ERROR_EXIT(bp, load_error0);
2600 /* Allocate memory for FW statistics */
2601 if (bnx2x_alloc_fw_stats_mem(bp))
2602 LOAD_ERROR_EXIT(bp, load_error0);
2604 /* request pf to initialize status blocks */
2606 rc = bnx2x_vfpf_init(bp);
2608 LOAD_ERROR_EXIT(bp, load_error0);
2611 /* Since bnx2x_alloc_mem() may update bp->num_queues,
2612 * bnx2x_set_real_num_queues() must always come after it.
2613 * At this stage cnic queues are not counted.
*/
2615 rc = bnx2x_set_real_num_queues(bp, 0);
2617 BNX2X_ERR("Unable to set real_num_queues\n");
2618 LOAD_ERROR_EXIT(bp, load_error0);
2621 /* configure multi cos mappings in kernel.
2622 * this configuration may be overridden by a multi class queue
2623 * discipline or by a dcbx negotiation result.
2625 bnx2x_setup_tc(bp->dev, bp->max_cos);
2627 /* Add all NAPI objects */
2628 bnx2x_add_all_napi(bp);
2629 DP(NETIF_MSG_IFUP, "napi added\n");
2630 bnx2x_napi_enable(bp);
2633 /* set pf load just before approaching the MCP */
2634 bnx2x_set_pf_load(bp);
2636 /* if mcp exists send load request and analyze response */
2637 if (!BP_NOMCP(bp)) {
2638 /* attempt to load pf */
2639 rc = bnx2x_nic_load_request(bp, &load_code);
if (rc)
2641 LOAD_ERROR_EXIT(bp, load_error1);

2643 /* what did mcp say? */
2644 rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
2646 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2647 LOAD_ERROR_EXIT(bp, load_error2);
}
} else {
2650 load_code = bnx2x_nic_load_no_mcp(bp, port);
}
2653 /* mark pmf if applicable */
2654 bnx2x_nic_load_pmf(bp, load_code);
2656 /* Init Function state controlling object */
2657 bnx2x__init_func_obj(bp);
2660 rc = bnx2x_init_hw(bp, load_code);
2662 BNX2X_ERR("HW init failed, aborting\n");
2663 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2664 LOAD_ERROR_EXIT(bp, load_error2);
2668 bnx2x_pre_irq_nic_init(bp);
2670 /* Connect to IRQs */
2671 rc = bnx2x_setup_irqs(bp);
2673 BNX2X_ERR("setup irqs failed\n");
2675 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2676 LOAD_ERROR_EXIT(bp, load_error2);
2679 /* Init per-function objects */
2681 /* Setup NIC internals and enable interrupts */
2682 bnx2x_post_irq_nic_init(bp, load_code);
2684 bnx2x_init_bp_objs(bp);
2685 bnx2x_iov_nic_init(bp);
2687 /* Set AFEX default VLAN tag to an invalid value */
2688 bp->afex_def_vlan_tag = -1;
2689 bnx2x_nic_load_afex_dcc(bp, load_code);
2690 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2691 rc = bnx2x_func_start(bp);
2693 BNX2X_ERR("Function start failed!\n");
2694 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2696 LOAD_ERROR_EXIT(bp, load_error3);
2699 /* Send LOAD_DONE command to MCP */
2700 if (!BP_NOMCP(bp)) {
2701 load_code = bnx2x_fw_command(bp,
2702 DRV_MSG_CODE_LOAD_DONE, 0);
2704 BNX2X_ERR("MCP response failure, aborting\n");
2706 LOAD_ERROR_EXIT(bp, load_error3);
2710 /* initialize FW coalescing state machines in RAM */
2711 bnx2x_update_coalesce(bp);
2714 /* setup the leading queue */
2715 rc = bnx2x_setup_leading(bp);
2717 BNX2X_ERR("Setup leading failed!\n");
2718 LOAD_ERROR_EXIT(bp, load_error3);
2721 /* set up the rest of the queues */
2722 for_each_nondefault_eth_queue(bp, i) {
2724 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2726 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2728 BNX2X_ERR("Queue %d setup failed\n", i);
2729 LOAD_ERROR_EXIT(bp, load_error3);
2734 rc = bnx2x_init_rss(bp);
2736 BNX2X_ERR("PF RSS init failed\n");
2737 LOAD_ERROR_EXIT(bp, load_error3);
2740 /* Now when Clients are configured we are ready to work */
2741 bp->state = BNX2X_STATE_OPEN;
2743 /* Configure a ucast MAC */
2745 rc = bnx2x_set_eth_mac(bp, true);
2747 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2750 BNX2X_ERR("Setting Ethernet MAC failed\n");
2751 LOAD_ERROR_EXIT(bp, load_error3);
2754 if (IS_PF(bp) && bp->pending_max) {
2755 bnx2x_update_max_mf_config(bp, bp->pending_max);
2756 bp->pending_max = 0;
2760 rc = bnx2x_initial_phy_init(bp, load_mode);
2762 LOAD_ERROR_EXIT(bp, load_error3);
2764 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2766 /* Start fast path */
2768 /* Initialize Rx filter. */
2769 bnx2x_set_rx_mode_inner(bp);
2772 switch (load_mode) {
case LOAD_NORMAL:
2774 /* Tx queue should be only re-enabled */
2775 netif_tx_wake_all_queues(bp->dev);
break;

case LOAD_OPEN:
2779 netif_tx_start_all_queues(bp->dev);
2780 smp_mb__after_clear_bit();
break;

case LOAD_DIAG:
2784 case LOAD_LOOPBACK_EXT:
2785 bp->state = BNX2X_STATE_DIAG;
break;

default:
break;
}
2793 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2795 bnx2x__link_status_update(bp);
2797 /* start the timer */
2798 mod_timer(&bp->timer, jiffies + bp->current_interval);
2800 if (CNIC_ENABLED(bp))
2801 bnx2x_load_cnic(bp);
2803 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2804 /* mark driver is loaded in shmem2 */
2806 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2807 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2808 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2809 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2812 /* Wait for all pending SP commands to complete */
2813 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2814 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2815 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2819 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2820 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2821 bnx2x_dcbx_init(bp, false);
2823 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2827 #ifndef BNX2X_STOP_ON_ERROR
load_error3:
2830 bnx2x_int_disable_sync(bp, 1);
2832 /* Clean queueable objects */
2833 bnx2x_squeeze_objects(bp);
2836 /* Free SKBs, SGEs, TPA pool and driver internals */
2837 bnx2x_free_skbs(bp);
2838 for_each_rx_queue(bp, i)
2839 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
2844 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2845 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2846 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
load_error1:
2851 bnx2x_napi_disable(bp);
2852 bnx2x_del_all_napi(bp);
2854 /* clear pf_load status, as it was already set */
2856 bnx2x_clear_pf_load(bp);
load_error0:
2858 bnx2x_free_fw_stats_mem(bp);
2859 bnx2x_free_fp_mem(bp);
2863 #endif /* ! BNX2X_STOP_ON_ERROR */
2866 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2870 /* Wait until tx fastpath tasks complete */
2871 for_each_tx_queue(bp, i) {
2872 struct bnx2x_fastpath *fp = &bp->fp[i];
2874 for_each_cos_in_tx_queue(fp, cos)
2875 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2882 /* must be called with rtnl_lock */
2883 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2886 bool global = false;
2888 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2890 /* mark driver is unloaded in shmem2 */
2891 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2893 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2894 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2895 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2898 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2899 (bp->state == BNX2X_STATE_CLOSED ||
2900 bp->state == BNX2X_STATE_ERROR)) {
2901 /* We can get here if the driver has been unloaded
2902 * during parity error recovery and is either waiting for a
2903 * leader to complete or for other functions to unload and
2904 * then ifdown has been issued. In this case we want to
2905 * unload and let other functions complete the recovery
* process.
*/
2908 bp->recovery_state = BNX2X_RECOVERY_DONE;
2910 bnx2x_release_leader_lock(bp);
2913 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2914 BNX2X_ERR("Can't unload in closed or error state\n");
2918 /* Nothing to do during unload if the previous bnx2x_nic_load()
2919 * has not completed successfully - all resources are released.
2921 * We can get here only after an unsuccessful ndo_* callback, during which
2922 * dev->IFF_UP flag is still on.
*/
2924 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2927 /* It's important to set the bp->state to the value different from
2928 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2929 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2931 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2934 /* indicate to VFs that the PF is going down */
2935 bnx2x_iov_channel_down(bp);
2937 if (CNIC_LOADED(bp))
2938 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2941 bnx2x_tx_disable(bp);
2942 netdev_reset_tc(bp->dev);
2944 bp->rx_mode = BNX2X_RX_MODE_NONE;
2946 del_timer_sync(&bp->timer);
2949 /* Set ALWAYS_ALIVE bit in shmem */
2950 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2951 bnx2x_drv_pulse(bp);
2952 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2953 bnx2x_save_statistics(bp);
2956 /* wait till consumers catch up with producers in all queues */
2957 bnx2x_drain_tx_queues(bp);
2959 /* if VF, indicate to the PF that this function is going down (the PF
2960 * will delete sp elements and clear initializations)
*/
if (IS_VF(bp))
2963 bnx2x_vfpf_close_vf(bp);
2964 else if (unload_mode != UNLOAD_RECOVERY)
2965 /* if this is a normal/close unload need to clean up chip*/
2966 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2968 /* Send the UNLOAD_REQUEST to the MCP */
2969 bnx2x_send_unload_req(bp, unload_mode);
2971 /* Prevent transactions to host from the functions on the
2972 * engine that doesn't reset global blocks in case of global
2973 * attention once global blocks are reset and gates are opened
2974 * (the engine whose leader will perform the recovery
* last).
*/
2977 if (!CHIP_IS_E1x(bp))
2978 bnx2x_pf_disable(bp);
2980 /* Disable HW interrupts, NAPI */
2981 bnx2x_netif_stop(bp, 1);
2982 /* Delete all NAPI objects */
2983 bnx2x_del_all_napi(bp);
2984 if (CNIC_LOADED(bp))
2985 bnx2x_del_all_napi_cnic(bp);
2989 /* Report UNLOAD_DONE to MCP */
2990 bnx2x_send_unload_done(bp, false);
/*
2994 * At this stage no more interrupts will arrive so we may safely clean
2995 * the queueable objects here in case they failed to get cleaned so far.
*/
2998 bnx2x_squeeze_objects(bp);
3000 /* There should be no more pending SP commands at this stage */
3005 /* clear pending work in rtnl task */
3006 bp->sp_rtnl_state = 0;
3009 /* Free SKBs, SGEs, TPA pool and driver internals */
3010 bnx2x_free_skbs(bp);
3011 if (CNIC_LOADED(bp))
3012 bnx2x_free_skbs_cnic(bp);
3013 for_each_rx_queue(bp, i)
3014 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3016 bnx2x_free_fp_mem(bp);
3017 if (CNIC_LOADED(bp))
3018 bnx2x_free_fp_mem_cnic(bp);
3021 if (CNIC_LOADED(bp))
3022 bnx2x_free_mem_cnic(bp);
3026 bp->state = BNX2X_STATE_CLOSED;
3027 bp->cnic_loaded = false;
3029 /* Check if there are pending parity attentions. If there are - set
3030 * RECOVERY_IN_PROGRESS.
*/
3032 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3033 bnx2x_set_reset_in_progress(bp);

3035 /* Set RESET_IS_GLOBAL if needed */
if (global)
3037 bnx2x_set_reset_global(bp);
3040 /* The last driver must disable a "close the gate" if there is no
3041 * parity attention or "process kill" pending.
*/
if (IS_PF(bp) &&
3044 !bnx2x_clear_pf_load(bp) &&
3045 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3046 bnx2x_disable_close_the_gate(bp);
3048 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3053 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3057 /* If there is no power capability, silently succeed */
3058 if (!bp->pdev->pm_cap) {
3059 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3063 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3067 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3068 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3069 PCI_PM_CTRL_PME_STATUS));
3071 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3072 /* delay required during transition out of D3hot */
3077 /* If there are other clients above, don't shut down the power */
3079 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3081 /* Don't shut down the power for emulation and FPGA */
3082 if (CHIP_REV_IS_SLOW(bp))
3085 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3089 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3091 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3094 /* No more memory access after this point until
3095 * device is brought back to D0.
3100 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3107 * net_device service functions
3109 static int bnx2x_poll(struct napi_struct *napi, int budget)
3113 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3115 struct bnx2x *bp = fp->bp;
3118 #ifdef BNX2X_STOP_ON_ERROR
3119 if (unlikely(bp->panic)) {
3120 napi_complete(napi);
3124 if (!bnx2x_fp_lock_napi(fp))
3127 for_each_cos_in_tx_queue(fp, cos)
3128 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3129 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3131 if (bnx2x_has_rx_work(fp)) {
3132 work_done += bnx2x_rx_int(fp, budget - work_done);
3134 /* must not complete if we consumed full budget */
3135 if (work_done >= budget) {
3136 bnx2x_fp_unlock_napi(fp);
3141 /* Fall out from the NAPI loop if needed */
3142 if (!bnx2x_fp_unlock_napi(fp) &&
3143 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3145 /* No need to update SB for FCoE L2 ring as long as
3146 * it's connected to the default SB and the SB
3147 * has been updated when NAPI was scheduled.
3149 if (IS_FCOE_FP(fp)) {
3150 napi_complete(napi);
3153 bnx2x_update_fpsb_idx(fp);
3154 /* bnx2x_has_rx_work() reads the status block,
3155 * thus we need to ensure that status block indices
3156 * have been actually read (bnx2x_update_fpsb_idx)
3157 * prior to this check (bnx2x_has_rx_work) so that
3158 * we won't write the "newer" value of the status block
3159 * to IGU (if there was a DMA right after
3160 * bnx2x_has_rx_work and if there is no rmb, the memory
3161 * reading (bnx2x_update_fpsb_idx) may be postponed
3162 * to right before bnx2x_ack_sb). In this case there
3163 * will never be another interrupt until there is
3164 * another update of the status block, while there
3165 * is still unhandled work.
*/
rmb();

3169 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3170 napi_complete(napi);
3171 /* Re-enable interrupts */
3172 DP(NETIF_MSG_RX_STATUS,
3173 "Update index to %d\n", fp->fp_hc_idx);
3174 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3175 le16_to_cpu(fp->fp_hc_idx),
3185 #ifdef CONFIG_NET_RX_BUSY_POLL
3186 /* must be called with local_bh_disable()d */
3187 int bnx2x_low_latency_recv(struct napi_struct *napi)
3189 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3191 struct bnx2x *bp = fp->bp;
3194 if ((bp->state == BNX2X_STATE_CLOSED) ||
3195 (bp->state == BNX2X_STATE_ERROR) ||
3196 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3197 return LL_FLUSH_FAILED;
3199 if (!bnx2x_fp_lock_poll(fp))
3200 return LL_FLUSH_BUSY;
3202 if (bnx2x_has_rx_work(fp))
3203 found = bnx2x_rx_int(fp, 4);
3205 bnx2x_fp_unlock_poll(fp);
3211 /* we split the first BD into headers and data BDs
3212 * to ease the pain of our fellow microcode engineers
3213 * we use one mapping for both BDs
3215 static u16 bnx2x_tx_split(struct bnx2x *bp,
3216 struct bnx2x_fp_txdata *txdata,
3217 struct sw_tx_bd *tx_buf,
3218 struct eth_tx_start_bd **tx_bd, u16 hlen,
3221 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3222 struct eth_tx_bd *d_tx_bd;
3224 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3226 /* first fix first BD */
3227 h_tx_bd->nbytes = cpu_to_le16(hlen);
3229 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3230 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3232 /* now get a new data BD
3233 * (after the pbd) and fill it */
3234 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3235 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3237 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3238 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3240 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3241 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3242 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3244 /* this marks the BD as one that has no individual mapping */
3245 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3247 DP(NETIF_MSG_TX_QUEUED,
3248 "TSO split data size is %d (%x:%x)\n",
3249 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3252 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

return bd_prod;
3257 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3258 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
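/* bnx2x_csum_fix() adjusts a checksum that was computed starting 'fix'
 * bytes away from the real transport header: it folds out (fix > 0) or
 * folds in (fix < 0) the partial checksum of those bytes, so the result
 * corresponds to a checksum started exactly at t_header.
 */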
3259 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3261 __sum16 tsum = (__force __sum16) csum;
if (fix > 0)
3264 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3265 csum_partial(t_header - fix, fix, 0)));
else if (fix < 0)
3268 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3269 csum_partial(t_header, -fix, 0)));
3271 return bswab16(tsum);
3274 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3280 if (skb->ip_summed != CHECKSUM_PARTIAL)
3283 protocol = vlan_get_protocol(skb);
3284 if (protocol == htons(ETH_P_IPV6)) {
3286 prot = ipv6_hdr(skb)->nexthdr;
3289 prot = ip_hdr(skb)->protocol;
3292 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3293 if (inner_ip_hdr(skb)->version == 6) {
3294 rc |= XMIT_CSUM_ENC_V6;
3295 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3296 rc |= XMIT_CSUM_TCP;
3298 rc |= XMIT_CSUM_ENC_V4;
3299 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3300 rc |= XMIT_CSUM_TCP;
3303 if (prot == IPPROTO_TCP)
3304 rc |= XMIT_CSUM_TCP;
3306 if (skb_is_gso(skb)) {
3307 if (skb_is_gso_v6(skb)) {
3308 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3309 if (rc & XMIT_CSUM_ENC)
3310 rc |= XMIT_GSO_ENC_V6;
3312 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3313 if (rc & XMIT_CSUM_ENC)
3314 rc |= XMIT_GSO_ENC_V4;
3321 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3322 /* check if packet requires linearization (packet is too fragmented)
3323 no need to check fragmentation if page size > 8K (there will be no
3324 violation to FW restrictions) */
3325 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3330 int first_bd_sz = 0;
3332 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3333 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3335 if (xmit_type & XMIT_GSO) {
3336 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3337 /* Check if LSO packet needs to be copied:
3338 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3339 int wnd_size = MAX_FETCH_BD - 3;
3340 /* Number of windows to check */
3341 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
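/* The check below slides a window of wnd_size frags along the frag list.
 * If any window holds fewer than gso_size (one MSS) bytes, a single
 * transmitted segment would need more BDs than the FW can fetch, so the
 * skb is flagged for linearization.
 */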
3346 /* Headers length */
3347 hlen = (int)(skb_transport_header(skb) - skb->data) +
3350 /* Amount of data (w/o headers) on linear part of SKB*/
3351 first_bd_sz = skb_headlen(skb) - hlen;
3353 wnd_sum = first_bd_sz;
3355 /* Calculate the first sum - it's special */
3356 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3358 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3360 /* If there was data on linear skb data - check it */
3361 if (first_bd_sz > 0) {
3362 if (unlikely(wnd_sum < lso_mss)) {
3367 wnd_sum -= first_bd_sz;
3370 /* Others are easier: run through the frag list and
3371 check all windows */
3372 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3374 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3376 if (unlikely(wnd_sum < lso_mss)) {
3381 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3384 /* in non-LSO case a too fragmented packet should always
be linearized */
to_copy = 1;
3391 if (unlikely(to_copy))
3392 DP(NETIF_MSG_TX_QUEUED,
3393 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3394 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3395 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3401 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3404 struct ipv6hdr *ipv6;
3406 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3407 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3408 ETH_TX_PARSE_BD_E2_LSO_MSS;
3410 if (xmit_type & XMIT_GSO_ENC_V6)
3411 ipv6 = inner_ipv6_hdr(skb);
3412 else if (xmit_type & XMIT_GSO_V6)
3413 ipv6 = ipv6_hdr(skb);
else
ipv6 = NULL;

3417 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3418 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3422 * bnx2x_set_pbd_gso - update PBD in GSO case.
3426 * @xmit_type: xmit flags
3428 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3429 struct eth_tx_parse_bd_e1x *pbd,
3430 struct eth_tx_start_bd *tx_start_bd,
3433 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3434 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3435 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3437 if (xmit_type & XMIT_GSO_V4) {
3438 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3439 pbd->tcp_pseudo_csum =
3440 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
3442 0, IPPROTO_TCP, 0));
3444 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3445 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3447 pbd->tcp_pseudo_csum =
3448 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3449 &ipv6_hdr(skb)->daddr,
3450 0, IPPROTO_TCP, 0));
3454 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3458 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3460 * @bp: driver handle
3462 * @parsing_data: data to be updated
3463 * @xmit_type: xmit flags
3465 * 57712/578xx related, when skb has encapsulation
3467 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3468 u32 *parsing_data, u32 xmit_type)
*parsing_data |=
3471 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3472 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3473 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3475 if (xmit_type & XMIT_CSUM_TCP) {
3476 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3477 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3478 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3480 return skb_inner_transport_header(skb) +
3481 inner_tcp_hdrlen(skb) - skb->data;
3484 /* We support checksum offload for TCP and UDP only.
3485 * No need to pass the UDP header length - it's a constant.
3487 return skb_inner_transport_header(skb) +
3488 sizeof(struct udphdr) - skb->data;
3492 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3494 * @bp: driver handle
3496 * @parsing_data: data to be updated
3497 * @xmit_type: xmit flags
3499 * 57712/578xx related
3501 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3502 u32 *parsing_data, u32 xmit_type)
*parsing_data |=
3505 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3506 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3507 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3509 if (xmit_type & XMIT_CSUM_TCP) {
3510 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3511 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3512 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3514 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3516 /* We support checksum offload for TCP and UDP only.
3517 * No need to pass the UDP header length - it's a constant.
3519 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
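/* In both helpers above the return value is the number of bytes from the
 * start of the packet up to and including the L4 header; bnx2x_start_xmit()
 * uses it as the header length when it has to split the first BD for TSO.
 */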
3522 /* set FW indication according to inner or outer protocols if tunneled */
3523 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3524 struct eth_tx_start_bd *tx_start_bd,
3527 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3529 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3530 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3532 if (!(xmit_type & XMIT_CSUM_TCP))
3533 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3537 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3539 * @bp: driver handle
3541 * @pbd: parse BD to be updated
3542 * @xmit_type: xmit flags
3544 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3545 struct eth_tx_parse_bd_e1x *pbd,
3548 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3550 /* for now NS flag is not used in Linux */
3553 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3554 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3556 pbd->ip_hlen_w = (skb_transport_header(skb) -
3557 skb_network_header(skb)) >> 1;
3559 hlen += pbd->ip_hlen_w;
3561 /* We support checksum offload for TCP and UDP only */
3562 if (xmit_type & XMIT_CSUM_TCP)
3563 hlen += tcp_hdrlen(skb) / 2;
3565 hlen += sizeof(struct udphdr) / 2;
3567 pbd->total_hlen_w = cpu_to_le16(hlen);
3570 if (xmit_type & XMIT_CSUM_TCP) {
3571 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3574 s8 fix = SKB_CS_OFF(skb); /* signed! */
3576 DP(NETIF_MSG_TX_QUEUED,
3577 "hlen %d fix %d csum before fix %x\n",
3578 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3580 /* HW bug: fixup the CSUM */
3581 pbd->tcp_pseudo_csum =
3582 bnx2x_csum_fix(skb_transport_header(skb),
3585 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3586 pbd->tcp_pseudo_csum);
3592 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3593 struct eth_tx_parse_bd_e2 *pbd_e2,
3594 struct eth_tx_parse_2nd_bd *pbd2,
3599 u8 outerip_off, outerip_len = 0;
3601 /* from outer IP to transport */
3602 hlen_w = (skb_inner_transport_header(skb) -
3603 skb_network_header(skb)) >> 1;
3606 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3608 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3610 /* outer IP header info */
3611 if (xmit_type & XMIT_CSUM_V4) {
3612 struct iphdr *iph = ip_hdr(skb);
3613 u32 csum = (__force u32)(~iph->check) -
3614 (__force u32)iph->tot_len -
3615 (__force u32)iph->frag_off;
3617 pbd2->fw_ip_csum_wo_len_flags_frag =
3618 bswab16(csum_fold((__force __wsum)csum));
3620 pbd2->fw_ip_hdr_to_payload_w =
3621 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3624 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3626 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3628 if (xmit_type & XMIT_GSO_V4) {
3629 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3631 pbd_e2->data.tunnel_data.pseudo_csum =
3632 bswab16(~csum_tcpudp_magic(
3633 inner_ip_hdr(skb)->saddr,
3634 inner_ip_hdr(skb)->daddr,
3635 0, IPPROTO_TCP, 0));
3637 outerip_len = ip_hdr(skb)->ihl << 1;
3639 pbd_e2->data.tunnel_data.pseudo_csum =
3640 bswab16(~csum_ipv6_magic(
3641 &inner_ipv6_hdr(skb)->saddr,
3642 &inner_ipv6_hdr(skb)->daddr,
3643 0, IPPROTO_TCP, 0));
3646 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3650 (!!(xmit_type & XMIT_CSUM_V6) <<
3651 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3653 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3654 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3655 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3657 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3658 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3659 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3663 /* called with netif_tx_lock
3664 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3665 * netif_wake_queue()
3667 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3669 struct bnx2x *bp = netdev_priv(dev);
3671 struct netdev_queue *txq;
3672 struct bnx2x_fp_txdata *txdata;
3673 struct sw_tx_bd *tx_buf;
3674 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3675 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3676 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3677 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3678 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3679 u32 pbd_e2_parsing_data = 0;
3680 u16 pkt_prod, bd_prod;
3683 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3686 __le16 pkt_size = 0;
3688 u8 mac_type = UNICAST_ADDRESS;
3690 #ifdef BNX2X_STOP_ON_ERROR
3691 if (unlikely(bp->panic))
3692 return NETDEV_TX_BUSY;
3695 txq_index = skb_get_queue_mapping(skb);
3696 txq = netdev_get_tx_queue(dev, txq_index);
3698 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3700 txdata = &bp->bnx2x_txq[txq_index];
3702 /* enable this debug print to view the transmission queue being used
3703 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3704 txq_index, fp_index, txdata_index); */
3706 /* enable this debug print to view the transmission details
3707 DP(NETIF_MSG_TX_QUEUED,
3708 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3709 txdata->cid, fp_index, txdata_index, txdata, fp); */
3711 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3712 skb_shinfo(skb)->nr_frags +
3714 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3715 /* Handle special storage cases separately */
3716 if (txdata->tx_ring_size == 0) {
3717 struct bnx2x_eth_q_stats *q_stats =
3718 bnx2x_fp_qstats(bp, txdata->parent_fp);
3719 q_stats->driver_filtered_tx_pkt++;
3721 return NETDEV_TX_OK;
3723 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3724 netif_tx_stop_queue(txq);
3725 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3727 return NETDEV_TX_BUSY;
3730 DP(NETIF_MSG_TX_QUEUED,
3731 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3732 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3733 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3736 eth = (struct ethhdr *)skb->data;
3738 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3739 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3740 if (is_broadcast_ether_addr(eth->h_dest))
3741 mac_type = BROADCAST_ADDRESS;
3743 mac_type = MULTICAST_ADDRESS;
3746 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3747 /* First, check if we need to linearize the skb (due to FW
3748 restrictions). No need to check fragmentation if page size > 8K
3749 (there will be no violation to FW restrictions) */
3750 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3751 /* Statistics of linearization */
3753 if (skb_linearize(skb) != 0) {
3754 DP(NETIF_MSG_TX_QUEUED,
3755 "SKB linearization failed - silently dropping this SKB\n");
3756 dev_kfree_skb_any(skb);
3757 return NETDEV_TX_OK;
3761 /* Map skb linear data for DMA */
3762 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3763 skb_headlen(skb), DMA_TO_DEVICE);
3764 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3765 DP(NETIF_MSG_TX_QUEUED,
3766 "SKB mapping failed - silently dropping this SKB\n");
3767 dev_kfree_skb_any(skb);
3768 return NETDEV_TX_OK;
/*
3771 Please read carefully. First we use one BD which we mark as start,
3772 then we have a parsing info BD (used for TSO or xsum),
3773 and only then we have the rest of the TSO BDs.
3774 (don't forget to mark the last one as last,
3775 and to unmap only AFTER you write to the BD ...)
3776 And above all, all pbd sizes are in words - NOT DWORDS!
*/
3779 /* get current pkt produced now - advance it just before sending packet
3780 * since mapping of pages may fail and cause packet to be dropped
3782 pkt_prod = txdata->tx_pkt_prod;
3783 bd_prod = TX_BD(txdata->tx_bd_prod);
3785 /* get a tx_buf and first BD
3786 * tx_start_bd may be changed during SPLIT,
3787 * but first_bd will always stay first
3789 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3790 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3791 first_bd = tx_start_bd;
3793 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3795 /* header nbd: indirectly zero other flags! */
3796 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3798 /* remember the first BD of the packet */
3799 tx_buf->first_bd = txdata->tx_bd_prod;
3803 DP(NETIF_MSG_TX_QUEUED,
3804 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3805 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3807 if (vlan_tx_tag_present(skb)) {
3808 tx_start_bd->vlan_or_ethertype =
3809 cpu_to_le16(vlan_tx_tag_get(skb));
3810 tx_start_bd->bd_flags.as_bitfield |=
3811 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3813 /* when transmitting in a vf, start bd must hold the ethertype
3814 * for fw to enforce it
*/
if (IS_VF(bp))
3817 tx_start_bd->vlan_or_ethertype =
3818 cpu_to_le16(ntohs(eth->h_proto));
else
3820 /* used by FW for packet accounting */
3821 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3824 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3826 /* turn on parsing and get a BD */
3827 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3829 if (xmit_type & XMIT_CSUM)
3830 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3832 if (!CHIP_IS_E1x(bp)) {
3833 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3834 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3836 if (xmit_type & XMIT_CSUM_ENC) {
3837 u16 global_data = 0;
3839 /* Set PBD in enc checksum offload case */
3840 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3841 &pbd_e2_parsing_data,
3844 /* turn on 2nd parsing and get a BD */
3845 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3847 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3849 memset(pbd2, 0, sizeof(*pbd2));
3851 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3852 (skb_inner_network_header(skb) -
3855 if (xmit_type & XMIT_GSO_ENC)
3856 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3860 pbd2->global_data = cpu_to_le16(global_data);
3862 /* add addition parse BD indication to start BD */
3863 SET_FLAG(tx_start_bd->general_data,
3864 ETH_TX_START_BD_PARSE_NBDS, 1);
3865 /* set encapsulation flag in start BD */
3866 SET_FLAG(tx_start_bd->general_data,
3867 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3869 } else if (xmit_type & XMIT_CSUM) {
3870 /* Set PBD in checksum offload case w/o encapsulation */
3871 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3872 &pbd_e2_parsing_data,
3876 /* Add the macs to the parsing BD if this is a vf */
if (IS_VF(bp)) {
3878 /* override GRE parameters in BD */
3879 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3880 &pbd_e2->data.mac_addr.src_mid,
3881 &pbd_e2->data.mac_addr.src_lo,
3884 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3885 &pbd_e2->data.mac_addr.dst_mid,
3886 &pbd_e2->data.mac_addr.dst_lo,
3890 SET_FLAG(pbd_e2_parsing_data,
3891 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3893 u16 global_data = 0;
3894 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3895 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3896 /* Set PBD in checksum offload case */
3897 if (xmit_type & XMIT_CSUM)
3898 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3900 SET_FLAG(global_data,
3901 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3902 pbd_e1x->global_data |= cpu_to_le16(global_data);
3905 /* Setup the data pointer of the first BD of the packet */
3906 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3907 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3908 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3909 pkt_size = tx_start_bd->nbytes;
3911 DP(NETIF_MSG_TX_QUEUED,
3912 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3913 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3914 le16_to_cpu(tx_start_bd->nbytes),
3915 tx_start_bd->bd_flags.as_bitfield,
3916 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3918 if (xmit_type & XMIT_GSO) {
3920 DP(NETIF_MSG_TX_QUEUED,
3921 "TSO packet len %d hlen %d total len %d tso size %d\n",
3922 skb->len, hlen, skb_headlen(skb),
3923 skb_shinfo(skb)->gso_size);
3925 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3927 if (unlikely(skb_headlen(skb) > hlen)) {
3929 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3933 if (!CHIP_IS_E1x(bp))
3934 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3937 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3940 /* Set the PBD's parsing_data field if not zero
3941 * (for the chips newer than 57711).
3943 if (pbd_e2_parsing_data)
3944 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3946 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3948 /* Handle fragmented skb */
3949 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3950 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3952 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3953 skb_frag_size(frag), DMA_TO_DEVICE);
3954 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3955 unsigned int pkts_compl = 0, bytes_compl = 0;
3957 DP(NETIF_MSG_TX_QUEUED,
3958 "Unable to map page - dropping packet...\n");
3960 /* we need to unmap all buffers already mapped
* for this SKB;
3962 * first_bd->nbd needs to be properly updated
3963 * before the call to bnx2x_free_tx_pkt
*/
3965 first_bd->nbd = cpu_to_le16(nbd);
3966 bnx2x_free_tx_pkt(bp, txdata,
3967 TX_BD(txdata->tx_pkt_prod),
3968 &pkts_compl, &bytes_compl);
3969 return NETDEV_TX_OK;
3972 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3973 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3974 if (total_pkt_bd == NULL)
3975 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3977 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3978 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3979 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3980 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3983 DP(NETIF_MSG_TX_QUEUED,
3984 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3985 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3986 le16_to_cpu(tx_data_bd->nbytes));
3989 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3991 /* update with actual num BDs */
3992 first_bd->nbd = cpu_to_le16(nbd);
3994 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3996 /* now send a tx doorbell, counting the next BD
3997 * if the packet contains or ends with it
3999 if (TX_BD_POFF(bd_prod) < nbd)
nbd++;
4002 /* total_pkt_bytes should be set on the first data BD if
4003 * it's not an LSO packet and there is more than one
4004 * data BD. In this case pkt_size is limited by an MTU value.
4005 * However we prefer to set it for an LSO packet (while we don't
4006 * have to) in order to save some CPU cycles in a non-LSO
4007 * case, which is where we care about them much more.
*/
4009 if (total_pkt_bd != NULL)
4010 total_pkt_bd->total_pkt_bytes = pkt_size;
4013 DP(NETIF_MSG_TX_QUEUED,
4014 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4015 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4016 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4017 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4018 le16_to_cpu(pbd_e1x->total_hlen_w));
4020 DP(NETIF_MSG_TX_QUEUED,
4021 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4023 pbd_e2->data.mac_addr.dst_hi,
4024 pbd_e2->data.mac_addr.dst_mid,
4025 pbd_e2->data.mac_addr.dst_lo,
4026 pbd_e2->data.mac_addr.src_hi,
4027 pbd_e2->data.mac_addr.src_mid,
4028 pbd_e2->data.mac_addr.src_lo,
4029 pbd_e2->parsing_data);
4030 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4032 netdev_tx_sent_queue(txq, skb->len);
4034 skb_tx_timestamp(skb);
4036 txdata->tx_pkt_prod++;
/*
4038 * Make sure that the BD data is updated before updating the producer
4039 * since FW might read the BD right after the producer is updated.
4040 * This is only applicable for weak-ordered memory model archs such
4041 * as IA-64. The following barrier is also mandatory since FW will
4042 * assume packets must have BDs.
*/
wmb();
4046 txdata->tx_db.data.prod += nbd;
4049 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4053 txdata->tx_bd_prod += nbd;
4055 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4056 netif_tx_stop_queue(txq);
4058 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4059 * ordering of set_bit() in netif_tx_stop_queue() and read of
* the tx ring consumer index.
*/
smp_mb();
4063 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4064 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4065 netif_tx_wake_queue(txq);
4069 return NETDEV_TX_OK;
4073 * bnx2x_setup_tc - routine to configure net_device for multi tc
4075 * @netdev: net device to configure
4076 * @tc: number of traffic classes to enable
4078 * callback connected to the ndo_setup_tc function pointer
4080 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4082 int cos, prio, count, offset;
4083 struct bnx2x *bp = netdev_priv(dev);
4085 /* setup tc must be called under rtnl lock */
ASSERT_RTNL();

4088 /* no traffic classes requested. Aborting */
if (!num_tc) {
4090 netdev_reset_tc(dev);
return 0;
}
4094 /* requested to support too many traffic classes */
4095 if (num_tc > bp->max_cos) {
4096 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4097 num_tc, bp->max_cos);
4101 /* declare amount of supported traffic classes */
4102 if (netdev_set_num_tc(dev, num_tc)) {
4103 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4107 /* configure priority to traffic class mapping */
4108 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4109 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4110 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4111 "mapping priority %d to tc %d\n",
4112 prio, bp->prio_to_cos[prio]);
4115 /* Use this configuration to differentiate tc0 from other COSes
4116 This can be used for ets or pfc, and save the effort of setting
4117 up a multi class queue disc or negotiating DCBX with a switch
4118 netdev_set_prio_tc_map(dev, 0, 0);
4119 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4120 for (prio = 1; prio < 16; prio++) {
4121 netdev_set_prio_tc_map(dev, prio, 1);
4122 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
} */
4125 /* configure traffic class to transmission queue mapping */
4126 for (cos = 0; cos < bp->max_cos; cos++) {
4127 count = BNX2X_NUM_ETH_QUEUES(bp);
4128 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4129 netdev_set_tc_queue(dev, cos, count, offset);
4130 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4131 "mapping tc %d to offset %d count %d\n",
4132 cos, offset, count);
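/* The net effect: traffic class 'cos' maps to the contiguous block of
 * BNX2X_NUM_ETH_QUEUES(bp) tx queues whose first queue index is
 * cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
 */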
4138 /* called with rtnl_lock */
4139 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4141 struct sockaddr *addr = p;
4142 struct bnx2x *bp = netdev_priv(dev);
4145 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4146 BNX2X_ERR("Requested MAC address is not valid\n");
4150 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4151 !is_zero_ether_addr(addr->sa_data)) {
4152 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4156 if (netif_running(dev)) {
4157 rc = bnx2x_set_eth_mac(bp, false);
4162 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4164 if (netif_running(dev))
4165 rc = bnx2x_set_eth_mac(bp, true);
4170 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4172 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4173 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4178 if (IS_FCOE_IDX(fp_index)) {
4179 memset(sb, 0, sizeof(union host_hc_status_block));
4180 fp->status_blk_mapping = 0;
} else {
4183 if (!CHIP_IS_E1x(bp))
4184 BNX2X_PCI_FREE(sb->e2_sb,
4185 bnx2x_fp(bp, fp_index,
4186 status_blk_mapping),
4187 sizeof(struct host_hc_status_block_e2));
else
4189 BNX2X_PCI_FREE(sb->e1x_sb,
4190 bnx2x_fp(bp, fp_index,
4191 status_blk_mapping),
4192 sizeof(struct host_hc_status_block_e1x));
4196 if (!skip_rx_queue(bp, fp_index)) {
4197 bnx2x_free_rx_bds(fp);
4199 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4200 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4201 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4202 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4203 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4205 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4206 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4207 sizeof(struct eth_fast_path_rx_cqe) *
4211 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4212 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4213 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4214 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4218 if (!skip_tx_queue(bp, fp_index)) {
4219 /* fastpath tx rings: tx_buf tx_desc */
4220 for_each_cos_in_tx_queue(fp, cos) {
4221 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4223 DP(NETIF_MSG_IFDOWN,
4224 "freeing tx memory of fp %d cos %d cid %d\n",
4225 fp_index, cos, txdata->cid);
4227 BNX2X_FREE(txdata->tx_buf_ring);
4228 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4229 txdata->tx_desc_mapping,
4230 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4233 /* end of fastpath */
4236 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4239 for_each_cnic_queue(bp, i)
4240 bnx2x_free_fp_mem_at(bp, i);
4243 void bnx2x_free_fp_mem(struct bnx2x *bp)
4246 for_each_eth_queue(bp, i)
4247 bnx2x_free_fp_mem_at(bp, i);
4250 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4252 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4253 if (!CHIP_IS_E1x(bp)) {
4254 bnx2x_fp(bp, index, sb_index_values) =
4255 (__le16 *)status_blk.e2_sb->sb.index_values;
4256 bnx2x_fp(bp, index, sb_running_index) =
4257 (__le16 *)status_blk.e2_sb->sb.running_index;
} else {
4259 bnx2x_fp(bp, index, sb_index_values) =
4260 (__le16 *)status_blk.e1x_sb->sb.index_values;
4261 bnx2x_fp(bp, index, sb_running_index) =
4262 (__le16 *)status_blk.e1x_sb->sb.running_index;
4266 /* Returns the number of actually allocated BDs */
4267 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4270 struct bnx2x *bp = fp->bp;
4271 u16 ring_prod, cqe_ring_prod;
4272 int i, failure_cnt = 0;
4274 fp->rx_comp_cons = 0;
4275 cqe_ring_prod = ring_prod = 0;
4277 /* This routine is called only during fp init so
4278 * fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
4280 for (i = 0; i < rx_ring_size; i++) {
4281 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
failure_cnt++;
continue;
}
4285 ring_prod = NEXT_RX_IDX(ring_prod);
4286 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4287 WARN_ON(ring_prod <= (i - failure_cnt));
if (failure_cnt)
4291 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4292 i - failure_cnt, fp->index);

4294 fp->rx_bd_prod = ring_prod;
4295 /* Limit the CQE producer by the CQE ring size */
4296 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
cqe_ring_prod);
4298 fp->rx_pkt = fp->rx_calls = 0;
4300 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4302 return i - failure_cnt;
4305 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4309 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4310 struct eth_rx_cqe_next_page *nextpg;
4312 nextpg = (struct eth_rx_cqe_next_page *)
4313 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4315 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4316 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4318 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4319 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4323 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4325 union host_hc_status_block *sb;
4326 struct bnx2x_fastpath *fp = &bp->fp[index];
4329 int rx_ring_size = 0;
4331 if (!bp->rx_ring_size &&
4332 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4333 rx_ring_size = MIN_RX_SIZE_NONTPA;
4334 bp->rx_ring_size = rx_ring_size;
4335 } else if (!bp->rx_ring_size) {
4336 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4338 if (CHIP_IS_E3(bp)) {
4339 u32 cfg = SHMEM_RD(bp,
4340 dev_info.port_hw_config[BP_PORT(bp)].
4343 /* Decrease ring size for 1G functions */
4344 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4345 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4349 /* allocate at least number of buffers required by FW */
4350 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4351 MIN_RX_SIZE_TPA, rx_ring_size);
4353 bp->rx_ring_size = rx_ring_size;
4354 } else /* if rx_ring_size specified - use it */
4355 rx_ring_size = bp->rx_ring_size;
4357 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4360 sb = &bnx2x_fp(bp, index, status_blk);
4362 if (!IS_FCOE_IDX(index)) {
4364 if (!CHIP_IS_E1x(bp))
4365 BNX2X_PCI_ALLOC(sb->e2_sb,
4366 &bnx2x_fp(bp, index, status_blk_mapping),
4367 sizeof(struct host_hc_status_block_e2));
4369 BNX2X_PCI_ALLOC(sb->e1x_sb,
4370 &bnx2x_fp(bp, index, status_blk_mapping),
4371 sizeof(struct host_hc_status_block_e1x));
4374 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4375 * set shortcuts for it.
4377 if (!IS_FCOE_IDX(index))
4378 set_sb_shortcuts(bp, index);
4381 if (!skip_tx_queue(bp, index)) {
4382 /* fastpath tx rings: tx_buf tx_desc */
4383 for_each_cos_in_tx_queue(fp, cos) {
4384 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4387 "allocating tx memory of fp %d cos %d\n",
4390 BNX2X_ALLOC(txdata->tx_buf_ring,
4391 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4392 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4393 &txdata->tx_desc_mapping,
4394 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4399 if (!skip_rx_queue(bp, index)) {
4400 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4401 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4402 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4403 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4404 &bnx2x_fp(bp, index, rx_desc_mapping),
4405 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4407 /* Seed all CQEs by 1s */
4408 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4409 &bnx2x_fp(bp, index, rx_comp_mapping),
4410 sizeof(struct eth_fast_path_rx_cqe) *
4414 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4415 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4416 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4417 &bnx2x_fp(bp, index, rx_sge_mapping),
4418 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4419 /* RX BD ring */
4420 bnx2x_set_next_page_rx_bd(fp);
4422 /* CQ ring */
4423 bnx2x_set_next_page_rx_cq(fp);
4425 /* BDs */
4426 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4427 if (ring_size < rx_ring_size)
4428 goto alloc_mem_err;
4429 }
4431 return 0;
4433 /* handles low memory cases */
4434 alloc_mem_err:
4435 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4436 index, ring_size);
4437 /* FW will drop all packets if queue is not big enough,
4438 * In these cases we disable the queue
4439 * Min size is different for OOO, TPA and non-TPA queues
4440 */
4441 if (ring_size < (fp->disable_tpa ?
4442 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4443 /* release memory allocated for this queue */
4444 bnx2x_free_fp_mem_at(bp, index);
4445 return -ENOMEM;
4446 }
4448 return 0;
4449 }
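/* Failure-policy sketch, following the path above: a partially filled RX
 * ring is tolerated as long as it still meets the firmware minimum
 * (MIN_RX_SIZE_TPA / MIN_RX_SIZE_NONTPA); below that the queue's memory is
 * released and -ENOMEM is returned, and the caller, bnx2x_alloc_fp_mem()
 * below, reacts by shrinking the number of ethernet queues.
 */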
4450 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4451 {
4452 if (!NO_FCOE(bp))
4453 /* FCoE */
4454 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4455 /* we will fail load process instead of mark
4456 * NO_FCOE_FLAG
4457 */
4458 return -ENOMEM;
4460 return 0;
4461 }
4463 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4464 {
4465 int i;
4467 /* 1. Allocate FP for leading - fatal if error
4468 * 2. Allocate RSS - fix number of queues if error
4469 */
4471 /* leading */
4472 if (bnx2x_alloc_fp_mem_at(bp, 0))
4473 return -ENOMEM;
4475 /* RSS */
4476 for_each_nondefault_eth_queue(bp, i)
4477 if (bnx2x_alloc_fp_mem_at(bp, i))
4478 break;
4480 /* handle memory failures */
4481 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4482 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4484 WARN_ON(delta < 0);
4485 bnx2x_shrink_eth_fp(bp, delta);
4486 if (CNIC_SUPPORT(bp))
4487 /* move non eth FPs next to last eth FP
4488 * must be done in that order
4489 * FCOE_IDX < FWD_IDX < OOO_IDX
4490 */
4492 /* move FCoE fp even NO_FCOE_FLAG is on */
4493 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4494 bp->num_ethernet_queues -= delta;
4495 bp->num_queues = bp->num_ethernet_queues +
4496 bp->num_cnic_queues;
4497 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4498 bp->num_queues + delta, bp->num_queues);
4499 }
4501 return 0;
4502 }
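/* Worked example of the shrink path (illustrative numbers): with 8 ethernet
 * queues configured and the allocation loop stopping at i = 5, delta = 3.
 * bnx2x_shrink_eth_fp() re-points the txdata structures to match the reduced
 * queue count, and when CNIC is supported the FCoE fastpath is moved down by
 * delta so it stays adjacent to the last surviving ethernet queue; num_queues
 * is then recomputed from the remaining ethernet plus cnic queues.
 */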
4504 void bnx2x_free_mem_bp(struct bnx2x *bp)
4505 {
4506 int i;
4508 for (i = 0; i < bp->fp_array_size; i++)
4509 kfree(bp->fp[i].tpa_info);
4510 kfree(bp->fp);
4511 kfree(bp->sp_objs);
4512 kfree(bp->fp_stats);
4513 kfree(bp->bnx2x_txq);
4514 kfree(bp->msix_table);
4515 kfree(bp->ilt);
4516 }
4518 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4519 {
4520 struct bnx2x_fastpath *fp;
4521 struct msix_entry *tbl;
4522 struct bnx2x_ilt *ilt;
4523 int msix_table_size = 0;
4524 int fp_array_size, txq_array_size;
4525 int i;
4527 /*
4528 * The biggest MSI-X table we might need is as a maximum number of fast
4529 * path IGU SBs plus default SB (for PF only).
4530 */
4531 msix_table_size = bp->igu_sb_cnt;
4532 if (IS_PF(bp))
4533 msix_table_size++;
4534 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4536 /* fp array: RSS plus CNIC related L2 queues */
4537 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4538 bp->fp_array_size = fp_array_size;
4539 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4541 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4542 if (!fp)
4543 goto alloc_err;
4544 for (i = 0; i < bp->fp_array_size; i++) {
4545 fp[i].tpa_info =
4546 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4547 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4548 if (!(fp[i].tpa_info))
4549 goto alloc_err;
4550 }
4552 bp->fp = fp;
4554 /* allocate sp objs */
4555 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4556 GFP_KERNEL);
4557 if (!bp->sp_objs)
4558 goto alloc_err;
4560 /* allocate fp_stats */
4561 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4562 GFP_KERNEL);
4563 if (!bp->fp_stats)
4564 goto alloc_err;
4566 /* Allocate memory for the transmission queues array */
4567 txq_array_size =
4568 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4569 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4571 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4572 GFP_KERNEL);
4573 if (!bp->bnx2x_txq)
4574 goto alloc_err;
4576 /* msix table */
4577 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4578 if (!tbl)
4579 goto alloc_err;
4580 bp->msix_table = tbl;
4582 /* ilt */
4583 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4584 if (!ilt)
4585 goto alloc_err;
4586 bp->ilt = ilt;
4588 return 0;
4589 alloc_err:
4590 bnx2x_free_mem_bp(bp);
4591 return -ENOMEM;
4592 }
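/* Error-handling note (sketch of the pattern above): every failed kcalloc/
 * kzalloc jumps to alloc_err, which simply calls bnx2x_free_mem_bp(). That
 * is safe for members that were never allocated because the bnx2x private
 * area is zero-initialised when the netdev is allocated and kfree(NULL) is
 * a no-op, so each pointer is either valid or NULL at cleanup time.
 */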
4594 int bnx2x_reload_if_running(struct net_device *dev)
4596 struct bnx2x *bp = netdev_priv(dev);
4598 if (unlikely(!netif_running(dev)))
4599 return 0;
4601 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4602 return bnx2x_nic_load(bp, LOAD_NORMAL);
4603 }
4605 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4606 {
4607 u32 sel_phy_idx = 0;
4608 if (bp->link_params.num_phys <= 1)
4609 return INT_PHY;
4611 if (bp->link_vars.link_up) {
4612 sel_phy_idx = EXT_PHY1;
4613 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4614 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4615 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4616 sel_phy_idx = EXT_PHY2;
4617 } else {
4619 switch (bnx2x_phy_selection(&bp->link_params)) {
4620 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4621 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4622 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4623 sel_phy_idx = EXT_PHY1;
4624 break;
4625 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4626 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4627 sel_phy_idx = EXT_PHY2;
4628 break;
4629 }
4630 }
4632 return sel_phy_idx;
4633 }
4634 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4635 {
4636 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4637 /*
4638 * The selected activated PHY is always after swapping (in case PHY
4639 * swapping is enabled). So when swapping is enabled, we need to reverse
4640 * the configuration
4641 */
4643 if (bp->link_params.multi_phy_config &
4644 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4645 if (sel_phy_idx == EXT_PHY1)
4646 sel_phy_idx = EXT_PHY2;
4647 else if (sel_phy_idx == EXT_PHY2)
4648 sel_phy_idx = EXT_PHY1;
4649 }
4650 return LINK_CONFIG_IDX(sel_phy_idx);
4651 }
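/* Mapping recap (illustrative): without PORT_HW_CFG_PHY_SWAPPED_ENABLED the
 * active PHY index is used as-is; with swapping enabled EXT_PHY1 and
 * EXT_PHY2 trade places before being folded into a link-config index:
 *
 *	swap off: EXT_PHY1 -> LINK_CONFIG_IDX(EXT_PHY1)
 *	swap on:  EXT_PHY1 -> LINK_CONFIG_IDX(EXT_PHY2), and vice versa
 *
 * INT_PHY (single-PHY boards) is never remapped.
 */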
4653 #ifdef NETDEV_FCOE_WWNN
4654 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4655 {
4656 struct bnx2x *bp = netdev_priv(dev);
4657 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4659 switch (type) {
4660 case NETDEV_FCOE_WWNN:
4661 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4662 cp->fcoe_wwn_node_name_lo);
4663 break;
4664 case NETDEV_FCOE_WWPN:
4665 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4666 cp->fcoe_wwn_port_name_lo);
4667 break;
4668 default:
4669 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4670 return -EINVAL;
4671 }
4673 return 0;
4674 }
4675 #endif
4677 /* called with rtnl_lock */
4678 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4680 struct bnx2x *bp = netdev_priv(dev);
4682 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4683 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4684 return -EAGAIN;
4685 }
4687 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4688 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4689 BNX2X_ERR("Can't support requested MTU size\n");
4690 return -EINVAL;
4691 }
4693 /* This does not race with packet allocation
4694 * because the actual alloc size is
4695 * only updated as part of load
4696 */
4697 dev->mtu = new_mtu;
4699 return bnx2x_reload_if_running(dev);
4700 }
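/* Bounds sketch: new_mtu is accepted only while it stays within the L2
 * frame limits this driver advertises, i.e. not above
 * ETH_MAX_JUMBO_PACKET_SIZE and, once the Ethernet header (ETH_HLEN) is
 * added, not below ETH_MIN_PACKET_SIZE. The new value only takes effect on
 * the next load, which is why a running interface is reloaded here.
 */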
4702 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4703 netdev_features_t features)
4705 struct bnx2x *bp = netdev_priv(dev);
4707 /* TPA requires Rx CSUM offloading */
4708 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4709 features &= ~NETIF_F_LRO;
4710 features &= ~NETIF_F_GRO;
4711 }
4713 return features;
4714 }
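/* Dependency note: LRO and GRO here ride on the adapter's TPA aggregation
 * engine, and aggregated frames are only trusted when the hardware also
 * verifies the RX checksum; with NETIF_F_RXCSUM cleared (or TPA
 * administratively disabled) both aggregation flags are therefore masked
 * out before the stack applies them.
 */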
4716 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4718 struct bnx2x *bp = netdev_priv(dev);
4719 u32 flags = bp->flags;
4720 u32 changes;
4721 bool bnx2x_reload = false;
4723 if (features & NETIF_F_LRO)
4724 flags |= TPA_ENABLE_FLAG;
4725 else
4726 flags &= ~TPA_ENABLE_FLAG;
4728 if (features & NETIF_F_GRO)
4729 flags |= GRO_ENABLE_FLAG;
4730 else
4731 flags &= ~GRO_ENABLE_FLAG;
4733 if (features & NETIF_F_LOOPBACK) {
4734 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4735 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4736 bnx2x_reload = true;
4737 }
4738 } else {
4739 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4740 bp->link_params.loopback_mode = LOOPBACK_NONE;
4741 bnx2x_reload = true;
4742 }
4743 }
4745 changes = flags ^ bp->flags;
4747 /* if GRO is changed while LRO is enabled, don't force a reload */
4748 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4749 changes &= ~GRO_ENABLE_FLAG;
4751 if (changes)
4752 bnx2x_reload = true;
4754 bp->flags = flags;
4756 if (bnx2x_reload) {
4757 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4758 return bnx2x_reload_if_running(dev);
4759 /* else: bnx2x_nic_load() will be called at end of recovery */
4760 }
4762 return 0;
4763 }
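/* Decision sketch for the reload above: the requested feature bits are first
 * folded into a candidate 'flags' word, then XOR-ed against the current
 * bp->flags to find what actually changed. A GRO toggle is deliberately
 * ignored while LRO/TPA stays enabled, since the TPA configuration does not
 * change in that case. Any remaining difference, or a loopback-mode switch,
 * sets bnx2x_reload, and the device is re-initialised only once parity
 * recovery has completed.
 */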
4765 void bnx2x_tx_timeout(struct net_device *dev)
4767 struct bnx2x *bp = netdev_priv(dev);
4769 #ifdef BNX2X_STOP_ON_ERROR
4770 if (!bp->panic)
4771 bnx2x_panic();
4772 #endif
4774 smp_mb__before_clear_bit();
4775 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4776 smp_mb__after_clear_bit();
4778 /* This allows the netif to be shutdown gracefully before resetting */
4779 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4780 }
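/* Recovery-path note: the timeout handler itself only records the event.
 * BNX2X_SP_RTNL_TX_TIMEOUT is set between the surrounding memory barriers
 * and the sp_rtnl worker is kicked, so the actual reset runs later in
 * process context, where rtnl_lock can be taken and the netif shut down
 * cleanly before the device is re-initialised.
 */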
4782 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4783 {
4784 struct net_device *dev = pci_get_drvdata(pdev);
4785 struct bnx2x *bp;
4787 if (!dev) {
4788 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4789 return -ENODEV;
4790 }
4791 bp = netdev_priv(dev);
4793 rtnl_lock();
4795 pci_save_state(pdev);
4797 if (!netif_running(dev)) {
4798 rtnl_unlock();
4799 return 0;
4800 }
4802 netif_device_detach(dev);
4804 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4806 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4808 rtnl_unlock();
4810 return 0;
4811 }
4813 int bnx2x_resume(struct pci_dev *pdev)
4814 {
4815 struct net_device *dev = pci_get_drvdata(pdev);
4816 struct bnx2x *bp;
4817 int rc;
4819 if (!dev) {
4820 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4821 return -ENODEV;
4822 }
4823 bp = netdev_priv(dev);
4825 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4826 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4827 return -EAGAIN;
4828 }
4830 rtnl_lock();
4832 pci_restore_state(pdev);
4834 if (!netif_running(dev)) {
4835 rtnl_unlock();
4836 return 0;
4837 }
4839 bnx2x_set_power_state(bp, PCI_D0);
4840 netif_device_attach(dev);
4842 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4844 rtnl_unlock();
4846 return rc;
4847 }
4849 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4850 u32 cid)
4851 {
4852 if (!cxt) {
4853 BNX2X_ERR("bad context pointer %p\n", cxt);
4854 return;
4855 }
4857 /* ustorm cxt validation */
4858 cxt->ustorm_ag_context.cdu_usage =
4859 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4860 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4861 /* xcontext validation */
4862 cxt->xstorm_ag_context.cdu_reserved =
4863 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4864 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4865 }
4867 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4868 u8 fw_sb_id, u8 sb_index,
4869 u8 ticks)
4870 {
4871 u32 addr = BAR_CSTRORM_INTMEM +
4872 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4873 REG_WR8(bp, addr, ticks);
4874 DP(NETIF_MSG_IFUP,
4875 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4876 port, fw_sb_id, sb_index, ticks);
4877 }
4879 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4880 u16 fw_sb_id, u8 sb_index,
4881 u8 disable)
4882 {
4883 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4884 u32 addr = BAR_CSTRORM_INTMEM +
4885 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4886 u8 flags = REG_RD8(bp, addr);
4888 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4889 flags |= enable_flag;
4890 REG_WR8(bp, addr, flags);
4891 DP(NETIF_MSG_IFUP,
4892 "port %x fw_sb_id %d sb_index %d disable %d\n",
4893 port, fw_sb_id, sb_index, disable);
4894 }
4896 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4897 u8 sb_index, u8 disable, u16 usec)
4898 {
4899 int port = BP_PORT(bp);
4900 u8 ticks = usec / BNX2X_BTR;
4902 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4904 disable = disable ? 1 : (usec ? 0 : 1);
4905 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4906 }
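/* Worked example (assuming the driver's usual BNX2X_BTR timer resolution of
 * 4 usec): a 100 usec coalescing request programs ticks = 100 / 4 = 25 into
 * the status-block timeout field. 'disable' is then normalised so that an
 * explicit disable request, or a zero usec value, turns host coalescing off
 * for that index, while any non-zero interval leaves it enabled.
 */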