1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi is kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then memcpying the entire
43 * source onto the target. Update txdata pointers and related content.
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
56 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
58 /* Copy the NAPI object as it has been already initialized */
59 from_fp->napi = to_fp->napi;
61 /* Move bnx2x_fastpath contents */
62 memcpy(to_fp, from_fp, sizeof(*to_fp));
65 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
68 to_fp->tpa_info = old_tpa_info;
70 /* move sp_objs contents as well, as their indices match fp ones */
71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
73 /* move fp_stats contents as well, as their indices match fp ones */
74 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
76 /* Update txdata pointers in fp and move txdata content accordingly:
77 * Each fp consumes 'max_cos' txdata structures, so the index should be
78 * decremented by max_cos x delta.
81 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
82 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
84 if (from == FCOE_IDX(bp)) {
85 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
86 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
89 memcpy(&bp->bnx2x_txq[new_txdata_index],
90 &bp->bnx2x_txq[old_txdata_index],
91 sizeof(struct bnx2x_fp_txdata));
92 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
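/* Illustrative sketch (not part of the driver, hypothetical names): the
 * "save, bulk-copy, restore" pattern bnx2x_move_fp() uses above to keep
 * already-initialized members (->napi, ->tpa_info) alive across a
 * whole-structure move. Guarded out so it is never compiled.
 */
#if 0
struct example_fp {
	int stateful;	/* must survive the move, like fp->napi */
	int payload;	/* everything else moves wholesale */
};

static void example_move(struct example_fp *to, struct example_fp *from)
{
	int keep = to->stateful;	/* save the initialized member */

	memcpy(to, from, sizeof(*to));	/* bulk-move the structure */
	to->stateful = keep;		/* restore the preserved member */
}
#endif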
96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
99 * @buf: character buffer to fill with the fw name
100 * @buf_len: length of the above buffer
103 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
106 u8 phy_fw_ver[PHY_FW_VER_LEN];
108 phy_fw_ver[0] = '\0';
109 bnx2x_get_ext_phy_fw_version(&bp->link_params,
110 phy_fw_ver, PHY_FW_VER_LEN);
111 strlcpy(buf, bp->fw_ver, buf_len);
112 snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
114 (bp->common.bc_ver & 0xff0000) >> 16,
115 (bp->common.bc_ver & 0xff00) >> 8,
116 (bp->common.bc_ver & 0xff),
117 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
119 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
127 * @delta: number of eth queues which were not allocated
129 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
131 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
133 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
134 * backward along the array could cause memory to be overwritten
136 for (cos = 1; cos < bp->max_cos; cos++) {
137 for (i = 0; i < old_eth_num - delta; i++) {
138 struct bnx2x_fastpath *fp = &bp->fp[i];
139 int new_idx = cos * (old_eth_num - delta) + i;
141 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
142 sizeof(struct bnx2x_fp_txdata));
143 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
148 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
150 /* free skb in the packet ring at pos idx
151 * return idx of last bd freed
153 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
154 u16 idx, unsigned int *pkts_compl,
155 unsigned int *bytes_compl)
157 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
158 struct eth_tx_start_bd *tx_start_bd;
159 struct eth_tx_bd *tx_data_bd;
160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
163 u16 split_bd_len = 0;
165 /* prefetch skb end pointer to speed up dev_kfree_skb() */
168 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
169 txdata->txq_index, idx, tx_buf, skb);
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
173 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
174 #ifdef BNX2X_STOP_ON_ERROR
175 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
176 BNX2X_ERR("BAD nbd!\n");
180 new_cons = nbd + tx_buf->first_bd;
182 /* Get the next bd */
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
185 /* Skip a parse bd... */
187 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
190 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
191 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
192 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
198 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
199 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
205 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
206 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
207 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
209 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
216 (*bytes_compl) += skb->len;
219 dev_kfree_skb_any(skb);
220 tx_buf->first_bd = 0;
226 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
228 struct netdev_queue *txq;
229 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
230 unsigned int pkts_compl = 0, bytes_compl = 0;
232 #ifdef BNX2X_STOP_ON_ERROR
233 if (unlikely(bp->panic))
237 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
238 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
239 sw_cons = txdata->tx_pkt_cons;
241 while (sw_cons != hw_cons) {
244 pkt_cons = TX_BD(sw_cons);
246 DP(NETIF_MSG_TX_DONE,
247 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
248 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
250 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
251 &pkts_compl, &bytes_compl);
256 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
258 txdata->tx_pkt_cons = sw_cons;
259 txdata->tx_bd_cons = bd_cons;
261 /* Need to make the tx_bd_cons update visible to start_xmit()
262 * before checking for netif_tx_queue_stopped(). Without the
263 * memory barrier, there is a small possibility that
264 * start_xmit() will miss it and cause the queue to be stopped forever.
266 * On the other hand we need an rmb() here to ensure the proper
267 * ordering of bit testing in the following
268 * netif_tx_queue_stopped(txq) call.
272 if (unlikely(netif_tx_queue_stopped(txq))) {
273 /* Taking tx_lock() is needed to prevent re-enabling the queue
274 * while it's empty. This could have happened if rx_action() gets
275 * suspended in bnx2x_tx_int() after the condition before
276 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
278 * stops the queue->sees fresh tx_bd_cons->releases the queue->
279 * sends some packets consuming the whole queue again->stops the queue.
283 __netif_tx_lock(txq, smp_processor_id());
285 if ((netif_tx_queue_stopped(txq)) &&
286 (bp->state == BNX2X_STATE_OPEN) &&
287 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
288 netif_tx_wake_queue(txq);
290 __netif_tx_unlock(txq);
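/* Illustrative sketch (not part of the driver, hypothetical names): the
 * barrier pairing the comment above relies on, reduced to two abstract
 * agents. Each side publishes its own store and only then reads the
 * other side's state, so at least one of them must observe the other's
 * update and the queue cannot remain stopped forever.
 */
#if 0
static u16 shared_cons;		/* stands in for txdata->tx_bd_cons */
static bool queue_stopped;	/* stands in for the netif stopped bit */

static void consumer_side(u16 new_cons)
{
	shared_cons = new_cons;	/* publish the new consumer index */
	smp_mb();		/* order the store before the test below */
	if (queue_stopped)
		queue_stopped = false;	/* re-check under tx_lock, wake */
}

static void producer_side(u16 cons_snapshot)
{
	queue_stopped = true;	/* ring looked full - stop the queue */
	smp_mb();		/* order the store before the read below */
	if (shared_cons != cons_snapshot)
		queue_stopped = false;	/* fresh completions - wake again */
}
#endif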
295 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
298 u16 last_max = fp->last_max_sge;
300 if (SUB_S16(idx, last_max) > 0)
301 fp->last_max_sge = idx;
304 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
306 struct eth_end_agg_rx_cqe *cqe)
308 struct bnx2x *bp = fp->bp;
309 u16 last_max, last_elem, first_elem;
316 /* First mark all used pages */
317 for (i = 0; i < sge_len; i++)
318 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
319 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
321 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
322 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
324 /* Here we assume that the last SGE index is the biggest */
325 prefetch((void *)(fp->sge_mask));
326 bnx2x_update_last_max_sge(fp,
327 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
329 last_max = RX_SGE(fp->last_max_sge);
330 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
331 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
333 /* If ring is not full */
334 if (last_elem + 1 != first_elem)
337 /* Now update the prod */
338 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
339 if (likely(fp->sge_mask[i]))
342 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
343 delta += BIT_VEC64_ELEM_SZ;
347 fp->rx_sge_prod += delta;
348 /* clear page-end entries */
349 bnx2x_clear_sge_mask_next_elems(fp);
352 DP(NETIF_MSG_RX_STATUS,
353 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
354 fp->last_max_sge, fp->rx_sge_prod);
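/* Illustrative sketch (not part of the driver, hypothetical names): the
 * 64-bit bit-vector bookkeeping used by bnx2x_update_sge_prod() above,
 * reduced to plain C. One u64 element tracks 64 SGEs; a bit is cleared
 * when the HW has consumed that SGE. The producer only advances over
 * elements that went fully to zero (all 64 SGEs consumed), re-arming
 * each to all-ones and crediting 64 ring entries at a time.
 */
#if 0
#define EXAMPLE_ELEM_SZ 64

static void example_clear_sge(u64 *mask, unsigned int idx)
{
	mask[idx / EXAMPLE_ELEM_SZ] &= ~(1ULL << (idx % EXAMPLE_ELEM_SZ));
}

static u16 example_advance_prod(u64 *mask, unsigned int elem)
{
	if (mask[elem])		/* element not fully consumed yet */
		return 0;
	mask[elem] = ~0ULL;	/* re-arm all 64 SGEs */
	return EXAMPLE_ELEM_SZ;	/* credit the producer by 64 */
}
#endif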
357 /* Get the Toeplitz hash value for the skb using the value from the
358 * CQE (calculated by HW).
360 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
361 const struct eth_fast_path_rx_cqe *cqe,
364 /* Get Toeplitz hash from CQE */
365 if ((bp->dev->features & NETIF_F_RXHASH) &&
366 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
367 enum eth_rss_hash_type htype;
369 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
370 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
371 (htype == TCP_IPV6_HASH_TYPE);
372 return le32_to_cpu(cqe->rss_hash_result);
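/* Illustrative usage (mirrors the RX fast path further below): the
 * caller stores the returned Toeplitz hash in the skb and marks it as
 * an L4 hash only when the HW hashed the TCP 4-tuple.
 */
#if 0
bool l4_rxhash;

skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
skb->l4_rxhash = l4_rxhash;
#endif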
378 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
380 struct eth_fast_path_rx_cqe *cqe)
382 struct bnx2x *bp = fp->bp;
383 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
384 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
385 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
387 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
388 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
390 /* print error if current state != stop */
391 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
392 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
394 /* Try to map an empty data buffer from the aggregation info */
395 mapping = dma_map_single(&bp->pdev->dev,
396 first_buf->data + NET_SKB_PAD,
397 fp->rx_buf_size, DMA_FROM_DEVICE);
399 * ...if it fails - move the skb from the consumer to the producer
400 * and set the current aggregation state as ERROR to drop it
401 * when TPA_STOP arrives.
404 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
405 /* Move the BD from the consumer to the producer */
406 bnx2x_reuse_rx_data(fp, cons, prod);
407 tpa_info->tpa_state = BNX2X_TPA_ERROR;
411 /* move empty data from pool to prod */
412 prod_rx_buf->data = first_buf->data;
413 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
414 /* point prod_bd to new data */
415 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
416 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
418 /* move partial skb from cons to pool (don't unmap yet) */
419 *first_buf = *cons_rx_buf;
421 /* mark bin state as START */
422 tpa_info->parsing_flags =
423 le16_to_cpu(cqe->pars_flags.flags);
424 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
425 tpa_info->tpa_state = BNX2X_TPA_START;
426 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
427 tpa_info->placement_offset = cqe->placement_offset;
428 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
429 if (fp->mode == TPA_MODE_GRO) {
430 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
431 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
432 tpa_info->gro_size = gro_size;
435 #ifdef BNX2X_STOP_ON_ERROR
436 fp->tpa_queue_used |= (1 << queue);
437 #ifdef _ASM_GENERIC_INT_L64_H
438 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
440 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
446 /* Timestamp option length allowed for TPA aggregation:
448 * nop nop kind length echo val
450 #define TPA_TSTAMP_OPT_LEN 12
452 * bnx2x_set_gro_params - compute GRO values
455 * @parsing_flags: parsing flags from the START CQE
456 * @len_on_bd: total length of the first packet for the
458 * @pkt_len: length of all segments
460 * Approximate value of the MSS for this aggregation calculated using
461 * the first packet of it.
462 * Compute number of aggregated segments, and gso_type.
464 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
465 u16 len_on_bd, unsigned int pkt_len,
466 u16 num_of_coalesced_segs)
468 /* TPA aggregation won't have either IP options or TCP options
469 * other than timestamp or IPv6 extension headers.
471 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
473 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
474 PRS_FLAG_OVERETH_IPV6) {
475 hdrs_len += sizeof(struct ipv6hdr);
476 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
478 hdrs_len += sizeof(struct iphdr);
479 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
482 /* Check if there was a TCP timestamp; if there was, it will
483 * always be 12 bytes long: nop nop kind length echo val.
485 * Otherwise FW would close the aggregation.
487 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
488 hdrs_len += TPA_TSTAMP_OPT_LEN;
490 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
492 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
493 * to skb_shinfo(skb)->gso_segs
495 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
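/* Illustrative numbers (an assumption, for clarity only): an IPv4
 * aggregation with TCP timestamps and len_on_bd = 1514 yields
 *
 *   hdrs_len = ETH_HLEN(14) + iphdr(20) + tcphdr(20) + tstamp(12) = 66
 *   gso_size = 1514 - 66 = 1448
 *
 * which is the usual TCP MSS for a 1500-byte MTU with timestamps.
 */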
498 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
499 u16 index, gfp_t gfp_mask)
501 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
502 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
503 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
506 if (unlikely(page == NULL)) {
507 BNX2X_ERR("Can't alloc sge\n");
511 mapping = dma_map_page(&bp->pdev->dev, page, 0,
512 SGE_PAGES, DMA_FROM_DEVICE);
513 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
514 __free_pages(page, PAGES_PER_SGE_SHIFT);
515 BNX2X_ERR("Can't map sge\n");
520 dma_unmap_addr_set(sw_buf, mapping, mapping);
522 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
523 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
528 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
529 struct bnx2x_agg_info *tpa_info,
532 struct eth_end_agg_rx_cqe *cqe,
535 struct sw_rx_page *rx_pg, old_rx_pg;
536 u32 i, frag_len, frag_size;
537 int err, j, frag_id = 0;
538 u16 len_on_bd = tpa_info->len_on_bd;
539 u16 full_page = 0, gro_size = 0;
541 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
543 if (fp->mode == TPA_MODE_GRO) {
544 gro_size = tpa_info->gro_size;
545 full_page = tpa_info->full_page;
548 /* This is needed in order to enable forwarding support */
550 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
551 le16_to_cpu(cqe->pkt_len),
552 le16_to_cpu(cqe->num_of_coalesced_segs));
554 #ifdef BNX2X_STOP_ON_ERROR
555 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
556 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
558 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
564 /* Run through the SGL and compose the fragmented skb */
565 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
566 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
568 /* FW gives the indices of the SGE as if the ring is an array
569 (meaning that "next" element will consume 2 indices) */
570 if (fp->mode == TPA_MODE_GRO)
571 frag_len = min_t(u32, frag_size, (u32)full_page);
573 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
575 rx_pg = &fp->rx_page_ring[sge_idx];
578 /* If we fail to allocate a substitute page, we simply stop
579 where we are and drop the whole packet */
580 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
582 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
586 /* Unmap the page as we're going to pass it to the stack */
587 dma_unmap_page(&bp->pdev->dev,
588 dma_unmap_addr(&old_rx_pg, mapping),
589 SGE_PAGES, DMA_FROM_DEVICE);
590 /* Add one frag and update the appropriate fields in the skb */
591 if (fp->mode == TPA_MODE_LRO)
592 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
596 for (rem = frag_len; rem > 0; rem -= gro_size) {
597 int len = rem > gro_size ? gro_size : rem;
598 skb_fill_page_desc(skb, frag_id++,
599 old_rx_pg.page, offset, len);
601 get_page(old_rx_pg.page);
606 skb->data_len += frag_len;
607 skb->truesize += SGE_PAGES;
608 skb->len += frag_len;
610 frag_size -= frag_len;
616 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
618 if (fp->rx_frag_size)
619 put_page(virt_to_head_page(data));
624 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
626 if (fp->rx_frag_size) {
627 /* GFP_KERNEL allocations are used only during initialization */
628 if (unlikely(gfp_mask & __GFP_WAIT))
629 return (void *)__get_free_page(gfp_mask);
631 return netdev_alloc_frag(fp->rx_frag_size);
634 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
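/* Illustrative sketch (not part of the driver): the allocator choice
 * above is steered by fp->rx_frag_size, which bnx2x_set_rx_buf_size()
 * further below sets to a non-zero value only when one buffer plus
 * NET_SKB_PAD fits in a page:
 */
#if 0
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
	fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; /* page frags */
else
	fp->rx_frag_size = 0;	/* fall back to kmalloc'ed buffers */
#endif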
638 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
640 const struct iphdr *iph = ip_hdr(skb);
643 skb_set_transport_header(skb, sizeof(struct iphdr));
646 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
647 iph->saddr, iph->daddr, 0);
650 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
652 struct ipv6hdr *iph = ipv6_hdr(skb);
655 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
658 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
659 &iph->saddr, &iph->daddr, 0);
662 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
663 void (*gro_func)(struct bnx2x*, struct sk_buff*))
665 skb_set_network_header(skb, 0);
667 tcp_gro_complete(skb);
671 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
675 if (skb_shinfo(skb)->gso_size) {
676 switch (be16_to_cpu(skb->protocol)) {
678 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
681 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
684 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
685 be16_to_cpu(skb->protocol));
689 skb_record_rx_queue(skb, fp->rx_queue);
690 napi_gro_receive(&fp->napi, skb);
693 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
694 struct bnx2x_agg_info *tpa_info,
696 struct eth_end_agg_rx_cqe *cqe,
699 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
700 u8 pad = tpa_info->placement_offset;
701 u16 len = tpa_info->len_on_bd;
702 struct sk_buff *skb = NULL;
703 u8 *new_data, *data = rx_buf->data;
704 u8 old_tpa_state = tpa_info->tpa_state;
706 tpa_info->tpa_state = BNX2X_TPA_STOP;
708 /* If there was an error during the handling of the TPA_START -
709 * drop this aggregation.
711 if (old_tpa_state == BNX2X_TPA_ERROR)
714 /* Try to allocate the new data */
715 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
716 /* Unmap skb in the pool anyway, as we are going to change
717 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
719 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
720 fp->rx_buf_size, DMA_FROM_DEVICE);
721 if (likely(new_data))
722 skb = build_skb(data, fp->rx_frag_size);
725 #ifdef BNX2X_STOP_ON_ERROR
726 if (pad + len > fp->rx_buf_size) {
727 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
728 pad, len, fp->rx_buf_size);
734 skb_reserve(skb, pad + NET_SKB_PAD);
736 skb->rxhash = tpa_info->rxhash;
737 skb->l4_rxhash = tpa_info->l4_rxhash;
739 skb->protocol = eth_type_trans(skb, bp->dev);
740 skb->ip_summed = CHECKSUM_UNNECESSARY;
742 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
743 skb, cqe, cqe_idx)) {
744 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
745 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
746 bnx2x_gro_receive(bp, fp, skb);
748 DP(NETIF_MSG_RX_STATUS,
749 "Failed to allocate new pages - dropping packet!\n");
750 dev_kfree_skb_any(skb);
753 /* put new data in bin */
754 rx_buf->data = new_data;
758 bnx2x_frag_free(fp, new_data);
760 /* drop the packet and keep the buffer in the bin */
761 DP(NETIF_MSG_RX_STATUS,
762 "Failed to allocate or map a new skb - dropping packet!\n");
763 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
766 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
767 u16 index, gfp_t gfp_mask)
770 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
771 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
774 data = bnx2x_frag_alloc(fp, gfp_mask);
775 if (unlikely(data == NULL))
778 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
781 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
782 bnx2x_frag_free(fp, data);
783 BNX2X_ERR("Can't map rx data\n");
788 dma_unmap_addr_set(rx_buf, mapping, mapping);
790 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
791 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
797 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
798 struct bnx2x_fastpath *fp,
799 struct bnx2x_eth_q_stats *qstats)
801 /* Do nothing if no L4 csum validation was done.
802 * We do not check whether IP csum was validated. For IPv4 we assume
803 * that if the card got as far as validating the L4 csum, it also
804 * validated the IP csum. IPv6 has no IP csum.
806 if (cqe->fast_path_cqe.status_flags &
807 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
810 /* If L4 validation was done, check if an error was found. */
812 if (cqe->fast_path_cqe.type_error_flags &
813 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
814 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
815 qstats->hw_csum_err++;
817 skb->ip_summed = CHECKSUM_UNNECESSARY;
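/* Illustrative decision table (derived from the checks above):
 *
 *   L4 csum not validated by HW     -> leave skb as CHECKSUM_NONE
 *   L4 validated, IP/L4 error flag  -> count hw_csum_err, CHECKSUM_NONE
 *   L4 validated, no error flags    -> CHECKSUM_UNNECESSARY
 */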
820 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
822 struct bnx2x *bp = fp->bp;
823 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
824 u16 sw_comp_cons, sw_comp_prod;
826 union eth_rx_cqe *cqe;
827 struct eth_fast_path_rx_cqe *cqe_fp;
829 #ifdef BNX2X_STOP_ON_ERROR
830 if (unlikely(bp->panic))
834 bd_cons = fp->rx_bd_cons;
835 bd_prod = fp->rx_bd_prod;
836 bd_prod_fw = bd_prod;
837 sw_comp_cons = fp->rx_comp_cons;
838 sw_comp_prod = fp->rx_comp_prod;
840 comp_ring_cons = RCQ_BD(sw_comp_cons);
841 cqe = &fp->rx_comp_ring[comp_ring_cons];
842 cqe_fp = &cqe->fast_path_cqe;
844 DP(NETIF_MSG_RX_STATUS,
845 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
847 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
848 struct sw_rx_bd *rx_buf = NULL;
851 enum eth_rx_cqe_type cqe_fp_type;
856 #ifdef BNX2X_STOP_ON_ERROR
857 if (unlikely(bp->panic))
861 bd_prod = RX_BD(bd_prod);
862 bd_cons = RX_BD(bd_cons);
864 cqe_fp_flags = cqe_fp->type_error_flags;
865 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
867 DP(NETIF_MSG_RX_STATUS,
868 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
869 CQE_TYPE(cqe_fp_flags),
870 cqe_fp_flags, cqe_fp->status_flags,
871 le32_to_cpu(cqe_fp->rss_hash_result),
872 le16_to_cpu(cqe_fp->vlan_tag),
873 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
875 /* is this a slowpath msg? */
876 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
877 bnx2x_sp_event(fp, cqe);
881 rx_buf = &fp->rx_buf_ring[bd_cons];
884 if (!CQE_TYPE_FAST(cqe_fp_type)) {
885 struct bnx2x_agg_info *tpa_info;
886 u16 frag_size, pages;
887 #ifdef BNX2X_STOP_ON_ERROR
889 if (fp->disable_tpa &&
890 (CQE_TYPE_START(cqe_fp_type) ||
891 CQE_TYPE_STOP(cqe_fp_type)))
892 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
893 CQE_TYPE(cqe_fp_type));
896 if (CQE_TYPE_START(cqe_fp_type)) {
897 u16 queue = cqe_fp->queue_index;
898 DP(NETIF_MSG_RX_STATUS,
899 "calling tpa_start on queue %d\n",
902 bnx2x_tpa_start(fp, queue,
908 queue = cqe->end_agg_cqe.queue_index;
909 tpa_info = &fp->tpa_info[queue];
910 DP(NETIF_MSG_RX_STATUS,
911 "calling tpa_stop on queue %d\n",
914 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
917 if (fp->mode == TPA_MODE_GRO)
918 pages = (frag_size + tpa_info->full_page - 1) /
921 pages = SGE_PAGE_ALIGN(frag_size) >>
924 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
925 &cqe->end_agg_cqe, comp_ring_cons);
926 #ifdef BNX2X_STOP_ON_ERROR
931 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
935 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
936 pad = cqe_fp->placement_offset;
937 dma_sync_single_for_cpu(&bp->pdev->dev,
938 dma_unmap_addr(rx_buf, mapping),
939 pad + RX_COPY_THRESH,
942 prefetch(data + pad); /* speed up eth_type_trans() */
943 /* is this an error packet? */
944 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
945 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
946 "ERROR flags %x rx packet %u\n",
947 cqe_fp_flags, sw_comp_cons);
948 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
952 /* Since we don't have a jumbo ring,
953 * copy small packets if mtu > 1500
955 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
956 (len <= RX_COPY_THRESH)) {
957 skb = netdev_alloc_skb_ip_align(bp->dev, len);
959 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
960 "ERROR packet dropped because of alloc failure\n");
961 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
964 memcpy(skb->data, data + pad, len);
965 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
967 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
969 dma_unmap_single(&bp->pdev->dev,
970 dma_unmap_addr(rx_buf, mapping),
973 skb = build_skb(data, fp->rx_frag_size);
974 if (unlikely(!skb)) {
975 bnx2x_frag_free(fp, data);
976 bnx2x_fp_qstats(bp, fp)->
977 rx_skb_alloc_failed++;
980 skb_reserve(skb, pad);
982 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
983 "ERROR packet dropped because of alloc failure\n");
984 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
986 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
992 skb->protocol = eth_type_trans(skb, bp->dev);
994 /* Set Toeplitz hash for a non-LRO skb */
995 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
996 skb->l4_rxhash = l4_rxhash;
998 skb_checksum_none_assert(skb);
1000 if (bp->dev->features & NETIF_F_RXCSUM)
1001 bnx2x_csum_validate(skb, cqe, fp,
1002 bnx2x_fp_qstats(bp, fp));
1004 skb_record_rx_queue(skb, fp->rx_queue);
1006 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1008 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1009 le16_to_cpu(cqe_fp->vlan_tag));
1011 skb_mark_napi_id(skb, &fp->napi);
1013 if (bnx2x_fp_ll_polling(fp))
1014 netif_receive_skb(skb);
1016 napi_gro_receive(&fp->napi, skb);
1018 rx_buf->data = NULL;
1020 bd_cons = NEXT_RX_IDX(bd_cons);
1021 bd_prod = NEXT_RX_IDX(bd_prod);
1022 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1025 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1026 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1028 /* mark CQE as free */
1029 BNX2X_SEED_CQE(cqe_fp);
1031 if (rx_pkt == budget)
1034 comp_ring_cons = RCQ_BD(sw_comp_cons);
1035 cqe = &fp->rx_comp_ring[comp_ring_cons];
1036 cqe_fp = &cqe->fast_path_cqe;
1039 fp->rx_bd_cons = bd_cons;
1040 fp->rx_bd_prod = bd_prod_fw;
1041 fp->rx_comp_cons = sw_comp_cons;
1042 fp->rx_comp_prod = sw_comp_prod;
1044 /* Update producers */
1045 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1048 fp->rx_pkt += rx_pkt;
1054 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1056 struct bnx2x_fastpath *fp = fp_cookie;
1057 struct bnx2x *bp = fp->bp;
1061 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1062 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1064 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1066 #ifdef BNX2X_STOP_ON_ERROR
1067 if (unlikely(bp->panic))
1071 /* Handle Rx and Tx according to MSI-X vector */
1072 for_each_cos_in_tx_queue(fp, cos)
1073 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1075 prefetch(&fp->sb_running_index[SM_RX_ID]);
1076 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1081 /* HW Lock for shared dual port PHYs */
1082 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1084 mutex_lock(&bp->port.phy_mutex);
1086 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1089 void bnx2x_release_phy_lock(struct bnx2x *bp)
1091 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1093 mutex_unlock(&bp->port.phy_mutex);
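/* Illustrative usage (mirrors bnx2x_link_report() below): the PHY lock
 * pairs a per-function mutex with the chip-level HW lock arbitrating
 * the MDIO bus between the two ports that share a PHY.
 */
#if 0
bnx2x_acquire_phy_lock(bp);
/* ... access the shared PHY / link state ... */
bnx2x_release_phy_lock(bp);
#endif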
1096 /* calculates MF speed according to current linespeed and MF configuration */
1097 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1099 u16 line_speed = bp->link_vars.line_speed;
1101 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1102 bp->mf_config[BP_VN(bp)]);
1104 /* Calculate the current MAX line speed limit for the MF devices */
1108 line_speed = (line_speed * maxCfg) / 100;
1109 else { /* SD mode */
1110 u16 vn_max_rate = maxCfg * 100;
1112 if (vn_max_rate < line_speed)
1113 line_speed = vn_max_rate;
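/* Illustrative numbers (an assumption, for clarity only): with a
 * 10000 Mbps link and maxCfg = 25 the two MF modes work out as
 *
 *   SI mode: line_speed = 10000 * 25 / 100       = 2500 Mbps
 *   SD mode: vn_max_rate = 25 * 100 = 2500 Mbps  -> min(10000, 2500)
 *
 * i.e. both cap the reported speed at a quarter of the physical link.
 */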
1121 * bnx2x_fill_report_data - fill link report data to report
1123 * @bp: driver handle
1124 * @data: link state to update
1126 * It uses non-atomic bit operations because it is called under the mutex.
1128 static void bnx2x_fill_report_data(struct bnx2x *bp,
1129 struct bnx2x_link_report_data *data)
1131 u16 line_speed = bnx2x_get_mf_speed(bp);
1133 memset(data, 0, sizeof(*data));
1135 /* Fill the report data: effective line speed */
1136 data->line_speed = line_speed;
1139 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1140 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1141 &data->link_report_flags);
1144 if (bp->link_vars.duplex == DUPLEX_FULL)
1145 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1147 /* Rx Flow Control is ON */
1148 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1149 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1151 /* Tx Flow Control is ON */
1152 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1153 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1157 * bnx2x_link_report - report link status to OS.
1159 * @bp: driver handle
1161 * Calls the __bnx2x_link_report() under the same locking scheme
1162 * as a link/PHY state managing code to ensure a consistent link
1166 void bnx2x_link_report(struct bnx2x *bp)
1168 bnx2x_acquire_phy_lock(bp);
1169 __bnx2x_link_report(bp);
1170 bnx2x_release_phy_lock(bp);
1174 * __bnx2x_link_report - report link status to OS.
1176 * @bp: driver handle
1178 * Non-atomic implementation.
1179 * Should be called under the phy_lock.
1181 void __bnx2x_link_report(struct bnx2x *bp)
1183 struct bnx2x_link_report_data cur_data;
1186 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1187 bnx2x_read_mf_cfg(bp);
1189 /* Read the current link report info */
1190 bnx2x_fill_report_data(bp, &cur_data);
1192 /* Don't report link down or exactly the same link status twice */
1193 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1194 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1195 &bp->last_reported_link.link_report_flags) &&
1196 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1197 &cur_data.link_report_flags)))
1202 /* We are going to report new link parameters now -
1203 * remember the current data for the next time.
1205 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1207 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1208 &cur_data.link_report_flags)) {
1209 netif_carrier_off(bp->dev);
1210 netdev_err(bp->dev, "NIC Link is Down\n");
1216 netif_carrier_on(bp->dev);
1218 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1219 &cur_data.link_report_flags))
1224 /* Handle the FC at the end so that only these flags would be
1225 * possibly set. This way we may easily check if there is no FC enabled.
1228 if (cur_data.link_report_flags) {
1229 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1230 &cur_data.link_report_flags)) {
1231 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1232 &cur_data.link_report_flags))
1233 flow = "ON - receive & transmit";
1235 flow = "ON - receive";
1237 flow = "ON - transmit";
1242 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1243 cur_data.line_speed, duplex, flow);
1247 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1251 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1252 struct eth_rx_sge *sge;
1254 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1256 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1257 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1260 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1261 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1265 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1266 struct bnx2x_fastpath *fp, int last)
1270 for (i = 0; i < last; i++) {
1271 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1272 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1273 u8 *data = first_buf->data;
1276 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1279 if (tpa_info->tpa_state == BNX2X_TPA_START)
1280 dma_unmap_single(&bp->pdev->dev,
1281 dma_unmap_addr(first_buf, mapping),
1282 fp->rx_buf_size, DMA_FROM_DEVICE);
1283 bnx2x_frag_free(fp, data);
1284 first_buf->data = NULL;
1288 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1292 for_each_rx_queue_cnic(bp, j) {
1293 struct bnx2x_fastpath *fp = &bp->fp[j];
1297 /* Activate BD ring */
1299 * this will generate an interrupt (to the TSTORM);
1300 * it must only be done after the chip is initialized
1302 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1307 void bnx2x_init_rx_rings(struct bnx2x *bp)
1309 int func = BP_FUNC(bp);
1313 /* Allocate TPA resources */
1314 for_each_eth_queue(bp, j) {
1315 struct bnx2x_fastpath *fp = &bp->fp[j];
1318 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1320 if (!fp->disable_tpa) {
1321 /* Fill the per-aggregation pool */
1322 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1323 struct bnx2x_agg_info *tpa_info =
1325 struct sw_rx_bd *first_buf =
1326 &tpa_info->first_buf;
1329 bnx2x_frag_alloc(fp, GFP_KERNEL);
1330 if (!first_buf->data) {
1331 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1333 bnx2x_free_tpa_pool(bp, fp, i);
1334 fp->disable_tpa = 1;
1337 dma_unmap_addr_set(first_buf, mapping, 0);
1338 tpa_info->tpa_state = BNX2X_TPA_STOP;
1341 /* "next page" elements initialization */
1342 bnx2x_set_next_page_sgl(fp);
1344 /* set SGEs bit mask */
1345 bnx2x_init_sge_ring_bit_mask(fp);
1347 /* Allocate SGEs and initialize the ring elements */
1348 for (i = 0, ring_prod = 0;
1349 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1351 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1353 BNX2X_ERR("was only able to allocate %d rx sges\n",
1355 BNX2X_ERR("disabling TPA for queue[%d]\n",
1357 /* Cleanup already allocated elements */
1358 bnx2x_free_rx_sge_range(bp, fp,
1360 bnx2x_free_tpa_pool(bp, fp,
1362 fp->disable_tpa = 1;
1366 ring_prod = NEXT_SGE_IDX(ring_prod);
1369 fp->rx_sge_prod = ring_prod;
1373 for_each_eth_queue(bp, j) {
1374 struct bnx2x_fastpath *fp = &bp->fp[j];
1378 /* Activate BD ring */
1380 * this will generate an interrupt (to the TSTORM);
1381 * it must only be done after the chip is initialized
1383 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1389 if (CHIP_IS_E1(bp)) {
1390 REG_WR(bp, BAR_USTRORM_INTMEM +
1391 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1392 U64_LO(fp->rx_comp_mapping));
1393 REG_WR(bp, BAR_USTRORM_INTMEM +
1394 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1395 U64_HI(fp->rx_comp_mapping));
1400 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1403 struct bnx2x *bp = fp->bp;
1405 for_each_cos_in_tx_queue(fp, cos) {
1406 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1407 unsigned pkts_compl = 0, bytes_compl = 0;
1409 u16 sw_prod = txdata->tx_pkt_prod;
1410 u16 sw_cons = txdata->tx_pkt_cons;
1412 while (sw_cons != sw_prod) {
1413 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1414 &pkts_compl, &bytes_compl);
1418 netdev_tx_reset_queue(
1419 netdev_get_tx_queue(bp->dev,
1420 txdata->txq_index));
1424 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1428 for_each_tx_queue_cnic(bp, i) {
1429 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1433 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1437 for_each_eth_queue(bp, i) {
1438 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1442 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1444 struct bnx2x *bp = fp->bp;
1447 /* ring wasn't allocated */
1448 if (fp->rx_buf_ring == NULL)
1451 for (i = 0; i < NUM_RX_BD; i++) {
1452 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1453 u8 *data = rx_buf->data;
1457 dma_unmap_single(&bp->pdev->dev,
1458 dma_unmap_addr(rx_buf, mapping),
1459 fp->rx_buf_size, DMA_FROM_DEVICE);
1461 rx_buf->data = NULL;
1462 bnx2x_frag_free(fp, data);
1466 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1470 for_each_rx_queue_cnic(bp, j) {
1471 bnx2x_free_rx_bds(&bp->fp[j]);
1475 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1479 for_each_eth_queue(bp, j) {
1480 struct bnx2x_fastpath *fp = &bp->fp[j];
1482 bnx2x_free_rx_bds(fp);
1484 if (!fp->disable_tpa)
1485 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1489 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1491 bnx2x_free_tx_skbs_cnic(bp);
1492 bnx2x_free_rx_skbs_cnic(bp);
1495 void bnx2x_free_skbs(struct bnx2x *bp)
1497 bnx2x_free_tx_skbs(bp);
1498 bnx2x_free_rx_skbs(bp);
1501 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1503 /* load old values */
1504 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1506 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1507 /* leave all but MAX value */
1508 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1510 /* set new MAX value */
1511 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1512 & FUNC_MF_CFG_MAX_BW_MASK;
1514 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1519 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1521 * @bp: driver handle
1522 * @nvecs: number of vectors to be released
1524 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1528 if (nvecs == offset)
1531 /* VFs don't have a default SB */
1533 free_irq(bp->msix_table[offset].vector, bp->dev);
1534 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1535 bp->msix_table[offset].vector);
1539 if (CNIC_SUPPORT(bp)) {
1540 if (nvecs == offset)
1545 for_each_eth_queue(bp, i) {
1546 if (nvecs == offset)
1548 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1549 i, bp->msix_table[offset].vector);
1551 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1555 void bnx2x_free_irq(struct bnx2x *bp)
1557 if (bp->flags & USING_MSIX_FLAG &&
1558 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1559 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1561 /* vfs don't have a default status block */
1565 bnx2x_free_msix_irqs(bp, nvecs);
1567 free_irq(bp->dev->irq, bp->dev);
1571 int bnx2x_enable_msix(struct bnx2x *bp)
1573 int msix_vec = 0, i, rc;
1575 /* VFs don't have a default status block */
1577 bp->msix_table[msix_vec].entry = msix_vec;
1578 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1579 bp->msix_table[0].entry);
1583 /* Cnic requires an msix vector for itself */
1584 if (CNIC_SUPPORT(bp)) {
1585 bp->msix_table[msix_vec].entry = msix_vec;
1586 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1587 msix_vec, bp->msix_table[msix_vec].entry);
1591 /* We need separate vectors for ETH queues only (not FCoE) */
1592 for_each_eth_queue(bp, i) {
1593 bp->msix_table[msix_vec].entry = msix_vec;
1594 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1595 msix_vec, msix_vec, i);
1599 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1602 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1605 * reconfigure number of tx/rx queues according to available
1608 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1609 /* how many fewer vectors will we have? */
1610 int diff = msix_vec - rc;
1612 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1614 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1617 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1621 * decrease number of queues by number of unallocated entries
1623 bp->num_ethernet_queues -= diff;
1624 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1626 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1628 } else if (rc > 0) {
1629 /* Get by with single vector */
1630 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1632 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1637 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1638 bp->flags |= USING_SINGLE_MSIX_FLAG;
1640 BNX2X_DEV_INFO("set number of queues to 1\n");
1641 bp->num_ethernet_queues = 1;
1642 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1643 } else if (rc < 0) {
1644 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1648 bp->flags |= USING_MSIX_FLAG;
1653 /* fall to INTx if not enough memory */
1655 bp->flags |= DISABLE_MSI_FLAG;
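/* Illustrative summary (not part of the driver) of the MSI-X fallback
 * ladder implemented above:
 *
 *   pci_enable_msix(all vectors) == 0   -> full MSI-X
 *   rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)    -> retry with rc vectors,
 *                                          shrink the ETH queues
 *   0 < rc < minimum                    -> retry with 1 vector,
 *                                          single-queue mode
 *   rc < 0                              -> no MSI-X, MSI/INTx path
 */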
1660 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1662 int i, rc, offset = 0;
1664 /* no default status block for vf */
1666 rc = request_irq(bp->msix_table[offset++].vector,
1667 bnx2x_msix_sp_int, 0,
1668 bp->dev->name, bp->dev);
1670 BNX2X_ERR("request sp irq failed\n");
1675 if (CNIC_SUPPORT(bp))
1678 for_each_eth_queue(bp, i) {
1679 struct bnx2x_fastpath *fp = &bp->fp[i];
1680 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1683 rc = request_irq(bp->msix_table[offset].vector,
1684 bnx2x_msix_fp_int, 0, fp->name, fp);
1686 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1687 bp->msix_table[offset].vector, rc);
1688 bnx2x_free_msix_irqs(bp, offset);
1695 i = BNX2X_NUM_ETH_QUEUES(bp);
1697 offset = 1 + CNIC_SUPPORT(bp);
1698 netdev_info(bp->dev,
1699 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1700 bp->msix_table[0].vector,
1701 0, bp->msix_table[offset].vector,
1702 i - 1, bp->msix_table[offset + i - 1].vector);
1704 offset = CNIC_SUPPORT(bp);
1705 netdev_info(bp->dev,
1706 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1707 0, bp->msix_table[offset].vector,
1708 i - 1, bp->msix_table[offset + i - 1].vector);
1713 int bnx2x_enable_msi(struct bnx2x *bp)
1717 rc = pci_enable_msi(bp->pdev);
1719 BNX2X_DEV_INFO("MSI is not attainable\n");
1722 bp->flags |= USING_MSI_FLAG;
1727 static int bnx2x_req_irq(struct bnx2x *bp)
1729 unsigned long flags;
1732 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1735 flags = IRQF_SHARED;
1737 if (bp->flags & USING_MSIX_FLAG)
1738 irq = bp->msix_table[0].vector;
1740 irq = bp->pdev->irq;
1742 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1745 static int bnx2x_setup_irqs(struct bnx2x *bp)
1748 if (bp->flags & USING_MSIX_FLAG &&
1749 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1750 rc = bnx2x_req_msix_irqs(bp);
1754 rc = bnx2x_req_irq(bp);
1756 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1759 if (bp->flags & USING_MSI_FLAG) {
1760 bp->dev->irq = bp->pdev->irq;
1761 netdev_info(bp->dev, "using MSI IRQ %d\n",
1764 if (bp->flags & USING_MSIX_FLAG) {
1765 bp->dev->irq = bp->msix_table[0].vector;
1766 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1774 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1778 for_each_rx_queue_cnic(bp, i) {
1779 bnx2x_fp_init_lock(&bp->fp[i]);
1780 napi_enable(&bnx2x_fp(bp, i, napi));
1784 static void bnx2x_napi_enable(struct bnx2x *bp)
1788 for_each_eth_queue(bp, i) {
1789 bnx2x_fp_init_lock(&bp->fp[i]);
1790 napi_enable(&bnx2x_fp(bp, i, napi));
1794 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1798 for_each_rx_queue_cnic(bp, i) {
1799 napi_disable(&bnx2x_fp(bp, i, napi));
1800 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1801 usleep_range(1000, 2000);
1805 static void bnx2x_napi_disable(struct bnx2x *bp)
1809 for_each_eth_queue(bp, i) {
1810 napi_disable(&bnx2x_fp(bp, i, napi));
1811 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1812 usleep_range(1000, 2000);
1816 void bnx2x_netif_start(struct bnx2x *bp)
1818 if (netif_running(bp->dev)) {
1819 bnx2x_napi_enable(bp);
1820 if (CNIC_LOADED(bp))
1821 bnx2x_napi_enable_cnic(bp);
1822 bnx2x_int_enable(bp);
1823 if (bp->state == BNX2X_STATE_OPEN)
1824 netif_tx_wake_all_queues(bp->dev);
1828 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1830 bnx2x_int_disable_sync(bp, disable_hw);
1831 bnx2x_napi_disable(bp);
1832 if (CNIC_LOADED(bp))
1833 bnx2x_napi_disable_cnic(bp);
1836 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1838 struct bnx2x *bp = netdev_priv(dev);
1840 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1841 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1842 u16 ether_type = ntohs(hdr->h_proto);
1844 /* Skip VLAN tag if present */
1845 if (ether_type == ETH_P_8021Q) {
1846 struct vlan_ethhdr *vhdr =
1847 (struct vlan_ethhdr *)skb->data;
1849 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1852 /* If ethertype is FCoE or FIP - use FCoE ring */
1853 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1854 return bnx2x_fcoe_tx(bp, txq_index);
1857 /* select a non-FCoE queue */
1858 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1861 void bnx2x_set_num_queues(struct bnx2x *bp)
1864 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1866 /* override in STORAGE SD modes */
1867 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1868 bp->num_ethernet_queues = 1;
1870 /* Add special queues */
1871 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1872 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1874 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1878 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1880 * @bp: Driver handle
1882 * We currently support at most 16 Tx queues for each CoS, thus we will
1883 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1886 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1887 * index after all ETH L2 indices.
1889 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1890 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1891 * 16..31,...) with indices that are not coupled with any real Tx queue.
1893 * The proper configuration of skb->queue_mapping is handled by
1894 * bnx2x_select_queue() and __skb_tx_hash().
1896 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1897 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1899 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1903 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1904 rx = BNX2X_NUM_ETH_QUEUES(bp);
1906 /* account for fcoe queue */
1907 if (include_cnic && !NO_FCOE(bp)) {
1912 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1914 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1917 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1919 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1923 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
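/* Illustrative numbers (an assumption, for clarity only): with 8 ETH
 * queues, max_cos = 3 and an FCoE L2 queue included, the code above
 * would request
 *
 *   tx = 8 * 3 + 1 = 25   (FCoE takes the index after all ETH indices)
 *   rx = 8 + 1     = 9
 */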
1929 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1933 for_each_queue(bp, i) {
1934 struct bnx2x_fastpath *fp = &bp->fp[i];
1937 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1940 * Although there are no IP frames expected to arrive on
1941 * this ring we still want to add an
1942 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun
1945 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1948 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1949 IP_HEADER_ALIGNMENT_PADDING +
1952 BNX2X_FW_RX_ALIGN_END;
1953 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1954 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1955 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1957 fp->rx_frag_size = 0;
1961 static int bnx2x_init_rss(struct bnx2x *bp)
1964 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1966 /* Prepare the initial contents for the indirection table if RSS is enabled */
1969 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1970 bp->rss_conf_obj.ind_table[i] =
1972 ethtool_rxfh_indir_default(i, num_eth_queues);
1975 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1976 * per-port, so if explicit configuration is needed, do it only for a PMF.
1979 * For 57712 and newer, on the other hand, it's a per-function configuration.
1982 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1985 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1986 bool config_hash, bool enable)
1988 struct bnx2x_config_rss_params params = {NULL};
1990 /* Although RSS is meaningless when there is a single HW queue we
1991 * still need it enabled in order to have HW Rx hash generated.
1993 * if (!is_eth_multi(bp))
1994 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1997 params.rss_obj = rss_obj;
1999 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2002 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2004 /* RSS configuration */
2005 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2006 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2007 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2008 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2009 if (rss_obj->udp_rss_v4)
2010 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2011 if (rss_obj->udp_rss_v6)
2012 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2014 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2018 params.rss_result_mask = MULTI_MASK;
2020 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2024 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2025 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2029 return bnx2x_config_rss(bp, ¶ms);
2031 return bnx2x_vfpf_config_rss(bp, ¶ms);
2034 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2036 struct bnx2x_func_state_params func_params = {NULL};
2038 /* Prepare parameters for function state transitions */
2039 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2041 func_params.f_obj = &bp->func_obj;
2042 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2044 func_params.params.hw_init.load_phase = load_code;
2046 return bnx2x_func_state_change(bp, &func_params);
2050 * Cleans the objects that have internal lists without sending
2051 * ramrods. Should be run when interrupts are disabled.
2053 void bnx2x_squeeze_objects(struct bnx2x *bp)
2056 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2057 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2058 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2060 /***************** Cleanup MACs' object first *************************/
2062 /* Wait for completion of requested */
2063 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2064 /* Perform a dry cleanup */
2065 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2067 /* Clean ETH primary MAC */
2068 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2069 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2072 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2074 /* Cleanup UC list */
2076 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2077 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2080 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2082 /***************** Now clean mcast object *****************************/
2083 rparam.mcast_obj = &bp->mcast_obj;
2084 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2086 /* Add a DEL command... - Since we're doing a driver cleanup only,
2087 * we take a lock surrounding both the initial send and the CONTs,
2088 * as we don't want a true completion to disrupt us in the middle.
2090 netif_addr_lock_bh(bp->dev);
2091 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2093 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2096 /* ...and wait until all pending commands are cleared */
2097 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2100 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2102 netif_addr_unlock_bh(bp->dev);
2106 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2108 netif_addr_unlock_bh(bp->dev);
2111 #ifndef BNX2X_STOP_ON_ERROR
2112 #define LOAD_ERROR_EXIT(bp, label) \
2114 (bp)->state = BNX2X_STATE_ERROR; \
2118 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2120 bp->cnic_loaded = false; \
2123 #else /*BNX2X_STOP_ON_ERROR*/
2124 #define LOAD_ERROR_EXIT(bp, label) \
2126 (bp)->state = BNX2X_STATE_ERROR; \
2130 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2132 bp->cnic_loaded = false; \
2136 #endif /*BNX2X_STOP_ON_ERROR*/
2138 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2140 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2141 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2145 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2147 int num_groups, vf_headroom = 0;
2148 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2150 /* number of queues for statistics is number of eth queues + FCoE */
2151 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2153 /* Total number of FW statistics requests =
2154 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2155 * and fcoe l2 queue) stats + num of queues (which includes another 1
2156 * for fcoe l2 queue if applicable)
2158 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2160 /* vf stats appear in the request list, but their data is allocated by
2161 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2162 * it is used to determine where to place the vf stats queries in the
2166 vf_headroom = bnx2x_vf_headroom(bp);
2168 /* Request is built from stats_query_header and an array of
2169 * stats_query_cmd_group each of which contains
2170 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2171 * configured in the stats_query_header.
2174 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2175 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2178 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2179 bp->fw_stats_num, vf_headroom, num_groups);
2180 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2181 num_groups * sizeof(struct stats_query_cmd_group);
2183 /* Data for statistics requests + stats_counter
2184 * stats_counter holds per-STORM counters that are incremented
2185 * when STORM has finished with the current request.
2186 * memory for FCoE offloaded statistics are counted anyway,
2187 * even if they will not be sent.
2188 * VF stats are not accounted for here as the data of VF stats is stored
2189 * in memory allocated by the VF, not here.
2191 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2192 sizeof(struct per_pf_stats) +
2193 sizeof(struct fcoe_statistics_params) +
2194 sizeof(struct per_queue_stats) * num_queue_stats +
2195 sizeof(struct stats_counter);
2197 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2198 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2201 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2202 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2203 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2204 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2205 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2206 bp->fw_stats_req_sz;
2208 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2209 U64_HI(bp->fw_stats_req_mapping),
2210 U64_LO(bp->fw_stats_req_mapping));
2211 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2212 U64_HI(bp->fw_stats_data_mapping),
2213 U64_LO(bp->fw_stats_data_mapping));
2217 bnx2x_free_fw_stats_mem(bp);
2218 BNX2X_ERR("Can't allocate FW stats memory\n");
2222 /* send load request to mcp and analyze response */
2223 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2229 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2230 DRV_MSG_SEQ_NUMBER_MASK);
2231 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2233 /* Get current FW pulse sequence */
2234 bp->fw_drv_pulse_wr_seq =
2235 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2236 DRV_PULSE_SEQ_MASK);
2237 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2239 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2241 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2242 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2245 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2247 /* if mcp fails to respond we must abort */
2248 if (!(*load_code)) {
2249 BNX2X_ERR("MCP response failure, aborting\n");
2253 /* If mcp refused (e.g. other port is in diagnostic mode) we must abort */
2256 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2257 BNX2X_ERR("MCP refused load request, aborting\n");
2263 /* check whether another PF has already loaded FW to chip. In
2264 * virtualized environments a pf from another VM may have already
2265 * initialized the device, including loading the FW.
2267 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2269 /* is another pf loaded on this engine? */
2270 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2271 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2272 /* build my FW version dword */
2273 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2274 (BCM_5710_FW_MINOR_VERSION << 8) +
2275 (BCM_5710_FW_REVISION_VERSION << 16) +
2276 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2278 /* read loaded FW from chip */
2279 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2281 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2284 /* abort nic load if version mismatch */
2285 if (my_fw != loaded_fw) {
2286 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my FW %x. Aborting\n",
2294 /* returns the "mcp load_code" according to global load_count array */
2295 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2297 int path = BP_PATH(bp);
2299 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2300 path, load_count[path][0], load_count[path][1],
2301 load_count[path][2]);
2302 load_count[path][0]++;
2303 load_count[path][1 + port]++;
2304 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2305 path, load_count[path][0], load_count[path][1],
2306 load_count[path][2]);
2307 if (load_count[path][0] == 1)
2308 return FW_MSG_CODE_DRV_LOAD_COMMON;
2309 else if (load_count[path][1 + port] == 1)
2310 return FW_MSG_CODE_DRV_LOAD_PORT;
2312 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
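/* Example walk-through of the counters above (counts illustrative): on a
 * freshly reset path load_count[path] is {0, 0, 0}; the first function to
 * load (say on port 0) makes it {1, 1, 0} and gets DRV_LOAD_COMMON, the
 * first function on port 1 then sees {2, 1, 1} and gets DRV_LOAD_PORT,
 * and every later function on either port gets DRV_LOAD_FUNCTION.
 */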
2315 /* mark PMF if applicable */
2316 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2318 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2319 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2320 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2322 /* We need the barrier to ensure the ordering between the
2323 * writing to bp->port.pmf here and reading it from the
2324 * bnx2x_periodic_task().
2331 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2334 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2336 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2337 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2338 (bp->common.shmem2_base)) {
2339 if (SHMEM2_HAS(bp, dcc_support))
2340 SHMEM2_WR(bp, dcc_support,
2341 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2342 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2343 if (SHMEM2_HAS(bp, afex_driver_support))
2344 SHMEM2_WR(bp, afex_driver_support,
2345 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2348 /* Set AFEX default VLAN tag to an invalid value */
2349 bp->afex_def_vlan_tag = -1;
2353 * bnx2x_bz_fp - zero content of the fastpath structure.
2355 * @bp: driver handle
2356 * @index: fastpath index to be zeroed
2358 * Makes sure the contents of bp->fp[index].napi are kept
2361 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2363 struct bnx2x_fastpath *fp = &bp->fp[index];
2365 struct napi_struct orig_napi = fp->napi;
2366 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2368 /* bzero bnx2x_fastpath contents */
2370 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2371 sizeof(struct bnx2x_agg_info));
2372 memset(fp, 0, sizeof(*fp));
2374 /* Restore the NAPI object as it has been already initialized */
2375 fp->napi = orig_napi;
2376 fp->tpa_info = orig_tpa_info;
2380 fp->max_cos = bp->max_cos;
2382 /* Special queues support only one CoS */
2385 /* Init txdata pointers */
2387 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2389 for_each_cos_in_tx_queue(fp, cos)
2390 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2391 BNX2X_NUM_ETH_QUEUES(bp) + index];
2393 /* Set the TPA flag for each queue. The TPA flag determines the queue's
2394 * minimal size, so it must be set prior to queue memory allocation
2396 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2397 (bp->flags & GRO_ENABLE_FLAG &&
2398 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2399 if (bp->flags & TPA_ENABLE_FLAG)
2400 fp->mode = TPA_MODE_LRO;
2401 else if (bp->flags & GRO_ENABLE_FLAG)
2402 fp->mode = TPA_MODE_GRO;
2404 /* We don't want TPA on an FCoE L2 ring */
2406 fp->disable_tpa = 1;
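/* Summary of the TPA/GRO decision above (a sketch of the cases, not new
 * behavior):
 *
 *   TPA_ENABLE_FLAG set                   -> disable_tpa = 0, TPA_MODE_LRO
 *   GRO_ENABLE_FLAG set + MTU allows GRO  -> disable_tpa = 0, TPA_MODE_GRO
 *   neither                               -> disable_tpa = 1
 *
 * and an FCoE L2 ring forces disable_tpa = 1 regardless of the flags.
 */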
2409 int bnx2x_load_cnic(struct bnx2x *bp)
2411 int i, rc, port = BP_PORT(bp);
2413 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2415 mutex_init(&bp->cnic_mutex);
2418 rc = bnx2x_alloc_mem_cnic(bp);
2420 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2421 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2425 rc = bnx2x_alloc_fp_mem_cnic(bp);
2427 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2428 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2431 /* Update the number of queues with the cnic queues */
2432 rc = bnx2x_set_real_num_queues(bp, 1);
2434 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2435 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2438 /* Add all CNIC NAPI objects */
2439 bnx2x_add_all_napi_cnic(bp);
2440 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2441 bnx2x_napi_enable_cnic(bp);
2443 rc = bnx2x_init_hw_func_cnic(bp);
2445 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2447 bnx2x_nic_init_cnic(bp);
2450 /* Enable Timer scan */
2451 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2453 /* setup cnic queues */
2454 for_each_cnic_queue(bp, i) {
2455 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2457 BNX2X_ERR("Queue setup failed\n");
2458 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2463 /* Initialize Rx filter. */
2464 bnx2x_set_rx_mode_inner(bp);
2466 /* re-read iscsi info */
2467 bnx2x_get_iscsi_info(bp);
2468 bnx2x_setup_cnic_irq_info(bp);
2469 bnx2x_setup_cnic_info(bp);
2470 bp->cnic_loaded = true;
2471 if (bp->state == BNX2X_STATE_OPEN)
2472 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2474 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2478 #ifndef BNX2X_STOP_ON_ERROR
2480 /* Disable Timer scan */
2481 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2484 bnx2x_napi_disable_cnic(bp);
2485 /* Update the number of queues without the cnic queues */
2486 if (bnx2x_set_real_num_queues(bp, 0))
2487 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2489 BNX2X_ERR("CNIC-related load failed\n");
2490 bnx2x_free_fp_mem_cnic(bp);
2491 bnx2x_free_mem_cnic(bp);
2493 #endif /* ! BNX2X_STOP_ON_ERROR */
2496 /* must be called with rtnl_lock */
2497 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2499 int port = BP_PORT(bp);
2500 int i, rc = 0, load_code = 0;
2502 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2504 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2506 #ifdef BNX2X_STOP_ON_ERROR
2507 if (unlikely(bp->panic)) {
2508 BNX2X_ERR("Can't load NIC when there is panic\n");
2513 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2515 /* zero the structure w/o any lock, before SP handler is initialized */
2516 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2517 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2518 &bp->last_reported_link.link_report_flags);
2521 /* must be called before memory allocation and HW init */
2522 bnx2x_ilt_set_info(bp);
2525 * Zero fastpath structures, preserving invariants that are allocated
2526 * only once: the napi struct, fp index, max_cos and bp pointer.
2527 * Also set fp->disable_tpa and txdata_ptr.
2529 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2530 for_each_queue(bp, i)
2532 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2533 bp->num_cnic_queues) *
2534 sizeof(struct bnx2x_fp_txdata));
2536 bp->fcoe_init = false;
2538 /* Set the receive queues buffer size */
2539 bnx2x_set_rx_buf_size(bp);
2542 rc = bnx2x_alloc_mem(bp);
2544 BNX2X_ERR("Unable to allocate bp memory\n");
2549 /* Needs to be done after alloc mem, since it's self-adjusting to the
2550 * amount of memory available for RSS queues
2552 rc = bnx2x_alloc_fp_mem(bp);
2554 BNX2X_ERR("Unable to allocate memory for fps\n");
2555 LOAD_ERROR_EXIT(bp, load_error0);
2558 /* Allocate memory for FW statistics */
2559 if (bnx2x_alloc_fw_stats_mem(bp))
2560 LOAD_ERROR_EXIT(bp, load_error0);
2562 /* request pf to initialize status blocks */
2564 rc = bnx2x_vfpf_init(bp);
2566 LOAD_ERROR_EXIT(bp, load_error0);
2569 /* Since bnx2x_alloc_mem() may possibly update
2570 * bp->num_queues, bnx2x_set_real_num_queues() should always
2571 * come after it. At this stage cnic queues are not counted.
2573 rc = bnx2x_set_real_num_queues(bp, 0);
2575 BNX2X_ERR("Unable to set real_num_queues\n");
2576 LOAD_ERROR_EXIT(bp, load_error0);
2579 /* Configure multi-CoS mappings in the kernel.
2580 * This configuration may be overridden by a multi-class queue
2581 * discipline or by a DCBX negotiation result.
2583 bnx2x_setup_tc(bp->dev, bp->max_cos);
2585 /* Add all NAPI objects */
2586 bnx2x_add_all_napi(bp);
2587 DP(NETIF_MSG_IFUP, "napi added\n");
2588 bnx2x_napi_enable(bp);
2591 /* set pf load just before approaching the MCP */
2592 bnx2x_set_pf_load(bp);
2594 /* if mcp exists send load request and analyze response */
2595 if (!BP_NOMCP(bp)) {
2596 /* attempt to load pf */
2597 rc = bnx2x_nic_load_request(bp, &load_code);
2599 LOAD_ERROR_EXIT(bp, load_error1);
2601 /* what did mcp say? */
2602 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2604 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2605 LOAD_ERROR_EXIT(bp, load_error2);
2608 load_code = bnx2x_nic_load_no_mcp(bp, port);
2611 /* mark pmf if applicable */
2612 bnx2x_nic_load_pmf(bp, load_code);
2614 /* Init Function state controlling object */
2615 bnx2x__init_func_obj(bp);
2618 rc = bnx2x_init_hw(bp, load_code);
2620 BNX2X_ERR("HW init failed, aborting\n");
2621 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2622 LOAD_ERROR_EXIT(bp, load_error2);
2626 bnx2x_pre_irq_nic_init(bp);
2628 /* Connect to IRQs */
2629 rc = bnx2x_setup_irqs(bp);
2631 BNX2X_ERR("setup irqs failed\n");
2633 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2634 LOAD_ERROR_EXIT(bp, load_error2);
2637 /* Init per-function objects */
2639 /* Setup NIC internals and enable interrupts */
2640 bnx2x_post_irq_nic_init(bp, load_code);
2642 bnx2x_init_bp_objs(bp);
2643 bnx2x_iov_nic_init(bp);
2645 /* Set AFEX default VLAN tag to an invalid value */
2646 bp->afex_def_vlan_tag = -1;
2647 bnx2x_nic_load_afex_dcc(bp, load_code);
2648 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2649 rc = bnx2x_func_start(bp);
2651 BNX2X_ERR("Function start failed!\n");
2652 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2654 LOAD_ERROR_EXIT(bp, load_error3);
2657 /* Send LOAD_DONE command to MCP */
2658 if (!BP_NOMCP(bp)) {
2659 load_code = bnx2x_fw_command(bp,
2660 DRV_MSG_CODE_LOAD_DONE, 0);
2662 BNX2X_ERR("MCP response failure, aborting\n");
2664 LOAD_ERROR_EXIT(bp, load_error3);
2668 /* initialize FW coalescing state machines in RAM */
2669 bnx2x_update_coalesce(bp);
2672 /* setup the leading queue */
2673 rc = bnx2x_setup_leading(bp);
2675 BNX2X_ERR("Setup leading failed!\n");
2676 LOAD_ERROR_EXIT(bp, load_error3);
2679 /* set up the rest of the queues */
2680 for_each_nondefault_eth_queue(bp, i) {
2682 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2684 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2686 BNX2X_ERR("Queue %d setup failed\n", i);
2687 LOAD_ERROR_EXIT(bp, load_error3);
2692 rc = bnx2x_init_rss(bp);
2694 BNX2X_ERR("PF RSS init failed\n");
2695 LOAD_ERROR_EXIT(bp, load_error3);
2698 /* Now that clients are configured we are ready to work */
2699 bp->state = BNX2X_STATE_OPEN;
2701 /* Configure a ucast MAC */
2703 rc = bnx2x_set_eth_mac(bp, true);
2705 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2708 BNX2X_ERR("Setting Ethernet MAC failed\n");
2709 LOAD_ERROR_EXIT(bp, load_error3);
2712 if (IS_PF(bp) && bp->pending_max) {
2713 bnx2x_update_max_mf_config(bp, bp->pending_max);
2714 bp->pending_max = 0;
2718 rc = bnx2x_initial_phy_init(bp, load_mode);
2720 LOAD_ERROR_EXIT(bp, load_error3);
2722 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2724 /* Start fast path */
2726 /* Initialize Rx filter. */
2727 bnx2x_set_rx_mode_inner(bp);
2730 switch (load_mode) {
2732 /* Tx queues should only be re-enabled */
2733 netif_tx_wake_all_queues(bp->dev);
2737 netif_tx_start_all_queues(bp->dev);
2738 smp_mb__after_clear_bit();
2742 case LOAD_LOOPBACK_EXT:
2743 bp->state = BNX2X_STATE_DIAG;
2751 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2753 bnx2x__link_status_update(bp);
2755 /* start the timer */
2756 mod_timer(&bp->timer, jiffies + bp->current_interval);
2758 if (CNIC_ENABLED(bp))
2759 bnx2x_load_cnic(bp);
2761 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2762 /* mark driver is loaded in shmem2 */
2764 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2765 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2766 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2767 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2770 /* Wait for all pending SP commands to complete */
2771 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2772 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2773 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2777 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2778 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2779 bnx2x_dcbx_init(bp, false);
2781 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2785 #ifndef BNX2X_STOP_ON_ERROR
2788 bnx2x_int_disable_sync(bp, 1);
2790 /* Clean queueable objects */
2791 bnx2x_squeeze_objects(bp);
2794 /* Free SKBs, SGEs, TPA pool and driver internals */
2795 bnx2x_free_skbs(bp);
2796 for_each_rx_queue(bp, i)
2797 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2802 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2803 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2804 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2809 bnx2x_napi_disable(bp);
2810 bnx2x_del_all_napi(bp);
2812 /* clear pf_load status, as it was already set */
2814 bnx2x_clear_pf_load(bp);
2816 bnx2x_free_fw_stats_mem(bp);
2817 bnx2x_free_fp_mem(bp);
2821 #endif /* ! BNX2X_STOP_ON_ERROR */
2824 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2828 /* Wait until tx fastpath tasks complete */
2829 for_each_tx_queue(bp, i) {
2830 struct bnx2x_fastpath *fp = &bp->fp[i];
2832 for_each_cos_in_tx_queue(fp, cos)
2833 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2840 /* must be called with rtnl_lock */
2841 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2844 bool global = false;
2846 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2848 /* mark driver is unloaded in shmem2 */
2849 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2851 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2852 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2853 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2856 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2857 (bp->state == BNX2X_STATE_CLOSED ||
2858 bp->state == BNX2X_STATE_ERROR)) {
2859 /* We can get here if the driver has been unloaded
2860 * during parity error recovery and is either waiting for a
2861 * leader to complete or for other functions to unload and
2862 * then ifdown has been issued. In this case we want to
2863 * unload and let other functions complete a recovery
2866 bp->recovery_state = BNX2X_RECOVERY_DONE;
2868 bnx2x_release_leader_lock(bp);
2871 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2872 BNX2X_ERR("Can't unload in closed or error state\n");
2876 /* Nothing to do during unload if the previous bnx2x_nic_load()
2877 * has not completed successfully - all resources are released.
2879 * We can get here only after an unsuccessful ndo_* callback, during which
2880 * the dev->IFF_UP flag is still on.
2882 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2885 /* It's important to set bp->state to a value different from
2886 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2887 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2889 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2892 /* indicate to VFs that the PF is going down */
2893 bnx2x_iov_channel_down(bp);
2895 if (CNIC_LOADED(bp))
2896 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2899 bnx2x_tx_disable(bp);
2900 netdev_reset_tc(bp->dev);
2902 bp->rx_mode = BNX2X_RX_MODE_NONE;
2904 del_timer_sync(&bp->timer);
2907 /* Set ALWAYS_ALIVE bit in shmem */
2908 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2909 bnx2x_drv_pulse(bp);
2910 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2911 bnx2x_save_statistics(bp);
2914 /* wait till consumers catch up with producers in all queues */
2915 bnx2x_drain_tx_queues(bp);
2917 /* If VF, indicate to the PF that this function is going down (the PF
2918 * will delete SP elements and clear initializations
2921 bnx2x_vfpf_close_vf(bp);
2922 else if (unload_mode != UNLOAD_RECOVERY)
2923 /* if this is a normal/close unload need to clean up chip*/
2924 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2926 /* Send the UNLOAD_REQUEST to the MCP */
2927 bnx2x_send_unload_req(bp, unload_mode);
2929 /* Prevent transactions to host from the functions on the
2930 * engine that doesn't reset global blocks in case of global
2931 * attention once global blocks are reset and gates are opened
2932 * (the engine whose leader will perform the recovery
2935 if (!CHIP_IS_E1x(bp))
2936 bnx2x_pf_disable(bp);
2938 /* Disable HW interrupts, NAPI */
2939 bnx2x_netif_stop(bp, 1);
2940 /* Delete all NAPI objects */
2941 bnx2x_del_all_napi(bp);
2942 if (CNIC_LOADED(bp))
2943 bnx2x_del_all_napi_cnic(bp);
2947 /* Report UNLOAD_DONE to MCP */
2948 bnx2x_send_unload_done(bp, false);
2952 * At this stage no more interrupts will arrive so we may safely clean
2953 * the queueable objects here in case they failed to get cleaned so far.
2956 bnx2x_squeeze_objects(bp);
2958 /* There should be no more pending SP commands at this stage */
2963 /* clear pending work in rtnl task */
2964 bp->sp_rtnl_state = 0;
2967 /* Free SKBs, SGEs, TPA pool and driver internals */
2968 bnx2x_free_skbs(bp);
2969 if (CNIC_LOADED(bp))
2970 bnx2x_free_skbs_cnic(bp);
2971 for_each_rx_queue(bp, i)
2972 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2974 bnx2x_free_fp_mem(bp);
2975 if (CNIC_LOADED(bp))
2976 bnx2x_free_fp_mem_cnic(bp);
2979 if (CNIC_LOADED(bp))
2980 bnx2x_free_mem_cnic(bp);
2984 bp->state = BNX2X_STATE_CLOSED;
2985 bp->cnic_loaded = false;
2987 /* Check if there are pending parity attentions. If there are - set
2988 * RECOVERY_IN_PROGRESS.
2990 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2991 bnx2x_set_reset_in_progress(bp);
2993 /* Set RESET_IS_GLOBAL if needed */
2995 bnx2x_set_reset_global(bp);
2998 /* The last driver must disable a "close the gate" if there is no
2999 * parity attention or "process kill" pending.
3002 !bnx2x_clear_pf_load(bp) &&
3003 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3004 bnx2x_disable_close_the_gate(bp);
3006 DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
3011 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3015 /* If there is no power capability, silently succeed */
3016 if (!bp->pdev->pm_cap) {
3017 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3021 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3025 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3026 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3027 PCI_PM_CTRL_PME_STATUS));
3029 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3030 /* delay required during transition out of D3hot */
3035 /* If there are other clients above, don't
3036 shut down the power */
3037 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3039 /* Don't shut down the power for emulation and FPGA */
3040 if (CHIP_REV_IS_SLOW(bp))
3043 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3047 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3049 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3052 /* No more memory access after this point until
3053 * device is brought back to D0.
3058 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3065 * net_device service functions
3067 int bnx2x_poll(struct napi_struct *napi, int budget)
3071 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3073 struct bnx2x *bp = fp->bp;
3076 #ifdef BNX2X_STOP_ON_ERROR
3077 if (unlikely(bp->panic)) {
3078 napi_complete(napi);
3082 if (!bnx2x_fp_lock_napi(fp))
3085 for_each_cos_in_tx_queue(fp, cos)
3086 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3087 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3089 if (bnx2x_has_rx_work(fp)) {
3090 work_done += bnx2x_rx_int(fp, budget - work_done);
3092 /* must not complete if we consumed full budget */
3093 if (work_done >= budget) {
3094 bnx2x_fp_unlock_napi(fp);
3099 /* Fall out from the NAPI loop if needed */
3100 if (!bnx2x_fp_unlock_napi(fp) &&
3101 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3103 /* No need to update SB for FCoE L2 ring as long as
3104 * it's connected to the default SB and the SB
3105 * has been updated when NAPI was scheduled.
3107 if (IS_FCOE_FP(fp)) {
3108 napi_complete(napi);
3111 bnx2x_update_fpsb_idx(fp);
3112 /* bnx2x_has_rx_work() reads the status block,
3113 * thus we need to ensure that status block indices
3114 * have been actually read (bnx2x_update_fpsb_idx)
3115 * prior to this check (bnx2x_has_rx_work) so that
3116 * we won't write the "newer" value of the status block
3117 * to IGU (if there was a DMA right after
3118 * bnx2x_has_rx_work and if there is no rmb, the memory
3119 * reading (bnx2x_update_fpsb_idx) may be postponed
3120 * to right before bnx2x_ack_sb). In this case there
3121 * will never be another interrupt until there is
3122 * another update of the status block, while there
3123 * is still unhandled work.
3127 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3128 napi_complete(napi);
3129 /* Re-enable interrupts */
3130 DP(NETIF_MSG_RX_STATUS,
3131 "Update index to %d\n", fp->fp_hc_idx);
3132 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3133 le16_to_cpu(fp->fp_hc_idx),
3143 #ifdef CONFIG_NET_RX_BUSY_POLL
3144 /* must be called with local_bh_disable()d */
3145 int bnx2x_low_latency_recv(struct napi_struct *napi)
3147 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3149 struct bnx2x *bp = fp->bp;
3152 if ((bp->state == BNX2X_STATE_CLOSED) ||
3153 (bp->state == BNX2X_STATE_ERROR) ||
3154 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3155 return LL_FLUSH_FAILED;
3157 if (!bnx2x_fp_lock_poll(fp))
3158 return LL_FLUSH_BUSY;
3160 if (bnx2x_has_rx_work(fp))
3161 found = bnx2x_rx_int(fp, 4);
3163 bnx2x_fp_unlock_poll(fp);
3169 /* We split the first BD into header and data BDs
3170 * to ease the pain of our fellow microcode engineers;
3171 * we use one mapping for both BDs.
3173 static u16 bnx2x_tx_split(struct bnx2x *bp,
3174 struct bnx2x_fp_txdata *txdata,
3175 struct sw_tx_bd *tx_buf,
3176 struct eth_tx_start_bd **tx_bd, u16 hlen,
3179 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3180 struct eth_tx_bd *d_tx_bd;
3182 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3184 /* first fix first BD */
3185 h_tx_bd->nbytes = cpu_to_le16(hlen);
3187 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3188 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3190 /* now get a new data BD
3191 * (after the pbd) and fill it */
3192 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3193 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3195 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3196 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3198 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3199 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3200 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3202 /* this marks the BD as one that has no individual mapping */
3203 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3205 DP(NETIF_MSG_TX_QUEUED,
3206 "TSO split data size is %d (%x:%x)\n",
3207 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3210 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
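/* Illustrative example of the split above (sizes invented): a TSO skb
 * whose linear part is 1514 bytes with hlen = 66 ends up as a 66-byte
 * header BD plus a 1448-byte data BD, both pointing into the one DMA
 * mapping of the original buffer (data BD address = header address +
 * hlen), with BNX2X_TSO_SPLIT_BD telling the unmap path about it.
 */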
3215 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3216 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3217 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3219 __sum16 tsum = (__force __sum16) csum;
3222 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3223 csum_partial(t_header - fix, fix, 0)));
3226 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3227 csum_partial(t_header, -fix, 0)));
3229 return bswab16(tsum);
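/* A minimal sketch of the fixup above: for a positive fix (HW summed
 * `fix' extra bytes before the transport header) the result is
 *
 *   tsum = ~fold(hw_csum - csum_partial(t_header - fix, fix, 0))
 *
 * i.e. the contribution of the extra bytes is subtracted back out, while
 * a negative fix adds the missing bytes in instead; the result is
 * byte-swapped for the FW.
 */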
3232 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3238 if (skb->ip_summed != CHECKSUM_PARTIAL)
3241 protocol = vlan_get_protocol(skb);
3242 if (protocol == htons(ETH_P_IPV6)) {
3244 prot = ipv6_hdr(skb)->nexthdr;
3247 prot = ip_hdr(skb)->protocol;
3250 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3251 if (inner_ip_hdr(skb)->version == 6) {
3252 rc |= XMIT_CSUM_ENC_V6;
3253 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3254 rc |= XMIT_CSUM_TCP;
3256 rc |= XMIT_CSUM_ENC_V4;
3257 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3258 rc |= XMIT_CSUM_TCP;
3261 if (prot == IPPROTO_TCP)
3262 rc |= XMIT_CSUM_TCP;
3264 if (skb_is_gso(skb)) {
3265 if (skb_is_gso_v6(skb)) {
3266 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3267 if (rc & XMIT_CSUM_ENC)
3268 rc |= XMIT_GSO_ENC_V6;
3270 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3271 if (rc & XMIT_CSUM_ENC)
3272 rc |= XMIT_GSO_ENC_V4;
3279 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3280 /* check if packet requires linearization (packet is too fragmented);
3281 no need to check fragmentation if page size > 8K (there will be no
3282 violation of FW restrictions) */
3283 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3288 int first_bd_sz = 0;
3290 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3291 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3293 if (xmit_type & XMIT_GSO) {
3294 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3295 /* Check if LSO packet needs to be copied:
3296 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3297 int wnd_size = MAX_FETCH_BD - 3;
3298 /* Number of windows to check */
3299 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3304 /* Headers length */
3305 hlen = (int)(skb_transport_header(skb) - skb->data) +
3308 /* Amount of data (w/o headers) on linear part of SKB*/
3309 first_bd_sz = skb_headlen(skb) - hlen;
3311 wnd_sum = first_bd_sz;
3313 /* Calculate the first sum - it's special */
3314 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3316 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3318 /* If there was data in the linear part of the skb - check it */
3319 if (first_bd_sz > 0) {
3320 if (unlikely(wnd_sum < lso_mss)) {
3325 wnd_sum -= first_bd_sz;
3328 /* Others are easier: run through the frag list and
3329 check all windows */
3330 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3332 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3334 if (unlikely(wnd_sum < lso_mss)) {
3339 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3342 /* in the non-LSO case, a too fragmented packet should always
3349 if (unlikely(to_copy))
3350 DP(NETIF_MSG_TX_QUEUED,
3351 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3352 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3353 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
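/* Worked example of the sliding-window check above, assuming
 * MAX_FETCH_BD of 13 (so wnd_size = 10): every run of 10 consecutive
 * BDs - the linear part plus frags - must carry at least lso_mss bytes
 * between them; any window that falls short makes this function return
 * non-zero and forces skb_linearize() in bnx2x_start_xmit().
 */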
3359 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3362 struct ipv6hdr *ipv6;
3364 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3365 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3366 ETH_TX_PARSE_BD_E2_LSO_MSS;
3368 if (xmit_type & XMIT_GSO_ENC_V6)
3369 ipv6 = inner_ipv6_hdr(skb);
3370 else if (xmit_type & XMIT_GSO_V6)
3371 ipv6 = ipv6_hdr(skb);
3375 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3376 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3380 * bnx2x_set_pbd_gso - update PBD in GSO case.
3384 * @xmit_type: xmit flags
3386 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3387 struct eth_tx_parse_bd_e1x *pbd,
3388 struct eth_tx_start_bd *tx_start_bd,
3391 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3392 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3393 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3395 if (xmit_type & XMIT_GSO_V4) {
3396 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3397 pbd->tcp_pseudo_csum =
3398 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3400 0, IPPROTO_TCP, 0));
3402 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3403 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3405 pbd->tcp_pseudo_csum =
3406 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3407 &ipv6_hdr(skb)->daddr,
3408 0, IPPROTO_TCP, 0));
3412 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3416 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3418 * @bp: driver handle
3420 * @parsing_data: data to be updated
3421 * @xmit_type: xmit flags
3423 * 57712/578xx related, when skb has encapsulation
3425 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3426 u32 *parsing_data, u32 xmit_type)
3429 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3430 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3431 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3433 if (xmit_type & XMIT_CSUM_TCP) {
3434 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3435 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3436 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3438 return skb_inner_transport_header(skb) +
3439 inner_tcp_hdrlen(skb) - skb->data;
3442 /* We support checksum offload for TCP and UDP only.
3443 * No need to pass the UDP header length - it's a constant.
3445 return skb_inner_transport_header(skb) +
3446 sizeof(struct udphdr) - skb->data;
3450 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3452 * @bp: driver handle
3454 * @parsing_data: data to be updated
3455 * @xmit_type: xmit flags
3457 * 57712/578xx related
3459 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3460 u32 *parsing_data, u32 xmit_type)
3463 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3464 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3465 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3467 if (xmit_type & XMIT_CSUM_TCP) {
3468 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3469 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3470 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3472 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3474 /* We support checksum offload for TCP and UDP only.
3475 * No need to pass the UDP header length - it's a constant.
3477 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3480 /* set FW indication according to inner or outer protocols if tunneled */
3481 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3482 struct eth_tx_start_bd *tx_start_bd,
3485 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3487 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3488 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3490 if (!(xmit_type & XMIT_CSUM_TCP))
3491 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3495 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3497 * @bp: driver handle
3499 * @pbd: parse BD to be updated
3500 * @xmit_type: xmit flags
3502 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3503 struct eth_tx_parse_bd_e1x *pbd,
3506 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3508 /* for now NS flag is not used in Linux */
3511 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3512 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3514 pbd->ip_hlen_w = (skb_transport_header(skb) -
3515 skb_network_header(skb)) >> 1;
3517 hlen += pbd->ip_hlen_w;
3519 /* We support checksum offload for TCP and UDP only */
3520 if (xmit_type & XMIT_CSUM_TCP)
3521 hlen += tcp_hdrlen(skb) / 2;
3523 hlen += sizeof(struct udphdr) / 2;
3525 pbd->total_hlen_w = cpu_to_le16(hlen);
3528 if (xmit_type & XMIT_CSUM_TCP) {
3529 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3532 s8 fix = SKB_CS_OFF(skb); /* signed! */
3534 DP(NETIF_MSG_TX_QUEUED,
3535 "hlen %d fix %d csum before fix %x\n",
3536 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3538 /* HW bug: fixup the CSUM */
3539 pbd->tcp_pseudo_csum =
3540 bnx2x_csum_fix(skb_transport_header(skb),
3543 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3544 pbd->tcp_pseudo_csum);
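/* The header bookkeeping above is in 16-bit words. An illustrative
 * untagged IPv4/TCP frame: a 14-byte Ethernet header gives hlen = 7
 * words, a 20-byte IP header adds ip_hlen_w = 10, and a 20-byte TCP
 * header adds another 10, so total_hlen_w = 27 words (54 bytes).
 */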
3550 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3551 struct eth_tx_parse_bd_e2 *pbd_e2,
3552 struct eth_tx_parse_2nd_bd *pbd2,
3557 u8 outerip_off, outerip_len = 0;
3559 /* from outer IP to transport */
3560 hlen_w = (skb_inner_transport_header(skb) -
3561 skb_network_header(skb)) >> 1;
3564 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3566 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3568 /* outer IP header info */
3569 if (xmit_type & XMIT_CSUM_V4) {
3570 struct iphdr *iph = ip_hdr(skb);
3571 u32 csum = (__force u32)(~iph->check) -
3572 (__force u32)iph->tot_len -
3573 (__force u32)iph->frag_off;
3575 pbd2->fw_ip_csum_wo_len_flags_frag =
3576 bswab16(csum_fold((__force __wsum)csum));
3578 pbd2->fw_ip_hdr_to_payload_w =
3579 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3582 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3584 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3586 if (xmit_type & XMIT_GSO_V4) {
3587 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3589 pbd_e2->data.tunnel_data.pseudo_csum =
3590 bswab16(~csum_tcpudp_magic(
3591 inner_ip_hdr(skb)->saddr,
3592 inner_ip_hdr(skb)->daddr,
3593 0, IPPROTO_TCP, 0));
3595 outerip_len = ip_hdr(skb)->ihl << 1;
3597 pbd_e2->data.tunnel_data.pseudo_csum =
3598 bswab16(~csum_ipv6_magic(
3599 &inner_ipv6_hdr(skb)->saddr,
3600 &inner_ipv6_hdr(skb)->daddr,
3601 0, IPPROTO_TCP, 0));
3604 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3608 (!!(xmit_type & XMIT_CSUM_V6) <<
3609 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3611 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3612 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3613 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3615 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3616 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3617 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3621 /* called with netif_tx_lock
3622 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3623 * netif_wake_queue()
3625 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3627 struct bnx2x *bp = netdev_priv(dev);
3629 struct netdev_queue *txq;
3630 struct bnx2x_fp_txdata *txdata;
3631 struct sw_tx_bd *tx_buf;
3632 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3633 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3634 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3635 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3636 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3637 u32 pbd_e2_parsing_data = 0;
3638 u16 pkt_prod, bd_prod;
3641 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3644 __le16 pkt_size = 0;
3646 u8 mac_type = UNICAST_ADDRESS;
3648 #ifdef BNX2X_STOP_ON_ERROR
3649 if (unlikely(bp->panic))
3650 return NETDEV_TX_BUSY;
3653 txq_index = skb_get_queue_mapping(skb);
3654 txq = netdev_get_tx_queue(dev, txq_index);
3656 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3658 txdata = &bp->bnx2x_txq[txq_index];
3660 /* enable this debug print to view the transmission queue being used
3661 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3662 txq_index, fp_index, txdata_index); */
3664 /* enable this debug print to view the transmission details
3665 DP(NETIF_MSG_TX_QUEUED,
3666 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3667 txdata->cid, fp_index, txdata_index, txdata, fp); */
3669 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3670 skb_shinfo(skb)->nr_frags +
3672 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3673 /* Handle special storage cases separately */
3674 if (txdata->tx_ring_size == 0) {
3675 struct bnx2x_eth_q_stats *q_stats =
3676 bnx2x_fp_qstats(bp, txdata->parent_fp);
3677 q_stats->driver_filtered_tx_pkt++;
3679 return NETDEV_TX_OK;
3681 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3682 netif_tx_stop_queue(txq);
3683 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3685 return NETDEV_TX_BUSY;
3688 DP(NETIF_MSG_TX_QUEUED,
3689 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3690 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3691 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3694 eth = (struct ethhdr *)skb->data;
3696 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3697 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3698 if (is_broadcast_ether_addr(eth->h_dest))
3699 mac_type = BROADCAST_ADDRESS;
3701 mac_type = MULTICAST_ADDRESS;
3704 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3705 /* First, check if we need to linearize the skb (due to FW
3706 restrictions). No need to check fragmentation if page size > 8K
3707 (there will be no violation of FW restrictions) */
3708 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3709 /* Statistics of linearization */
3711 if (skb_linearize(skb) != 0) {
3712 DP(NETIF_MSG_TX_QUEUED,
3713 "SKB linearization failed - silently dropping this SKB\n");
3714 dev_kfree_skb_any(skb);
3715 return NETDEV_TX_OK;
3719 /* Map skb linear data for DMA */
3720 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3721 skb_headlen(skb), DMA_TO_DEVICE);
3722 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3723 DP(NETIF_MSG_TX_QUEUED,
3724 "SKB mapping failed - silently dropping this SKB\n");
3725 dev_kfree_skb_any(skb);
3726 return NETDEV_TX_OK;
3729 Please read carefully. First we use one BD which we mark as start,
3730 then we have a parsing info BD (used for TSO or xsum),
3731 and only then we have the rest of the TSO BDs.
3732 (don't forget to mark the last one as last,
3733 and to unmap only AFTER you write to the BD ...)
3734 And above all, all pbd sizes are in words - NOT DWORDS!
3737 /* get current pkt produced now - advance it just before sending the packet,
3738 * since mapping of pages may fail and cause the packet to be dropped
3740 pkt_prod = txdata->tx_pkt_prod;
3741 bd_prod = TX_BD(txdata->tx_bd_prod);
3743 /* get a tx_buf and first BD
3744 * tx_start_bd may be changed during SPLIT,
3745 * but first_bd will always stay first
3747 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3748 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3749 first_bd = tx_start_bd;
3751 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3753 /* header nbd: indirectly zero other flags! */
3754 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3756 /* remember the first BD of the packet */
3757 tx_buf->first_bd = txdata->tx_bd_prod;
3761 DP(NETIF_MSG_TX_QUEUED,
3762 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3763 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3765 if (vlan_tx_tag_present(skb)) {
3766 tx_start_bd->vlan_or_ethertype =
3767 cpu_to_le16(vlan_tx_tag_get(skb));
3768 tx_start_bd->bd_flags.as_bitfield |=
3769 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3771 /* when transmitting from a VF, the start BD must hold the ethertype
3772 * for the FW to enforce it
3775 tx_start_bd->vlan_or_ethertype =
3776 cpu_to_le16(ntohs(eth->h_proto));
3778 /* used by FW for packet accounting */
3779 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3782 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3784 /* turn on parsing and get a BD */
3785 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3787 if (xmit_type & XMIT_CSUM)
3788 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3790 if (!CHIP_IS_E1x(bp)) {
3791 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3792 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3794 if (xmit_type & XMIT_CSUM_ENC) {
3795 u16 global_data = 0;
3797 /* Set PBD in enc checksum offload case */
3798 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3799 &pbd_e2_parsing_data,
3802 /* turn on 2nd parsing and get a BD */
3803 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3805 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3807 memset(pbd2, 0, sizeof(*pbd2));
3809 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3810 (skb_inner_network_header(skb) -
3813 if (xmit_type & XMIT_GSO_ENC)
3814 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3818 pbd2->global_data = cpu_to_le16(global_data);
3820 /* add additional parse BD indication to start BD */
3821 SET_FLAG(tx_start_bd->general_data,
3822 ETH_TX_START_BD_PARSE_NBDS, 1);
3823 /* set encapsulation flag in start BD */
3824 SET_FLAG(tx_start_bd->general_data,
3825 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3827 } else if (xmit_type & XMIT_CSUM) {
3828 /* Set PBD in checksum offload case w/o encapsulation */
3829 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3830 &pbd_e2_parsing_data,
3834 /* Add the MACs to the parsing BD if this is a VF */
3836 /* override GRE parameters in BD */
3837 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3838 &pbd_e2->data.mac_addr.src_mid,
3839 &pbd_e2->data.mac_addr.src_lo,
3842 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3843 &pbd_e2->data.mac_addr.dst_mid,
3844 &pbd_e2->data.mac_addr.dst_lo,
3848 SET_FLAG(pbd_e2_parsing_data,
3849 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3851 u16 global_data = 0;
3852 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3853 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3854 /* Set PBD in checksum offload case */
3855 if (xmit_type & XMIT_CSUM)
3856 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3858 SET_FLAG(global_data,
3859 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3860 pbd_e1x->global_data |= cpu_to_le16(global_data);
3863 /* Setup the data pointer of the first BD of the packet */
3864 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3865 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3866 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3867 pkt_size = tx_start_bd->nbytes;
3869 DP(NETIF_MSG_TX_QUEUED,
3870 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3871 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3872 le16_to_cpu(tx_start_bd->nbytes),
3873 tx_start_bd->bd_flags.as_bitfield,
3874 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3876 if (xmit_type & XMIT_GSO) {
3878 DP(NETIF_MSG_TX_QUEUED,
3879 "TSO packet len %d hlen %d total len %d tso size %d\n",
3880 skb->len, hlen, skb_headlen(skb),
3881 skb_shinfo(skb)->gso_size);
3883 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3885 if (unlikely(skb_headlen(skb) > hlen)) {
3887 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3891 if (!CHIP_IS_E1x(bp))
3892 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3895 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3898 /* Set the PBD's parsing_data field if not zero
3899 * (for the chips newer than 57711).
3901 if (pbd_e2_parsing_data)
3902 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3904 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3906 /* Handle fragmented skb */
3907 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3908 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3910 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3911 skb_frag_size(frag), DMA_TO_DEVICE);
3912 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3913 unsigned int pkts_compl = 0, bytes_compl = 0;
3915 DP(NETIF_MSG_TX_QUEUED,
3916 "Unable to map page - dropping packet...\n");
3918 /* we need to unmap all buffers already mapped
3920 * first_bd->nbd needs to be properly updated
3921 * before the call to bnx2x_free_tx_pkt
3923 first_bd->nbd = cpu_to_le16(nbd);
3924 bnx2x_free_tx_pkt(bp, txdata,
3925 TX_BD(txdata->tx_pkt_prod),
3926 &pkts_compl, &bytes_compl);
3927 return NETDEV_TX_OK;
3930 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3931 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3932 if (total_pkt_bd == NULL)
3933 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3935 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3936 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3937 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3938 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3941 DP(NETIF_MSG_TX_QUEUED,
3942 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3943 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3944 le16_to_cpu(tx_data_bd->nbytes));
3947 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3949 /* update with actual num BDs */
3950 first_bd->nbd = cpu_to_le16(nbd);
3952 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3954 /* now send a tx doorbell, counting the next BD
3955 * if the packet contains or ends with it
3957 if (TX_BD_POFF(bd_prod) < nbd)
3960 /* total_pkt_bytes should be set on the first data BD if
3961 * it's not an LSO packet and there is more than one
3962 * data BD. In this case pkt_size is limited by an MTU value.
3963 * However we prefer to set it for an LSO packet (while we don't
3964 * have to) in order to save some CPU cycles in the non-LSO
3965 * case, where we care much more about them.
3967 if (total_pkt_bd != NULL)
3968 total_pkt_bd->total_pkt_bytes = pkt_size;
3971 DP(NETIF_MSG_TX_QUEUED,
3972 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3973 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3974 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3975 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3976 le16_to_cpu(pbd_e1x->total_hlen_w));
3978 DP(NETIF_MSG_TX_QUEUED,
3979 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3981 pbd_e2->data.mac_addr.dst_hi,
3982 pbd_e2->data.mac_addr.dst_mid,
3983 pbd_e2->data.mac_addr.dst_lo,
3984 pbd_e2->data.mac_addr.src_hi,
3985 pbd_e2->data.mac_addr.src_mid,
3986 pbd_e2->data.mac_addr.src_lo,
3987 pbd_e2->parsing_data);
3988 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3990 netdev_tx_sent_queue(txq, skb->len);
3992 skb_tx_timestamp(skb);
3994 txdata->tx_pkt_prod++;
3996 * Make sure that the BD data is updated before updating the producer
3997 * since FW might read the BD right after the producer is updated.
3998 * This is only applicable for weak-ordered memory model archs such
3999 * as IA-64. The following barrier is also mandatory since the FW
4000 * assumes packets always have BDs.
4004 txdata->tx_db.data.prod += nbd;
4007 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4011 txdata->tx_bd_prod += nbd;
4013 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4014 netif_tx_stop_queue(txq);
4016 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4017 * ordering of set_bit() in netif_tx_stop_queue() and read of
4021 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4022 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4023 netif_tx_wake_queue(txq);
4027 return NETDEV_TX_OK;
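/* A sketch of the BD chain bnx2x_start_xmit() hands to the FW for a
 * typical offloaded packet (shape only, counts vary per skb):
 *
 *   start BD -> parse BD (e1x or e2) [-> 2nd parse BD if tunneled]
 *            -> split header/data BDs for TSO -> one data BD per frag
 *
 * nbd counts all of these, and the single doorbell write at the end
 * publishes the whole chain at once.
 */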
4031 * bnx2x_setup_tc - routine to configure net_device for multi tc
4033 * @dev: net device to configure
4034 * @num_tc: number of traffic classes to enable
4036 * callback connected to the ndo_setup_tc function pointer
4038 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4040 int cos, prio, count, offset;
4041 struct bnx2x *bp = netdev_priv(dev);
4043 /* setup tc must be called under rtnl lock */
4046 /* no traffic classes requested. Aborting */
4048 netdev_reset_tc(dev);
4052 /* requested to support too many traffic classes */
4053 if (num_tc > bp->max_cos) {
4054 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4055 num_tc, bp->max_cos);
4059 /* declare the number of supported traffic classes */
4060 if (netdev_set_num_tc(dev, num_tc)) {
4061 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4065 /* configure priority to traffic class mapping */
4066 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4067 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4068 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4069 "mapping priority %d to tc %d\n",
4070 prio, bp->prio_to_cos[prio]);
4073 /* Use this configuration to differentiate tc0 from other COSes
4074 This can be used for ets or pfc, and save the effort of setting
4075 up a multi class queue disc or negotiating DCBX with a switch
4076 netdev_set_prio_tc_map(dev, 0, 0);
4077 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4078 for (prio = 1; prio < 16; prio++) {
4079 netdev_set_prio_tc_map(dev, prio, 1);
4080 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4083 /* configure traffic class to transmission queue mapping */
4084 for (cos = 0; cos < bp->max_cos; cos++) {
4085 count = BNX2X_NUM_ETH_QUEUES(bp);
4086 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4087 netdev_set_tc_queue(dev, cos, count, offset);
4088 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4089 "mapping tc %d to offset %d count %d\n",
4090 cos, offset, count);
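/* Illustrative result of the loop above: with 4 ETH queues (and no CNIC
 * queues) and max_cos = 3, tc0 maps to txqs 0-3 (offset 0), tc1 to txqs
 * 4-7 (offset 4) and tc2 to txqs 8-11 (offset 8), i.e. count = 4 and
 * offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp) for each class.
 */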
4096 /* called with rtnl_lock */
4097 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4099 struct sockaddr *addr = p;
4100 struct bnx2x *bp = netdev_priv(dev);
4103 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4104 BNX2X_ERR("Requested MAC address is not valid\n");
4108 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4109 !is_zero_ether_addr(addr->sa_data)) {
4110 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4114 if (netif_running(dev)) {
4115 rc = bnx2x_set_eth_mac(bp, false);
4120 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4122 if (netif_running(dev))
4123 rc = bnx2x_set_eth_mac(bp, true);
4128 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4130 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4131 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4136 if (IS_FCOE_IDX(fp_index)) {
4137 memset(sb, 0, sizeof(union host_hc_status_block));
4138 fp->status_blk_mapping = 0;
4141 if (!CHIP_IS_E1x(bp))
4142 BNX2X_PCI_FREE(sb->e2_sb,
4143 bnx2x_fp(bp, fp_index,
4144 status_blk_mapping),
4145 sizeof(struct host_hc_status_block_e2));
4147 BNX2X_PCI_FREE(sb->e1x_sb,
4148 bnx2x_fp(bp, fp_index,
4149 status_blk_mapping),
4150 sizeof(struct host_hc_status_block_e1x));
4154 if (!skip_rx_queue(bp, fp_index)) {
4155 bnx2x_free_rx_bds(fp);
4157 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4158 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4159 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4160 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4161 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4163 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4164 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4165 sizeof(struct eth_fast_path_rx_cqe) *
4169 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4170 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4171 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4172 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4176 if (!skip_tx_queue(bp, fp_index)) {
4177 /* fastpath tx rings: tx_buf tx_desc */
4178 for_each_cos_in_tx_queue(fp, cos) {
4179 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4181 DP(NETIF_MSG_IFDOWN,
4182 "freeing tx memory of fp %d cos %d cid %d\n",
4183 fp_index, cos, txdata->cid);
4185 BNX2X_FREE(txdata->tx_buf_ring);
4186 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4187 txdata->tx_desc_mapping,
4188 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4191 /* end of fastpath */
4194 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4197 for_each_cnic_queue(bp, i)
4198 bnx2x_free_fp_mem_at(bp, i);
4201 void bnx2x_free_fp_mem(struct bnx2x *bp)
4204 for_each_eth_queue(bp, i)
4205 bnx2x_free_fp_mem_at(bp, i);
4208 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4210 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4211 if (!CHIP_IS_E1x(bp)) {
4212 bnx2x_fp(bp, index, sb_index_values) =
4213 (__le16 *)status_blk.e2_sb->sb.index_values;
4214 bnx2x_fp(bp, index, sb_running_index) =
4215 (__le16 *)status_blk.e2_sb->sb.running_index;
4217 bnx2x_fp(bp, index, sb_index_values) =
4218 (__le16 *)status_blk.e1x_sb->sb.index_values;
4219 bnx2x_fp(bp, index, sb_running_index) =
4220 (__le16 *)status_blk.e1x_sb->sb.running_index;
4224 /* Returns the number of actually allocated BDs */
4225 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4228 struct bnx2x *bp = fp->bp;
4229 u16 ring_prod, cqe_ring_prod;
4230 int i, failure_cnt = 0;
4232 fp->rx_comp_cons = 0;
4233 cqe_ring_prod = ring_prod = 0;
4235 /* This routine is called only during init, so
4236 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4238 for (i = 0; i < rx_ring_size; i++) {
4239 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4243 ring_prod = NEXT_RX_IDX(ring_prod);
4244 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4245 WARN_ON(ring_prod <= (i - failure_cnt));
4249 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4250 i - failure_cnt, fp->index);
4252 fp->rx_bd_prod = ring_prod;
4253 /* Limit the CQE producer by the CQE ring size */
4254 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4256 fp->rx_pkt = fp->rx_calls = 0;
4258 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4260 return i - failure_cnt;
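/* Illustrative outcome (counts invented): asking for 4000 buffers with
 * 3 allocation failures leaves 3997 filled BDs, logs the shortfall,
 * accounts it in rx_skb_alloc_failed and returns 3997; rx_comp_prod is
 * clamped to the CQE ring capacity above either way.
 */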
4263 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4267 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4268 struct eth_rx_cqe_next_page *nextpg;
4270 nextpg = (struct eth_rx_cqe_next_page *)
4271 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4273 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4274 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4276 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4277 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
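/* The loop above turns the last CQE of every RCQ page into a "next
 * page" pointer, so the pages form a ring: page i-1's final entry
 * points at page (i % NUM_RCQ_RINGS), and the last page wraps back to
 * page 0 (a sketch of the chaining, not new behavior).
 */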
4281 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4283 union host_hc_status_block *sb;
4284 struct bnx2x_fastpath *fp = &bp->fp[index];
4287 int rx_ring_size = 0;
4289 if (!bp->rx_ring_size &&
4290 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4291 rx_ring_size = MIN_RX_SIZE_NONTPA;
4292 bp->rx_ring_size = rx_ring_size;
4293 } else if (!bp->rx_ring_size) {
4294 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4296 if (CHIP_IS_E3(bp)) {
4297 u32 cfg = SHMEM_RD(bp,
4298 dev_info.port_hw_config[BP_PORT(bp)].
4301 /* Decrease ring size for 1G functions */
4302 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4303 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4307 /* allocate at least the number of buffers required by FW */
4308 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4309 MIN_RX_SIZE_TPA, rx_ring_size);
4311 bp->rx_ring_size = rx_ring_size;
4312 } else /* if rx_ring_size specified - use it */
4313 rx_ring_size = bp->rx_ring_size;
4315 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4318 sb = &bnx2x_fp(bp, index, status_blk);
4320 if (!IS_FCOE_IDX(index)) {
4322 if (!CHIP_IS_E1x(bp))
4323 BNX2X_PCI_ALLOC(sb->e2_sb,
4324 &bnx2x_fp(bp, index, status_blk_mapping),
4325 sizeof(struct host_hc_status_block_e2));
4327 BNX2X_PCI_ALLOC(sb->e1x_sb,
4328 &bnx2x_fp(bp, index, status_blk_mapping),
4329 sizeof(struct host_hc_status_block_e1x));
4332 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4333 * set shortcuts for it.
4335 if (!IS_FCOE_IDX(index))
4336 set_sb_shortcuts(bp, index);
4339 if (!skip_tx_queue(bp, index)) {
4340 /* fastpath tx rings: tx_buf tx_desc */
4341 for_each_cos_in_tx_queue(fp, cos) {
4342 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4345 "allocating tx memory of fp %d cos %d\n",
4348 BNX2X_ALLOC(txdata->tx_buf_ring,
4349 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4350 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4351 &txdata->tx_desc_mapping,
4352 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4357 if (!skip_rx_queue(bp, index)) {
4358 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4359 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4360 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4361 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4362 &bnx2x_fp(bp, index, rx_desc_mapping),
4363 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4365 /* Seed all CQEs by 1s */
4366 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4367 &bnx2x_fp(bp, index, rx_comp_mapping),
4368 sizeof(struct eth_fast_path_rx_cqe) *
4372 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4373 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4374 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4375 &bnx2x_fp(bp, index, rx_sge_mapping),
4376 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4378 bnx2x_set_next_page_rx_bd(fp);
4381 bnx2x_set_next_page_rx_cq(fp);
4384 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4385 if (ring_size < rx_ring_size)
4391 /* handles low memory cases */
4393 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4395 /* FW will drop all packets if the queue is not big enough.
4396 * In these cases we disable the queue.
4397 * Min size is different for OOO, TPA and non-TPA queues.
4399 if (ring_size < (fp->disable_tpa ?
4400 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4401 /* release memory allocated for this queue */
4402 bnx2x_free_fp_mem_at(bp, index);
4408 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4412 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4413 /* we will fail the load process instead of marking
4421 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4425 /* 1. Allocate FP for leading - fatal if error
4426 * 2. Allocate RSS - fix number of queues if error
4430 if (bnx2x_alloc_fp_mem_at(bp, 0))
4434 for_each_nondefault_eth_queue(bp, i)
4435 if (bnx2x_alloc_fp_mem_at(bp, i))
4438 /* handle memory failures */
4439 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4440 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4443 bnx2x_shrink_eth_fp(bp, delta);
4444 if (CNIC_SUPPORT(bp))
4445 /* move non-eth FPs next to the last eth FP;
4446 * must be done in that order:
4447 * FCOE_IDX < FWD_IDX < OOO_IDX
4450 /* move FCoE fp even if NO_FCOE_FLAG is on */
4451 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4452 bp->num_ethernet_queues -= delta;
4453 bp->num_queues = bp->num_ethernet_queues +
4454 bp->num_cnic_queues;
4455 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4456 bp->num_queues + delta, bp->num_queues);
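
/* Worked example (hypothetical numbers, illustration only): with 8 eth
 * queues requested and allocation failing at i == 5, delta is 3 and the
 * FCoE fastpath moves from index 8 to index 5, right after the last
 * successfully allocated eth queue.
 */
static inline int bnx2x_example_new_fcoe_idx(int fcoe_idx, int delta)
{
	return fcoe_idx - delta;	/* mirrors bnx2x_move_fp() above */
}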
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
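
/* Usage sketch (hypothetical caller, illustration only): the alloc/free
 * pair is symmetric, so a probe-style caller can unwind with one call:
 *
 *	if (bnx2x_alloc_mem_bp(bp))
 *		return -ENOMEM;
 *	...
 *	bnx2x_free_mem_bp(bp);
 */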
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
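
/* Illustrative sketch (not driver code): with PHY swapping enabled the
 * reversal above simply exchanges the two external PHY indices; INT_PHY
 * and any other value pass through unchanged.
 */
static inline u32 bnx2x_example_swap_phy(u32 idx)
{
	if (idx == EXT_PHY1)
		return EXT_PHY2;
	if (idx == EXT_PHY2)
		return EXT_PHY1;
	return idx;
}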
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
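
/* Minimal sketch of the 64-bit composition assumed above: HILO_U64 joins
 * two 32-bit shmem words into one u64, hi word in the upper 32 bits.
 */
static inline u64 bnx2x_example_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}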
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
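
/* Worked example (illustrative, standard Ethernet constants): with
 * ETH_HLEN == 14 and ETH_MIN_PACKET_SIZE == 60, the smallest MTU the
 * check above accepts is 46; the ceiling is ETH_MAX_JUMBO_PACKET_SIZE.
 */
static inline bool bnx2x_example_mtu_valid(int new_mtu)
{
	return new_mtu <= ETH_MAX_JUMBO_PACKET_SIZE &&
	       new_mtu + ETH_HLEN >= ETH_MIN_PACKET_SIZE;
}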
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
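
/* Minimal sketch of the change-detection idiom used above: XOR of the old
 * and new flag words leaves exactly the toggled bits set.
 */
static inline u32 bnx2x_example_changed_bits(u32 old_flags, u32 new_flags)
{
	return old_flags ^ new_flags;
}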
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
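
/* Note: the barrier pair around set_bit() above follows the usual kernel
 * pattern for signalling a flag to another context: prior writes become
 * visible before the bit is set, and the bit before the scheduled work runs.
 */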
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
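
/* Minimal sketch of the read-modify-write pattern above: clear the field
 * first, then OR in the new value, so unrelated bits in the byte survive.
 */
static inline u8 bnx2x_example_rmw_flags(u8 flags, u8 mask, u8 val)
{
	return (flags & ~mask) | val;
}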
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	/* Disable HC on this index if explicitly requested or if no
	 * timeout value was given.
	 */
	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
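
/* Worked example (illustrative; assumes BNX2X_BTR == 4 as in this driver's
 * headers): usec == 100 yields ticks == 25, while usec == 0 forces the
 * index to be disabled regardless of the 'disable' argument.
 */
static inline u8 bnx2x_example_coalesce_ticks(u16 usec)
{
	return usec / BNX2X_BTR;
}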