1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/ll_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[old_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
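	/* Illustrative example (hypothetical figures): with 8 ETH queues and
	 * max_cos of 3, moving the FCoE fastpath from index 8 to index 5
	 * gives old_max_eth_txqs = 24 and new_max_eth_txqs = 15, so its
	 * txdata entry moves from slot 24 + FCOE_TXQ_IDX_OFFSET down to
	 * slot 15 + FCOE_TXQ_IDX_OFFSET.
	 */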
90 * bnx2x_fill_fw_str - Fill buffer with FW version string.
93 * @buf: character buffer to fill with the fw name
94 * @buf_len: length of the above buffer
97 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
100 u8 phy_fw_ver[PHY_FW_VER_LEN];
102 phy_fw_ver[0] = '\0';
103 bnx2x_get_ext_phy_fw_version(&bp->link_params,
104 phy_fw_ver, PHY_FW_VER_LEN);
105 strlcpy(buf, bp->fw_ver, buf_len);
106 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
108 (bp->common.bc_ver & 0xff0000) >> 16,
109 (bp->common.bc_ver & 0xff00) >> 8,
110 (bp->common.bc_ver & 0xff),
111 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
113 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
118 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
121 * @delta: number of eth queues which were not allocated
123 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
125 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
127 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
128 * backward along the array could cause memory to be overwritten
130 for (cos = 1; cos < bp->max_cos; cos++) {
131 for (i = 0; i < old_eth_num - delta; i++) {
132 struct bnx2x_fastpath *fp = &bp->fp[i];
133 int new_idx = cos * (old_eth_num - delta) + i;
135 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
136 sizeof(struct bnx2x_fp_txdata));
137 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
142 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
144 /* free skb in the packet ring at pos idx
145 * return idx of last bd freed
147 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
148 u16 idx, unsigned int *pkts_compl,
149 unsigned int *bytes_compl)
151 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
152 struct eth_tx_start_bd *tx_start_bd;
153 struct eth_tx_bd *tx_data_bd;
154 struct sk_buff *skb = tx_buf->skb;
155 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
158 /* prefetch skb end pointer to speed up dev_kfree_skb() */
161 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
162 txdata->txq_index, idx, tx_buf, skb);
165 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
166 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
167 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170 #ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
176 new_cons = nbd + tx_buf->first_bd;
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
181 /* Skip a parse bd... */
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
205 (*bytes_compl) += skb->len;
208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
215 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
217 struct netdev_queue *txq;
218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219 unsigned int pkts_compl = 0, bytes_compl = 0;
221 #ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
230 while (sw_cons != hw_cons) {
233 pkt_cons = TX_BD(sw_cons);
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240 &pkts_compl, &bytes_compl);
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent re-enabling the queue
263 * while it's empty. This could have happened if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
272 __netif_tx_lock(txq, smp_processor_id());
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
279 __netif_tx_unlock(txq);
284 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
287 u16 last_max = fp->last_max_sge;
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
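	/* SUB_S16() performs the comparison in signed 16-bit arithmetic, so
	 * last_max_sge keeps advancing correctly even after the SGE index
	 * wraps around the ring.
	 */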
293 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
295 struct eth_end_agg_rx_cqe *cqe)
297 struct bnx2x *bp = fp->bp;
298 u16 last_max, last_elem, first_elem;
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
315 bnx2x_update_last_max_sge(fp,
316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
318 last_max = RX_SGE(fp->last_max_sge);
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
346 /* Get Toeplitz hash value in the skb using the value from the
347 * CQE (calculated by HW).
349 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350 const struct eth_fast_path_rx_cqe *cqe,
353 /* Get Toeplitz hash from CQE */
354 if ((bp->dev->features & NETIF_F_RXHASH) &&
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
361 return le32_to_cpu(cqe->rss_hash_result);
367 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
369 struct eth_fast_path_rx_cqe *cqe)
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
383 /* Try to map an empty data buffer from the aggregation info */
384 mapping = dma_map_single(&bp->pdev->dev,
385 first_buf->data + NET_SKB_PAD,
386 fp->rx_buf_size, DMA_FROM_DEVICE);
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
395 bnx2x_reuse_rx_data(fp, cons, prod);
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
403 /* point prod_bd to new data */
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
421 tpa_info->gro_size = gro_size;
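		/* full_page above is SGE_PAGES rounded down to a multiple of
		 * the GRO segment size, so that a full SGE-sized fragment
		 * added in bnx2x_fill_frag_skb() carries a whole number of
		 * GRO segments.
		 */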
424 #ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426 #ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
435 /* Timestamp option length allowed for TPA aggregation:
437 * nop nop kind length echo val
439 #define TPA_TSTAMP_OPT_LEN 12
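/* i.e. 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (TS value) + 4 (TS echo) = 12 bytes */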
441 * bnx2x_set_gro_params - compute GRO values
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
447 * @pkt_len: length of all segments
449 * Approximates the value of the MSS for this aggregation using
450 * the first packet of it.
451 * Computes the number of aggregated segments and the gso_type.
453 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
457 /* TPA aggregation won't have either IP options or TCP options
458 * other than timestamp or IPv6 extension headers.
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
463 PRS_FLAG_OVERETH_IPV6) {
464 hdrs_len += sizeof(struct ipv6hdr);
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
467 hdrs_len += sizeof(struct iphdr);
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
471 /* Check if there was a TCP timestamp; if there is, it will
472 * always be 12 bytes long: nop nop kind length echo val.
474 * Otherwise FW would close the aggregation.
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
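	/* Illustrative example (hypothetical figures): for an IPv4
	 * aggregation carrying TCP timestamps with len_on_bd of 1514,
	 * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (timestamps) = 66,
	 * giving gso_size = 1514 - 66 = 1448.
	 */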
487 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
501 SGE_PAGES, DMA_FROM_DEVICE);
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
517 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
518 struct bnx2x_agg_info *tpa_info,
521 struct eth_end_agg_rx_cqe *cqe,
524 struct sw_rx_page *rx_pg, old_rx_pg;
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
527 u16 len_on_bd = tpa_info->len_on_bd;
528 u16 full_page = 0, gro_size = 0;
530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
537 /* This is needed in order to enable forwarding support */
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
543 #ifdef BNX2X_STOP_ON_ERROR
544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
564 rx_pg = &fp->rx_page_ring[sge_idx];
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
575 /* Unmap the page as we're going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
578 SGE_PAGES, DMA_FROM_DEVICE);
579 /* Add one frag and update the appropriate fields in the skb */
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
590 get_page(old_rx_pg.page);
595 skb->data_len += frag_len;
596 skb->truesize += SGE_PAGES;
597 skb->len += frag_len;
599 frag_size -= frag_len;
605 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
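/* fp->rx_frag_size is left at zero by bnx2x_set_rx_buf_size() when the rx
 * buffer (plus NET_SKB_PAD) does not fit in a single page; in that case
 * bnx2x_frag_alloc() below falls back to a kmalloc()ed buffer instead of a
 * page fragment.
 */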
613 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
622 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
624 const struct iphdr *iph = ip_hdr(skb);
627 skb_set_transport_header(skb, sizeof(struct iphdr));
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
634 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
636 struct ipv6hdr *iph = ipv6_hdr(skb);
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
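/* The two helpers above seed th->check with the complemented pseudo-header
 * checksum, mirroring what the stack's tcp4/tcp6 GRO completion helpers do,
 * so that an FW-aggregated packet can later be re-segmented (e.g. when
 * forwarded) like any other GRO'd skb.
 */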
646 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
647 void (*gro_func)(struct bnx2x*, struct sk_buff*))
649 skb_set_network_header(skb, 0);
651 tcp_gro_complete(skb);
655 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
659 if (skb_shinfo(skb)->gso_size) {
660 switch (be16_to_cpu(skb->protocol)) {
662 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
665 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
668 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
669 be16_to_cpu(skb->protocol));
673 napi_gro_receive(&fp->napi, skb);
676 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
677 struct bnx2x_agg_info *tpa_info,
679 struct eth_end_agg_rx_cqe *cqe,
682 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
683 u8 pad = tpa_info->placement_offset;
684 u16 len = tpa_info->len_on_bd;
685 struct sk_buff *skb = NULL;
686 u8 *new_data, *data = rx_buf->data;
687 u8 old_tpa_state = tpa_info->tpa_state;
689 tpa_info->tpa_state = BNX2X_TPA_STOP;
691 /* If there was an error during the handling of the TPA_START -
692 * drop this aggregation.
694 if (old_tpa_state == BNX2X_TPA_ERROR)
697 /* Try to allocate the new data */
698 new_data = bnx2x_frag_alloc(fp);
699 /* Unmap skb in the pool anyway, as we are going to change
700 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
702 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
703 fp->rx_buf_size, DMA_FROM_DEVICE);
704 if (likely(new_data))
705 skb = build_skb(data, fp->rx_frag_size);
708 #ifdef BNX2X_STOP_ON_ERROR
709 if (pad + len > fp->rx_buf_size) {
710 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
711 pad, len, fp->rx_buf_size);
717 skb_reserve(skb, pad + NET_SKB_PAD);
719 skb->rxhash = tpa_info->rxhash;
720 skb->l4_rxhash = tpa_info->l4_rxhash;
722 skb->protocol = eth_type_trans(skb, bp->dev);
723 skb->ip_summed = CHECKSUM_UNNECESSARY;
725 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
726 skb, cqe, cqe_idx)) {
727 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
729 bnx2x_gro_receive(bp, fp, skb);
731 DP(NETIF_MSG_RX_STATUS,
732 "Failed to allocate new pages - dropping packet!\n");
733 dev_kfree_skb_any(skb);
736 /* put new data in bin */
737 rx_buf->data = new_data;
741 bnx2x_frag_free(fp, new_data);
743 /* drop the packet and keep the buffer in the bin */
744 DP(NETIF_MSG_RX_STATUS,
745 "Failed to allocate or map a new skb - dropping packet!\n");
746 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
749 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
750 struct bnx2x_fastpath *fp, u16 index)
753 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
754 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
757 data = bnx2x_frag_alloc(fp);
758 if (unlikely(data == NULL))
761 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
764 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
765 bnx2x_frag_free(fp, data);
766 BNX2X_ERR("Can't map rx data\n");
771 dma_unmap_addr_set(rx_buf, mapping, mapping);
773 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
774 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
780 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
781 struct bnx2x_fastpath *fp,
782 struct bnx2x_eth_q_stats *qstats)
784 /* Do nothing if no L4 csum validation was done.
785 * We do not check whether IP csum was validated. For IPv4 we assume
786 * that if the card got as far as validating the L4 csum, it also
787 * validated the IP csum. IPv6 has no IP csum.
789 if (cqe->fast_path_cqe.status_flags &
790 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
793 /* If L4 validation was done, check if an error was found. */
795 if (cqe->fast_path_cqe.type_error_flags &
796 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
797 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
798 qstats->hw_csum_err++;
800 skb->ip_summed = CHECKSUM_UNNECESSARY;
803 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
805 struct bnx2x *bp = fp->bp;
806 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
807 u16 sw_comp_cons, sw_comp_prod;
809 union eth_rx_cqe *cqe;
810 struct eth_fast_path_rx_cqe *cqe_fp;
812 #ifdef BNX2X_STOP_ON_ERROR
813 if (unlikely(bp->panic))
817 bd_cons = fp->rx_bd_cons;
818 bd_prod = fp->rx_bd_prod;
819 bd_prod_fw = bd_prod;
820 sw_comp_cons = fp->rx_comp_cons;
821 sw_comp_prod = fp->rx_comp_prod;
823 comp_ring_cons = RCQ_BD(sw_comp_cons);
824 cqe = &fp->rx_comp_ring[comp_ring_cons];
825 cqe_fp = &cqe->fast_path_cqe;
827 DP(NETIF_MSG_RX_STATUS,
828 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
830 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
831 struct sw_rx_bd *rx_buf = NULL;
834 enum eth_rx_cqe_type cqe_fp_type;
839 #ifdef BNX2X_STOP_ON_ERROR
840 if (unlikely(bp->panic))
844 bd_prod = RX_BD(bd_prod);
845 bd_cons = RX_BD(bd_cons);
847 cqe_fp_flags = cqe_fp->type_error_flags;
848 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
850 DP(NETIF_MSG_RX_STATUS,
851 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
852 CQE_TYPE(cqe_fp_flags),
853 cqe_fp_flags, cqe_fp->status_flags,
854 le32_to_cpu(cqe_fp->rss_hash_result),
855 le16_to_cpu(cqe_fp->vlan_tag),
856 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
858 /* is this a slowpath msg? */
859 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
860 bnx2x_sp_event(fp, cqe);
864 rx_buf = &fp->rx_buf_ring[bd_cons];
867 if (!CQE_TYPE_FAST(cqe_fp_type)) {
868 struct bnx2x_agg_info *tpa_info;
869 u16 frag_size, pages;
870 #ifdef BNX2X_STOP_ON_ERROR
872 if (fp->disable_tpa &&
873 (CQE_TYPE_START(cqe_fp_type) ||
874 CQE_TYPE_STOP(cqe_fp_type)))
875 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
876 CQE_TYPE(cqe_fp_type));
879 if (CQE_TYPE_START(cqe_fp_type)) {
880 u16 queue = cqe_fp->queue_index;
881 DP(NETIF_MSG_RX_STATUS,
882 "calling tpa_start on queue %d\n",
885 bnx2x_tpa_start(fp, queue,
891 queue = cqe->end_agg_cqe.queue_index;
892 tpa_info = &fp->tpa_info[queue];
893 DP(NETIF_MSG_RX_STATUS,
894 "calling tpa_stop on queue %d\n",
897 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
900 if (fp->mode == TPA_MODE_GRO)
901 pages = (frag_size + tpa_info->full_page - 1) /
904 pages = SGE_PAGE_ALIGN(frag_size) >>
907 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
908 &cqe->end_agg_cqe, comp_ring_cons);
909 #ifdef BNX2X_STOP_ON_ERROR
914 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
918 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
919 pad = cqe_fp->placement_offset;
920 dma_sync_single_for_cpu(&bp->pdev->dev,
921 dma_unmap_addr(rx_buf, mapping),
922 pad + RX_COPY_THRESH,
925 prefetch(data + pad); /* speedup eth_type_trans() */
926 /* is this an error packet? */
927 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
928 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
929 "ERROR flags %x rx packet %u\n",
930 cqe_fp_flags, sw_comp_cons);
931 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
935 /* Since we don't have a jumbo ring
936 * copy small packets if mtu > 1500
938 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
939 (len <= RX_COPY_THRESH)) {
940 skb = netdev_alloc_skb_ip_align(bp->dev, len);
942 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
943 "ERROR packet dropped because of alloc failure\n");
944 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
947 memcpy(skb->data, data + pad, len);
948 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
950 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
955 skb = build_skb(data, fp->rx_frag_size);
956 if (unlikely(!skb)) {
957 bnx2x_frag_free(fp, data);
958 bnx2x_fp_qstats(bp, fp)->
959 rx_skb_alloc_failed++;
962 skb_reserve(skb, pad);
964 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
965 "ERROR packet dropped because of alloc failure\n");
966 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
968 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
974 skb->protocol = eth_type_trans(skb, bp->dev);
976 /* Set Toeplitz hash for a non-LRO skb */
977 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
978 skb->l4_rxhash = l4_rxhash;
980 skb_checksum_none_assert(skb);
982 if (bp->dev->features & NETIF_F_RXCSUM)
983 bnx2x_csum_validate(skb, cqe, fp,
984 bnx2x_fp_qstats(bp, fp));
986 skb_record_rx_queue(skb, fp->rx_queue);
988 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
990 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
991 le16_to_cpu(cqe_fp->vlan_tag));
993 skb_mark_ll(skb, &fp->napi);
995 if (bnx2x_fp_ll_polling(fp))
996 netif_receive_skb(skb);
998 napi_gro_receive(&fp->napi, skb);
1000 rx_buf->data = NULL;
1002 bd_cons = NEXT_RX_IDX(bd_cons);
1003 bd_prod = NEXT_RX_IDX(bd_prod);
1004 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1007 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1008 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1010 /* mark CQE as free */
1011 BNX2X_SEED_CQE(cqe_fp);
1013 if (rx_pkt == budget)
1016 comp_ring_cons = RCQ_BD(sw_comp_cons);
1017 cqe = &fp->rx_comp_ring[comp_ring_cons];
1018 cqe_fp = &cqe->fast_path_cqe;
1021 fp->rx_bd_cons = bd_cons;
1022 fp->rx_bd_prod = bd_prod_fw;
1023 fp->rx_comp_cons = sw_comp_cons;
1024 fp->rx_comp_prod = sw_comp_prod;
1026 /* Update producers */
1027 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1030 fp->rx_pkt += rx_pkt;
1036 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1038 struct bnx2x_fastpath *fp = fp_cookie;
1039 struct bnx2x *bp = fp->bp;
1043 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1044 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1046 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1048 #ifdef BNX2X_STOP_ON_ERROR
1049 if (unlikely(bp->panic))
1053 /* Handle Rx and Tx according to MSI-X vector */
1054 for_each_cos_in_tx_queue(fp, cos)
1055 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1057 prefetch(&fp->sb_running_index[SM_RX_ID]);
1058 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1063 /* HW Lock for shared dual port PHYs */
1064 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1066 mutex_lock(&bp->port.phy_mutex);
1068 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1071 void bnx2x_release_phy_lock(struct bnx2x *bp)
1073 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1075 mutex_unlock(&bp->port.phy_mutex);
1078 /* calculates MF speed according to current linespeed and MF configuration */
1079 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1081 u16 line_speed = bp->link_vars.line_speed;
1083 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1084 bp->mf_config[BP_VN(bp)]);
1086 /* Calculate the current MAX line speed limit for the MF
1090 line_speed = (line_speed * maxCfg) / 100;
1091 else { /* SD mode */
1092 u16 vn_max_rate = maxCfg * 100;
1094 if (vn_max_rate < line_speed)
1095 line_speed = vn_max_rate;
1103 * bnx2x_fill_report_data - fill link report data to report
1105 * @bp: driver handle
1106 * @data: link state to update
1108 * It uses non-atomic bit operations because it is called under the mutex.
1110 static void bnx2x_fill_report_data(struct bnx2x *bp,
1111 struct bnx2x_link_report_data *data)
1113 u16 line_speed = bnx2x_get_mf_speed(bp);
1115 memset(data, 0, sizeof(*data));
1117 /* Fill the report data: effective line speed */
1118 data->line_speed = line_speed;
1121 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1122 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1123 &data->link_report_flags);
1126 if (bp->link_vars.duplex == DUPLEX_FULL)
1127 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1129 /* Rx Flow Control is ON */
1130 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1131 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1133 /* Tx Flow Control is ON */
1134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1135 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1139 * bnx2x_link_report - report link status to OS.
1141 * @bp: driver handle
1143 * Calls the __bnx2x_link_report() under the same locking scheme
1144 * as the link/PHY state managing code to ensure a consistent link
1148 void bnx2x_link_report(struct bnx2x *bp)
1150 bnx2x_acquire_phy_lock(bp);
1151 __bnx2x_link_report(bp);
1152 bnx2x_release_phy_lock(bp);
1156 * __bnx2x_link_report - report link status to OS.
1158 * @bp: driver handle
1160 * Non-atomic implementation.
1161 * Should be called under the phy_lock.
1163 void __bnx2x_link_report(struct bnx2x *bp)
1165 struct bnx2x_link_report_data cur_data;
1168 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1169 bnx2x_read_mf_cfg(bp);
1171 /* Read the current link report info */
1172 bnx2x_fill_report_data(bp, &cur_data);
1174 /* Don't report link down or exactly the same link status twice */
1175 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1176 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1177 &bp->last_reported_link.link_report_flags) &&
1178 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1179 &cur_data.link_report_flags)))
1184 /* We are going to report new link parameters now -
1185 * remember the current data for the next time.
1187 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1189 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &cur_data.link_report_flags)) {
1191 netif_carrier_off(bp->dev);
1192 netdev_err(bp->dev, "NIC Link is Down\n");
1198 netif_carrier_on(bp->dev);
1200 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1201 &cur_data.link_report_flags))
1206 /* Handle the FC at the end so that only these flags could possibly
1207 * be set. This way we may easily check if there is no FC
1210 if (cur_data.link_report_flags) {
1211 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1212 &cur_data.link_report_flags)) {
1213 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1214 &cur_data.link_report_flags))
1215 flow = "ON - receive & transmit";
1217 flow = "ON - receive";
1219 flow = "ON - transmit";
1224 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1225 cur_data.line_speed, duplex, flow);
1229 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 struct eth_rx_sge *sge;
1236 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1238 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1239 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1242 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1247 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1248 struct bnx2x_fastpath *fp, int last)
1252 for (i = 0; i < last; i++) {
1253 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1254 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1255 u8 *data = first_buf->data;
1258 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1261 if (tpa_info->tpa_state == BNX2X_TPA_START)
1262 dma_unmap_single(&bp->pdev->dev,
1263 dma_unmap_addr(first_buf, mapping),
1264 fp->rx_buf_size, DMA_FROM_DEVICE);
1265 bnx2x_frag_free(fp, data);
1266 first_buf->data = NULL;
1270 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1274 for_each_rx_queue_cnic(bp, j) {
1275 struct bnx2x_fastpath *fp = &bp->fp[j];
1279 /* Activate BD ring */
1281 * this will generate an interrupt (to the TSTORM),
1282 * so it must only be done after the chip is initialized
1284 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289 void bnx2x_init_rx_rings(struct bnx2x *bp)
1291 int func = BP_FUNC(bp);
1295 /* Allocate TPA resources */
1296 for_each_eth_queue(bp, j) {
1297 struct bnx2x_fastpath *fp = &bp->fp[j];
1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1302 if (!fp->disable_tpa) {
1303 /* Fill the per-aggregation pool */
1304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1305 struct bnx2x_agg_info *tpa_info =
1307 struct sw_rx_bd *first_buf =
1308 &tpa_info->first_buf;
1310 first_buf->data = bnx2x_frag_alloc(fp);
1311 if (!first_buf->data) {
1312 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1314 bnx2x_free_tpa_pool(bp, fp, i);
1315 fp->disable_tpa = 1;
1318 dma_unmap_addr_set(first_buf, mapping, 0);
1319 tpa_info->tpa_state = BNX2X_TPA_STOP;
1322 /* "next page" elements initialization */
1323 bnx2x_set_next_page_sgl(fp);
1325 /* set SGEs bit mask */
1326 bnx2x_init_sge_ring_bit_mask(fp);
1328 /* Allocate SGEs and initialize the ring elements */
1329 for (i = 0, ring_prod = 0;
1330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1333 BNX2X_ERR("was only able to allocate %d rx sges\n",
1335 BNX2X_ERR("disabling TPA for queue[%d]\n",
1337 /* Cleanup already allocated elements */
1338 bnx2x_free_rx_sge_range(bp, fp,
1340 bnx2x_free_tpa_pool(bp, fp,
1342 fp->disable_tpa = 1;
1346 ring_prod = NEXT_SGE_IDX(ring_prod);
1349 fp->rx_sge_prod = ring_prod;
1353 for_each_eth_queue(bp, j) {
1354 struct bnx2x_fastpath *fp = &bp->fp[j];
1358 /* Activate BD ring */
1360 * this will generate an interrupt (to the TSTORM),
1361 * so it must only be done after the chip is initialized
1363 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1369 if (CHIP_IS_E1(bp)) {
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1372 U64_LO(fp->rx_comp_mapping));
1373 REG_WR(bp, BAR_USTRORM_INTMEM +
1374 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1375 U64_HI(fp->rx_comp_mapping));
1380 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1383 struct bnx2x *bp = fp->bp;
1385 for_each_cos_in_tx_queue(fp, cos) {
1386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1387 unsigned pkts_compl = 0, bytes_compl = 0;
1389 u16 sw_prod = txdata->tx_pkt_prod;
1390 u16 sw_cons = txdata->tx_pkt_cons;
1392 while (sw_cons != sw_prod) {
1393 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1394 &pkts_compl, &bytes_compl);
1398 netdev_tx_reset_queue(
1399 netdev_get_tx_queue(bp->dev,
1400 txdata->txq_index));
1404 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1408 for_each_tx_queue_cnic(bp, i) {
1409 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1413 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1417 for_each_eth_queue(bp, i) {
1418 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1422 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1424 struct bnx2x *bp = fp->bp;
1427 /* ring wasn't allocated */
1428 if (fp->rx_buf_ring == NULL)
1431 for (i = 0; i < NUM_RX_BD; i++) {
1432 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1433 u8 *data = rx_buf->data;
1437 dma_unmap_single(&bp->pdev->dev,
1438 dma_unmap_addr(rx_buf, mapping),
1439 fp->rx_buf_size, DMA_FROM_DEVICE);
1441 rx_buf->data = NULL;
1442 bnx2x_frag_free(fp, data);
1446 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1450 for_each_rx_queue_cnic(bp, j) {
1451 bnx2x_free_rx_bds(&bp->fp[j]);
1455 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1459 for_each_eth_queue(bp, j) {
1460 struct bnx2x_fastpath *fp = &bp->fp[j];
1462 bnx2x_free_rx_bds(fp);
1464 if (!fp->disable_tpa)
1465 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1469 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1471 bnx2x_free_tx_skbs_cnic(bp);
1472 bnx2x_free_rx_skbs_cnic(bp);
1475 void bnx2x_free_skbs(struct bnx2x *bp)
1477 bnx2x_free_tx_skbs(bp);
1478 bnx2x_free_rx_skbs(bp);
1481 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1483 /* load old values */
1484 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1486 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1487 /* leave all but MAX value */
1488 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1490 /* set new MAX value */
1491 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1492 & FUNC_MF_CFG_MAX_BW_MASK;
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1501 * @bp: driver handle
1502 * @nvecs: number of vectors to be released
1504 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1508 if (nvecs == offset)
1511 /* VFs don't have a default SB */
1513 free_irq(bp->msix_table[offset].vector, bp->dev);
1514 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1515 bp->msix_table[offset].vector);
1519 if (CNIC_SUPPORT(bp)) {
1520 if (nvecs == offset)
1525 for_each_eth_queue(bp, i) {
1526 if (nvecs == offset)
1528 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1529 i, bp->msix_table[offset].vector);
1531 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1535 void bnx2x_free_irq(struct bnx2x *bp)
1537 if (bp->flags & USING_MSIX_FLAG &&
1538 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1539 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1541 /* vfs don't have a default status block */
1545 bnx2x_free_msix_irqs(bp, nvecs);
1547 free_irq(bp->dev->irq, bp->dev);
1551 int bnx2x_enable_msix(struct bnx2x *bp)
1553 int msix_vec = 0, i, rc;
1555 /* VFs don't have a default status block */
1557 bp->msix_table[msix_vec].entry = msix_vec;
1558 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1559 bp->msix_table[0].entry);
1563 /* Cnic requires an msix vector for itself */
1564 if (CNIC_SUPPORT(bp)) {
1565 bp->msix_table[msix_vec].entry = msix_vec;
1566 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1567 msix_vec, bp->msix_table[msix_vec].entry);
1571 /* We need separate vectors for ETH queues only (not FCoE) */
1572 for_each_eth_queue(bp, i) {
1573 bp->msix_table[msix_vec].entry = msix_vec;
1574 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1575 msix_vec, msix_vec, i);
1579 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1582 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1585 * reconfigure number of tx/rx queues according to available
1588 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1589 /* how many fewer vectors will we have? */
1590 int diff = msix_vec - rc;
1592 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1594 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1597 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1601 * decrease number of queues by number of unallocated entries
1603 bp->num_ethernet_queues -= diff;
1604 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1606 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1608 } else if (rc > 0) {
1609 /* Get by with single vector */
1610 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1612 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1618 bp->flags |= USING_SINGLE_MSIX_FLAG;
1620 BNX2X_DEV_INFO("set number of queues to 1\n");
1621 bp->num_ethernet_queues = 1;
1622 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1623 } else if (rc < 0) {
1624 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1628 bp->flags |= USING_MSIX_FLAG;
1633 /* fall back to INTx if not enough memory */
1635 bp->flags |= DISABLE_MSI_FLAG;
1640 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1642 int i, rc, offset = 0;
1644 /* no default status block for vf */
1646 rc = request_irq(bp->msix_table[offset++].vector,
1647 bnx2x_msix_sp_int, 0,
1648 bp->dev->name, bp->dev);
1650 BNX2X_ERR("request sp irq failed\n");
1655 if (CNIC_SUPPORT(bp))
1658 for_each_eth_queue(bp, i) {
1659 struct bnx2x_fastpath *fp = &bp->fp[i];
1660 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1663 rc = request_irq(bp->msix_table[offset].vector,
1664 bnx2x_msix_fp_int, 0, fp->name, fp);
1666 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1667 bp->msix_table[offset].vector, rc);
1668 bnx2x_free_msix_irqs(bp, offset);
1675 i = BNX2X_NUM_ETH_QUEUES(bp);
1677 offset = 1 + CNIC_SUPPORT(bp);
1678 netdev_info(bp->dev,
1679 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1680 bp->msix_table[0].vector,
1681 0, bp->msix_table[offset].vector,
1682 i - 1, bp->msix_table[offset + i - 1].vector);
1684 offset = CNIC_SUPPORT(bp);
1685 netdev_info(bp->dev,
1686 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1693 int bnx2x_enable_msi(struct bnx2x *bp)
1697 rc = pci_enable_msi(bp->pdev);
1699 BNX2X_DEV_INFO("MSI is not attainable\n");
1702 bp->flags |= USING_MSI_FLAG;
1707 static int bnx2x_req_irq(struct bnx2x *bp)
1709 unsigned long flags;
1712 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1715 flags = IRQF_SHARED;
1717 if (bp->flags & USING_MSIX_FLAG)
1718 irq = bp->msix_table[0].vector;
1720 irq = bp->pdev->irq;
1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1725 int bnx2x_setup_irqs(struct bnx2x *bp)
1728 if (bp->flags & USING_MSIX_FLAG &&
1729 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1730 rc = bnx2x_req_msix_irqs(bp);
1734 rc = bnx2x_req_irq(bp);
1736 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1739 if (bp->flags & USING_MSI_FLAG) {
1740 bp->dev->irq = bp->pdev->irq;
1741 netdev_info(bp->dev, "using MSI IRQ %d\n",
1744 if (bp->flags & USING_MSIX_FLAG) {
1745 bp->dev->irq = bp->msix_table[0].vector;
1746 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1754 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1758 for_each_rx_queue_cnic(bp, i) {
1759 bnx2x_fp_init_lock(&bp->fp[i]);
1760 napi_enable(&bnx2x_fp(bp, i, napi));
1764 static void bnx2x_napi_enable(struct bnx2x *bp)
1768 for_each_eth_queue(bp, i) {
1769 bnx2x_fp_init_lock(&bp->fp[i]);
1770 napi_enable(&bnx2x_fp(bp, i, napi));
1774 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1779 for_each_rx_queue_cnic(bp, i) {
1780 napi_disable(&bnx2x_fp(bp, i, napi));
1781 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1787 static void bnx2x_napi_disable(struct bnx2x *bp)
1792 for_each_eth_queue(bp, i) {
1793 napi_disable(&bnx2x_fp(bp, i, napi));
1794 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1800 void bnx2x_netif_start(struct bnx2x *bp)
1802 if (netif_running(bp->dev)) {
1803 bnx2x_napi_enable(bp);
1804 if (CNIC_LOADED(bp))
1805 bnx2x_napi_enable_cnic(bp);
1806 bnx2x_int_enable(bp);
1807 if (bp->state == BNX2X_STATE_OPEN)
1808 netif_tx_wake_all_queues(bp->dev);
1812 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1814 bnx2x_int_disable_sync(bp, disable_hw);
1815 bnx2x_napi_disable(bp);
1816 if (CNIC_LOADED(bp))
1817 bnx2x_napi_disable_cnic(bp);
1820 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1822 struct bnx2x *bp = netdev_priv(dev);
1824 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1825 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1826 u16 ether_type = ntohs(hdr->h_proto);
1828 /* Skip VLAN tag if present */
1829 if (ether_type == ETH_P_8021Q) {
1830 struct vlan_ethhdr *vhdr =
1831 (struct vlan_ethhdr *)skb->data;
1833 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1836 /* If ethertype is FCoE or FIP - use FCoE ring */
1837 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1838 return bnx2x_fcoe_tx(bp, txq_index);
1841 /* select a non-FCoE queue */
1842 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1845 void bnx2x_set_num_queues(struct bnx2x *bp)
1848 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1850 /* override in STORAGE SD modes */
1851 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1852 bp->num_ethernet_queues = 1;
1854 /* Add special queues */
1855 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1856 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1858 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1862 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1864 * @bp: Driver handle
1866 * We currently support at most 16 Tx queues for each CoS, thus we will
1867 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1870 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1871 * index after all ETH L2 indices.
1873 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1874 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1875 * 16..31,...) with indices that are not coupled with any real Tx queue.
1877 * The proper configuration of skb->queue_mapping is handled by
1878 * bnx2x_select_queue() and __skb_tx_hash().
1880 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1881 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1883 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1887 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1888 rx = BNX2X_NUM_ETH_QUEUES(bp);
1890 /* account for fcoe queue */
1891 if (include_cnic && !NO_FCOE(bp)) {
1896 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1898 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1901 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1903 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1907 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1913 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1917 for_each_queue(bp, i) {
1918 struct bnx2x_fastpath *fp = &bp->fp[i];
1921 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1924 * Although there are no IP frames expected to arrive on
1925 * this ring, we still want to add an
1926 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1929 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1932 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1933 IP_HEADER_ALIGNMENT_PADDING +
1936 BNX2X_FW_RX_ALIGN_END;
1937 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1938 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1939 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1941 fp->rx_frag_size = 0;
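		/* Illustrative example (assuming 4K pages): a standard
		 * 1500-byte MTU buffer fits in a page and page fragments are
		 * used, while a 9000-byte jumbo MTU buffer does not and
		 * bnx2x_frag_alloc() falls back to kmalloc().
		 */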
1945 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1948 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1950 /* Prepare the initial contents for the indirection table if RSS is
1953 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1954 bp->rss_conf_obj.ind_table[i] =
1956 ethtool_rxfh_indir_default(i, num_eth_queues);
1959 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1960 * per-port, so if explicit configuration is needed, do it only
1963 * For 57712 and newer on the other hand it's a per-function
1966 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
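	/* i.e. on 57710/57711 only the PMF programs the (per-port) RSS keys,
	 * while on 57712 and newer every function programs its own.
	 */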
1969 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1972 struct bnx2x_config_rss_params params = {NULL};
1974 /* Although RSS is meaningless when there is a single HW queue we
1975 * still need it enabled in order to have HW Rx hash generated.
1977 * if (!is_eth_multi(bp))
1978 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1981 params.rss_obj = rss_obj;
1983 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1985 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1987 /* RSS configuration */
1988 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1989 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1990 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1991 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1992 if (rss_obj->udp_rss_v4)
1993 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1994 if (rss_obj->udp_rss_v6)
1995 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1998 params.rss_result_mask = MULTI_MASK;
2000 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2004 prandom_bytes(params.rss_key, sizeof(params.rss_key));
2005 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2008 return bnx2x_config_rss(bp, &params);
2011 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2013 struct bnx2x_func_state_params func_params = {NULL};
2015 /* Prepare parameters for function state transitions */
2016 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2018 func_params.f_obj = &bp->func_obj;
2019 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2021 func_params.params.hw_init.load_phase = load_code;
2023 return bnx2x_func_state_change(bp, &func_params);
2027 * Cleans the objects that have internal lists without sending
2028 * ramrods. Should be run when interrupts are disabled.
2030 void bnx2x_squeeze_objects(struct bnx2x *bp)
2033 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2034 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2035 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2037 /***************** Cleanup MACs' object first *************************/
2039 /* Wait for completion of requested */
2040 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2041 /* Perform a dry cleanup */
2042 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2044 /* Clean ETH primary MAC */
2045 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2046 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2049 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2051 /* Cleanup UC list */
2053 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2054 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2057 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2059 /***************** Now clean mcast object *****************************/
2060 rparam.mcast_obj = &bp->mcast_obj;
2061 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2063 /* Add a DEL command... */
2064 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2066 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2069 /* ...and wait until all pending commands are cleared */
2070 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2073 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2078 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2082 #ifndef BNX2X_STOP_ON_ERROR
2083 #define LOAD_ERROR_EXIT(bp, label) \
2085 (bp)->state = BNX2X_STATE_ERROR; \
2089 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2091 bp->cnic_loaded = false; \
2094 #else /*BNX2X_STOP_ON_ERROR*/
2095 #define LOAD_ERROR_EXIT(bp, label) \
2097 (bp)->state = BNX2X_STATE_ERROR; \
2101 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2103 bp->cnic_loaded = false; \
2107 #endif /*BNX2X_STOP_ON_ERROR*/
2109 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2111 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2112 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2116 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2118 int num_groups, vf_headroom = 0;
2119 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2121 /* number of queues for statistics is number of eth queues + FCoE */
2122 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2124 /* Total number of FW statistics requests =
2125 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2126 * and fcoe l2 queue) stats + num of queues (which includes another 1
2127 * for fcoe l2 queue if applicable)
2129 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2131 /* vf stats appear in the request list, but their data is allocated by
2132 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2133 * it is used to determine where to place the vf stats queries in the
2137 vf_headroom = bnx2x_vf_headroom(bp);
2139 /* Request is built from stats_query_header and an array of
2140 * stats_query_cmd_group each of which contains
2141 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2142 * configured in the stats_query_header.
2145 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2146 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2149 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2150 bp->fw_stats_num, vf_headroom, num_groups);
2151 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2152 num_groups * sizeof(struct stats_query_cmd_group);
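	/* The num_groups expression above is simply a ceiling division:
	 * DIV_ROUND_UP(fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT).
	 */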
2154 /* Data for statistics requests + stats_counter
2155 * stats_counter holds per-STORM counters that are incremented
2156 * when STORM has finished with the current request.
2157 * memory for FCoE offloaded statistics is counted anyway,
2158 * even if they will not be sent.
2159 * VF stats are not accounted for here as the data of VF stats is stored
2160 * in memory allocated by the VF, not here.
2162 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2163 sizeof(struct per_pf_stats) +
2164 sizeof(struct fcoe_statistics_params) +
2165 sizeof(struct per_queue_stats) * num_queue_stats +
2166 sizeof(struct stats_counter);
2168 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2169 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2172 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2173 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2174 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2175 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2176 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2177 bp->fw_stats_req_sz;
2179 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2180 U64_HI(bp->fw_stats_req_mapping),
2181 U64_LO(bp->fw_stats_req_mapping));
2182 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2183 U64_HI(bp->fw_stats_data_mapping),
2184 U64_LO(bp->fw_stats_data_mapping));
2188 bnx2x_free_fw_stats_mem(bp);
2189 BNX2X_ERR("Can't allocate FW stats memory\n");
2193 /* send load request to mcp and analyze response */
2194 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2200 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2201 DRV_MSG_SEQ_NUMBER_MASK);
2202 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2204 /* Get current FW pulse sequence */
2205 bp->fw_drv_pulse_wr_seq =
2206 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2207 DRV_PULSE_SEQ_MASK);
2208 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2210 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2212 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2213 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2216 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2218 /* if mcp fails to respond we must abort */
2219 if (!(*load_code)) {
2220 BNX2X_ERR("MCP response failure, aborting\n");
2224 /* If mcp refused (e.g. other port is in diagnostic mode) we
2227 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2228 BNX2X_ERR("MCP refused load request, aborting\n");
2234 /* check whether another PF has already loaded FW to chip. In
2235 * virtualized environments a pf from another VM may have already
2236 * initialized the device including loading FW
2238 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2240 /* is another pf loaded on this engine? */
2241 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2242 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2243 /* build my FW version dword */
2244 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2245 (BCM_5710_FW_MINOR_VERSION << 8) +
2246 (BCM_5710_FW_REVISION_VERSION << 16) +
2247 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2249 /* read loaded FW from chip */
2250 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2252 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2255 /* abort nic load if version mismatch */
2256 if (my_fw != loaded_fw) {
2257 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2265 /* returns the "mcp load_code" according to global load_count array */
2266 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2268 int path = BP_PATH(bp);
2270 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2271 path, load_count[path][0], load_count[path][1],
2272 load_count[path][2]);
2273 load_count[path][0]++;
2274 load_count[path][1 + port]++;
2275 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2276 path, load_count[path][0], load_count[path][1],
2277 load_count[path][2]);
2278 if (load_count[path][0] == 1)
2279 return FW_MSG_CODE_DRV_LOAD_COMMON;
2280 else if (load_count[path][1 + port] == 1)
2281 return FW_MSG_CODE_DRV_LOAD_PORT;
2283 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
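/* load_count[path][0] counts every load on the path and load_count[path][1 + port]
 * counts loads per port, so the first load on a path reports COMMON, the first
 * on a port reports PORT, and all others report FUNCTION, mimicking the load
 * codes the MCP would normally hand out.
 */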
2286 /* mark PMF if applicable */
2287 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2289 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2290 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2291 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2293 /* We need the barrier to ensure the ordering between the
2294 * writing to bp->port.pmf here and reading it from the
2295 * bnx2x_periodic_task().
2302 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2307 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2308 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2309 (bp->common.shmem2_base)) {
2310 if (SHMEM2_HAS(bp, dcc_support))
2311 SHMEM2_WR(bp, dcc_support,
2312 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2313 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2314 if (SHMEM2_HAS(bp, afex_driver_support))
2315 SHMEM2_WR(bp, afex_driver_support,
2316 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2319 /* Set AFEX default VLAN tag to an invalid value */
2320 bp->afex_def_vlan_tag = -1;
2324 * bnx2x_bz_fp - zero content of the fastpath structure.
2326 * @bp: driver handle
2327 * @index: fastpath index to be zeroed
2329 * Makes sure the contents of the bp->fp[index].napi is kept
2332 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2334 struct bnx2x_fastpath *fp = &bp->fp[index];
2336 struct napi_struct orig_napi = fp->napi;
2337 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2339 /* bzero bnx2x_fastpath contents */
2341 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2342 sizeof(struct bnx2x_agg_info));
2343 memset(fp, 0, sizeof(*fp));
2345 /* Restore the NAPI object as it has been already initialized */
2346 fp->napi = orig_napi;
2347 fp->tpa_info = orig_tpa_info;
2351 fp->max_cos = bp->max_cos;
2353 /* Special queues support only one CoS */
2356 /* Init txdata pointers */
2358 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2360 for_each_cos_in_tx_queue(fp, cos)
2361 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2362 BNX2X_NUM_ETH_QUEUES(bp) + index];
2364 /* set the tpa flag for each queue. The tpa flag determines the queue
2365 * minimal size so it must be set prior to queue memory allocation
2367 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2368 (bp->flags & GRO_ENABLE_FLAG &&
2369 bnx2x_mtu_allows_gro(bp->dev->mtu)));
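	/* When both flags are set, LRO takes precedence over GRO */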
2370 if (bp->flags & TPA_ENABLE_FLAG)
2371 fp->mode = TPA_MODE_LRO;
2372 else if (bp->flags & GRO_ENABLE_FLAG)
2373 fp->mode = TPA_MODE_GRO;
2375 /* We don't want TPA on an FCoE L2 ring */
2377 fp->disable_tpa = 1;
2380 int bnx2x_load_cnic(struct bnx2x *bp)
2382 int i, rc, port = BP_PORT(bp);
2384 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2386 mutex_init(&bp->cnic_mutex);
2389 rc = bnx2x_alloc_mem_cnic(bp);
2391 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2392 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2396 rc = bnx2x_alloc_fp_mem_cnic(bp);
2398 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2399 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2402 /* Update the number of queues with the cnic queues */
2403 rc = bnx2x_set_real_num_queues(bp, 1);
2405 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2406 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2409 /* Add all CNIC NAPI objects */
2410 bnx2x_add_all_napi_cnic(bp);
2411 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2412 bnx2x_napi_enable_cnic(bp);
2414 rc = bnx2x_init_hw_func_cnic(bp);
2416 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2418 bnx2x_nic_init_cnic(bp);
2421 /* Enable Timer scan */
2422 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2424 /* setup cnic queues */
2425 for_each_cnic_queue(bp, i) {
2426 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2428 BNX2X_ERR("Queue setup failed\n");
2429 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2434 /* Initialize Rx filter. */
2435 netif_addr_lock_bh(bp->dev);
2436 bnx2x_set_rx_mode(bp->dev);
2437 netif_addr_unlock_bh(bp->dev);
2439 /* re-read iscsi info */
2440 bnx2x_get_iscsi_info(bp);
2441 bnx2x_setup_cnic_irq_info(bp);
2442 bnx2x_setup_cnic_info(bp);
2443 bp->cnic_loaded = true;
2444 if (bp->state == BNX2X_STATE_OPEN)
2445 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2447 DP(NETIF_MSG_IFUP, "Ending CNIC-related load successfully\n");
2451 #ifndef BNX2X_STOP_ON_ERROR
2453 /* Disable Timer scan */
2454 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2457 bnx2x_napi_disable_cnic(bp);
2458 /* Update the number of queues without the cnic queues */
2459 rc = bnx2x_set_real_num_queues(bp, 0);
2461 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2463 BNX2X_ERR("CNIC-related load failed\n");
2464 bnx2x_free_fp_mem_cnic(bp);
2465 bnx2x_free_mem_cnic(bp);
2467 #endif /* ! BNX2X_STOP_ON_ERROR */
2470 /* must be called with rtnl_lock */
2471 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2473 int port = BP_PORT(bp);
2474 int i, rc = 0, load_code = 0;
2476 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2478 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2480 #ifdef BNX2X_STOP_ON_ERROR
2481 if (unlikely(bp->panic)) {
2482 BNX2X_ERR("Can't load NIC when there is panic\n");
2487 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2489 /* zero the structure w/o any lock, before SP handler is initialized */
2490 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2491 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2492 &bp->last_reported_link.link_report_flags);
2495 /* must be called before memory allocation and HW init */
2496 bnx2x_ilt_set_info(bp);
2499 * Zero fastpath structures preserving invariants like napi, which are
2500 * allocated only once, fp index, max_cos, bp pointer.
2501 * Also set fp->disable_tpa and txdata_ptr.
2503 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2504 for_each_queue(bp, i)
2506 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2507 bp->num_cnic_queues) *
2508 sizeof(struct bnx2x_fp_txdata));
2510 bp->fcoe_init = false;
2512 /* Set the receive queues buffer size */
2513 bnx2x_set_rx_buf_size(bp);
2516 rc = bnx2x_alloc_mem(bp);
2518 BNX2X_ERR("Unable to allocate bp memory\n");
2523 /* Allocate memory for FW statistics */
2524 if (bnx2x_alloc_fw_stats_mem(bp))
2525 LOAD_ERROR_EXIT(bp, load_error0);
2527 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2528 * of memory available for RSS queues
2530 rc = bnx2x_alloc_fp_mem(bp);
2532 BNX2X_ERR("Unable to allocate memory for fps\n");
2533 LOAD_ERROR_EXIT(bp, load_error0);
2536 /* request pf to initialize status blocks */
2538 rc = bnx2x_vfpf_init(bp);
2540 LOAD_ERROR_EXIT(bp, load_error0);
2543 /* As long as bnx2x_alloc_mem() may possibly update
2544 * bp->num_queues, bnx2x_set_real_num_queues() should always
2545 * come after it. At this stage cnic queues are not counted.
2547 rc = bnx2x_set_real_num_queues(bp, 0);
2549 BNX2X_ERR("Unable to set real_num_queues\n");
2550 LOAD_ERROR_EXIT(bp, load_error0);
2553 /* configure multi cos mappings in kernel.
2554 * this configuration may be overridden by a multi class queue
2555 * discipline or by a dcbx negotiation result.
2557 bnx2x_setup_tc(bp->dev, bp->max_cos);
2559 /* Add all NAPI objects */
2560 bnx2x_add_all_napi(bp);
2561 DP(NETIF_MSG_IFUP, "napi added\n");
2562 bnx2x_napi_enable(bp);
2565 /* set pf load just before approaching the MCP */
2566 bnx2x_set_pf_load(bp);
2568 /* if mcp exists send load request and analyze response */
2569 if (!BP_NOMCP(bp)) {
2570 /* attempt to load pf */
2571 rc = bnx2x_nic_load_request(bp, &load_code);
2573 LOAD_ERROR_EXIT(bp, load_error1);
2575 /* what did mcp say? */
2576 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2578 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2579 LOAD_ERROR_EXIT(bp, load_error2);
2582 load_code = bnx2x_nic_load_no_mcp(bp, port);
2585 /* mark pmf if applicable */
2586 bnx2x_nic_load_pmf(bp, load_code);
2588 /* Init Function state controlling object */
2589 bnx2x__init_func_obj(bp);
2592 rc = bnx2x_init_hw(bp, load_code);
2594 BNX2X_ERR("HW init failed, aborting\n");
2595 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2596 LOAD_ERROR_EXIT(bp, load_error2);
2600 bnx2x_pre_irq_nic_init(bp);
2602 /* Connect to IRQs */
2603 rc = bnx2x_setup_irqs(bp);
2605 BNX2X_ERR("setup irqs failed\n");
2607 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2608 LOAD_ERROR_EXIT(bp, load_error2);
2611 /* Init per-function objects */
2613 /* Setup NIC internals and enable interrupts */
2614 bnx2x_post_irq_nic_init(bp, load_code);
2616 bnx2x_init_bp_objs(bp);
2617 bnx2x_iov_nic_init(bp);
2619 /* Set AFEX default VLAN tag to an invalid value */
2620 bp->afex_def_vlan_tag = -1;
2621 bnx2x_nic_load_afex_dcc(bp, load_code);
2622 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2623 rc = bnx2x_func_start(bp);
2625 BNX2X_ERR("Function start failed!\n");
2626 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2628 LOAD_ERROR_EXIT(bp, load_error3);
2631 /* Send LOAD_DONE command to MCP */
2632 if (!BP_NOMCP(bp)) {
2633 load_code = bnx2x_fw_command(bp,
2634 DRV_MSG_CODE_LOAD_DONE, 0);
2636 BNX2X_ERR("MCP response failure, aborting\n");
2638 LOAD_ERROR_EXIT(bp, load_error3);
2642 /* initialize FW coalescing state machines in RAM */
2643 bnx2x_update_coalesce(bp);
2645 /* setup the leading queue */
2646 rc = bnx2x_setup_leading(bp);
2648 BNX2X_ERR("Setup leading failed!\n");
2649 LOAD_ERROR_EXIT(bp, load_error3);
2652 /* set up the rest of the queues */
2653 for_each_nondefault_eth_queue(bp, i) {
2654 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2656 BNX2X_ERR("Queue setup failed\n");
2657 LOAD_ERROR_EXIT(bp, load_error3);
2662 rc = bnx2x_init_rss_pf(bp);
2664 BNX2X_ERR("PF RSS init failed\n");
2665 LOAD_ERROR_EXIT(bp, load_error3);
2669 for_each_eth_queue(bp, i) {
2670 rc = bnx2x_vfpf_setup_q(bp, i);
2672 BNX2X_ERR("Queue setup failed\n");
2673 LOAD_ERROR_EXIT(bp, load_error3);
2678 /* Now when Clients are configured we are ready to work */
2679 bp->state = BNX2X_STATE_OPEN;
2681 /* Configure a ucast MAC */
2683 rc = bnx2x_set_eth_mac(bp, true);
2685 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2688 BNX2X_ERR("Setting Ethernet MAC failed\n");
2689 LOAD_ERROR_EXIT(bp, load_error3);
2692 if (IS_PF(bp) && bp->pending_max) {
2693 bnx2x_update_max_mf_config(bp, bp->pending_max);
2694 bp->pending_max = 0;
2698 rc = bnx2x_initial_phy_init(bp, load_mode);
2700 LOAD_ERROR_EXIT(bp, load_error3);
2702 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2704 /* Start fast path */
2706 /* Initialize Rx filter. */
2707 netif_addr_lock_bh(bp->dev);
2708 bnx2x_set_rx_mode(bp->dev);
2709 netif_addr_unlock_bh(bp->dev);
2712 switch (load_mode) {
2714 /* Tx queue should be only re-enabled */
2715 netif_tx_wake_all_queues(bp->dev);
2719 netif_tx_start_all_queues(bp->dev);
2720 smp_mb__after_clear_bit();
2724 case LOAD_LOOPBACK_EXT:
2725 bp->state = BNX2X_STATE_DIAG;
2733 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2735 bnx2x__link_status_update(bp);
2737 /* start the timer */
2738 mod_timer(&bp->timer, jiffies + bp->current_interval);
2740 if (CNIC_ENABLED(bp))
2741 bnx2x_load_cnic(bp);
2743 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2744 /* mark driver is loaded in shmem2 */
2746 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2747 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2748 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2749 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2752 /* Wait for all pending SP commands to complete */
2753 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2754 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2755 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2759 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2760 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2761 bnx2x_dcbx_init(bp, false);
2763 DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2767 #ifndef BNX2X_STOP_ON_ERROR
2770 bnx2x_int_disable_sync(bp, 1);
2772 /* Clean queueable objects */
2773 bnx2x_squeeze_objects(bp);
2776 /* Free SKBs, SGEs, TPA pool and driver internals */
2777 bnx2x_free_skbs(bp);
2778 for_each_rx_queue(bp, i)
2779 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2784 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2785 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2786 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2791 bnx2x_napi_disable(bp);
2792 bnx2x_del_all_napi(bp);
2794 /* clear pf_load status, as it was already set */
2796 bnx2x_clear_pf_load(bp);
2798 bnx2x_free_fp_mem(bp);
2799 bnx2x_free_fw_stats_mem(bp);
2803 #endif /* ! BNX2X_STOP_ON_ERROR */
2806 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2810 /* Wait until tx fastpath tasks complete */
2811 for_each_tx_queue(bp, i) {
2812 struct bnx2x_fastpath *fp = &bp->fp[i];
2814 for_each_cos_in_tx_queue(fp, cos)
2815 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2822 /* must be called with rtnl_lock */
2823 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2826 bool global = false;
2828 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2830 /* mark driver is unloaded in shmem2 */
2831 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2833 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2834 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2835 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2838 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2839 (bp->state == BNX2X_STATE_CLOSED ||
2840 bp->state == BNX2X_STATE_ERROR)) {
2841 /* We can get here if the driver has been unloaded
2842 * during parity error recovery and is either waiting for a
2843 * leader to complete or for other functions to unload and
2844 * then ifdown has been issued. In this case we want to
2845 * unload and let other functions to complete a recovery
2848 bp->recovery_state = BNX2X_RECOVERY_DONE;
2850 bnx2x_release_leader_lock(bp);
2853 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2854 BNX2X_ERR("Can't unload in closed or error state\n");
2858 /* Nothing to do during unload if the previous bnx2x_nic_load()
2859 * has not completed successfully - all resources are released.
2861 * We can get here only after an unsuccessful ndo_* callback, during which
2862 * dev->IFF_UP flag is still on.
2864 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2867 /* It's important to set the bp->state to a value different from
2868 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2869 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2871 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2874 if (CNIC_LOADED(bp))
2875 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2878 bnx2x_tx_disable(bp);
2879 netdev_reset_tc(bp->dev);
2881 bp->rx_mode = BNX2X_RX_MODE_NONE;
2883 del_timer_sync(&bp->timer);
2886 /* Set ALWAYS_ALIVE bit in shmem */
2887 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2888 bnx2x_drv_pulse(bp);
2889 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2890 bnx2x_save_statistics(bp);
2893 /* wait till consumers catch up with producers in all queues */
2894 bnx2x_drain_tx_queues(bp);
2896 /* if VF, indicate to PF that this function is going down (PF will delete sp
2897 * elements and clear initializations)
2900 bnx2x_vfpf_close_vf(bp);
2901 else if (unload_mode != UNLOAD_RECOVERY)
2902 /* if this is a normal/close unload need to clean up chip*/
2903 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2905 /* Send the UNLOAD_REQUEST to the MCP */
2906 bnx2x_send_unload_req(bp, unload_mode);
2908 /* Prevent transactions to host from the functions on the
2909 * engine that doesn't reset global blocks in case of global
2910 * attention once global blocks are reset and gates are opened
2911 * (the engine whose leader will perform the recovery
2914 if (!CHIP_IS_E1x(bp))
2915 bnx2x_pf_disable(bp);
2917 /* Disable HW interrupts, NAPI */
2918 bnx2x_netif_stop(bp, 1);
2919 /* Delete all NAPI objects */
2920 bnx2x_del_all_napi(bp);
2921 if (CNIC_LOADED(bp))
2922 bnx2x_del_all_napi_cnic(bp);
2926 /* Report UNLOAD_DONE to MCP */
2927 bnx2x_send_unload_done(bp, false);
2931 * At this stage no more interrupts will arrive so we may safely clean
2932 * the queueable objects here in case they failed to get cleaned so far.
2935 bnx2x_squeeze_objects(bp);
2937 /* There should be no more pending SP commands at this stage */
2942 /* Free SKBs, SGEs, TPA pool and driver internals */
2943 bnx2x_free_skbs(bp);
2944 if (CNIC_LOADED(bp))
2945 bnx2x_free_skbs_cnic(bp);
2946 for_each_rx_queue(bp, i)
2947 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2949 bnx2x_free_fp_mem(bp);
2950 if (CNIC_LOADED(bp))
2951 bnx2x_free_fp_mem_cnic(bp);
2954 if (CNIC_LOADED(bp))
2955 bnx2x_free_mem_cnic(bp);
2958 bp->state = BNX2X_STATE_CLOSED;
2959 bp->cnic_loaded = false;
2961 /* Check if there are pending parity attentions. If there are - set
2962 * RECOVERY_IN_PROGRESS.
2964 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2965 bnx2x_set_reset_in_progress(bp);
2967 /* Set RESET_IS_GLOBAL if needed */
2969 bnx2x_set_reset_global(bp);
2972 /* The last driver must disable a "close the gate" if there is no
2973 * parity attention or "process kill" pending.
2976 !bnx2x_clear_pf_load(bp) &&
2977 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2978 bnx2x_disable_close_the_gate(bp);
2980 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2985 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2989 /* If there is no power capability, silently succeed */
2991 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2995 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2999 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3000 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3001 PCI_PM_CTRL_PME_STATUS));
3003 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3004 /* delay required during transition out of D3hot */
3009 /* If there are other clients above don't
3010 shut down the power */
3011 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3013 /* Don't shut down the power for emulation and FPGA */
3014 if (CHIP_REV_IS_SLOW(bp))
3017 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3021 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3023 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3026 /* No more memory access after this point until
3027 * device is brought back to D0.
3032 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3039 * net_device service functions
3041 int bnx2x_poll(struct napi_struct *napi, int budget)
3045 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3047 struct bnx2x *bp = fp->bp;
3050 #ifdef BNX2X_STOP_ON_ERROR
3051 if (unlikely(bp->panic)) {
3052 napi_complete(napi);
3056 if (!bnx2x_fp_lock_napi(fp))
3059 for_each_cos_in_tx_queue(fp, cos)
3060 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3061 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3063 if (bnx2x_has_rx_work(fp)) {
3064 work_done += bnx2x_rx_int(fp, budget - work_done);
3066 /* must not complete if we consumed full budget */
3067 if (work_done >= budget) {
3068 bnx2x_fp_unlock_napi(fp);
3073 /* Fall out from the NAPI loop if needed */
3074 if (!bnx2x_fp_unlock_napi(fp) &&
3075 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3077 /* No need to update SB for FCoE L2 ring as long as
3078 * it's connected to the default SB and the SB
3079 * has been updated when NAPI was scheduled.
3081 if (IS_FCOE_FP(fp)) {
3082 napi_complete(napi);
3085 bnx2x_update_fpsb_idx(fp);
3086 /* bnx2x_has_rx_work() reads the status block,
3087 * thus we need to ensure that status block indices
3088 * have been actually read (bnx2x_update_fpsb_idx)
3089 * prior to this check (bnx2x_has_rx_work) so that
3090 * we won't write the "newer" value of the status block
3091 * to IGU (if there was a DMA right after
3092 * bnx2x_has_rx_work and if there is no rmb, the memory
3093 * reading (bnx2x_update_fpsb_idx) may be postponed
3094 * to right before bnx2x_ack_sb). In this case there
3095 * will never be another interrupt until there is
3096 * another update of the status block, while there
3097 * is still unhandled work.
3101 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3102 napi_complete(napi);
3103 /* Re-enable interrupts */
3104 DP(NETIF_MSG_RX_STATUS,
3105 "Update index to %d\n", fp->fp_hc_idx);
3106 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3107 le16_to_cpu(fp->fp_hc_idx),
3117 #ifdef CONFIG_NET_LL_RX_POLL
3118 /* must be called with local_bh_disable()d */
3119 int bnx2x_low_latency_recv(struct napi_struct *napi)
3121 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3123 struct bnx2x *bp = fp->bp;
3126 if ((bp->state == BNX2X_STATE_CLOSED) ||
3127 (bp->state == BNX2X_STATE_ERROR) ||
3128 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3129 return LL_FLUSH_FAILED;
3131 if (!bnx2x_fp_lock_poll(fp))
3132 return LL_FLUSH_BUSY;
3134 if (bnx2x_has_rx_work(fp))
3135 found = bnx2x_rx_int(fp, 4);
3137 bnx2x_fp_unlock_poll(fp);
3143 /* we split the first BD into header and data BDs
3144 * to ease the pain of our fellow microcode engineers;
3145 * we use one mapping for both BDs
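 *
 * e.g. if the linear part holds 200 bytes but only hlen = 66 bytes of
 * headers, the start BD is shrunk to 66 bytes and a new data BD pointing
 * at mapping + 66 carries the remaining 134 bytes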
3147 static u16 bnx2x_tx_split(struct bnx2x *bp,
3148 struct bnx2x_fp_txdata *txdata,
3149 struct sw_tx_bd *tx_buf,
3150 struct eth_tx_start_bd **tx_bd, u16 hlen,
3153 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3154 struct eth_tx_bd *d_tx_bd;
3156 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3158 /* first fix first BD */
3159 h_tx_bd->nbytes = cpu_to_le16(hlen);
3161 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3162 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3164 /* now get a new data BD
3165 * (after the pbd) and fill it */
3166 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3167 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3169 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3170 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3172 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3173 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3174 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3176 /* this marks the BD as one that has no individual mapping */
3177 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3179 DP(NETIF_MSG_TX_QUEUED,
3180 "TSO split data size is %d (%x:%x)\n",
3181 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3184 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3189 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3190 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
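/* bnx2x_csum_fix - adjust a partial checksum by 'fix' bytes around the
 * transport header: for a positive fix the partial sum of the fix bytes
 * preceding t_header is subtracted from csum, for a negative fix the
 * partial sum of the -fix bytes starting at t_header is added, and the
 * folded result is byte-swapped into the FW's expected ordering.
 */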
3191 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3193 __sum16 tsum = (__force __sum16) csum;
3196 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3197 csum_partial(t_header - fix, fix, 0)));
3200 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3201 csum_partial(t_header, -fix, 0)));
3203 return bswab16(tsum);
3206 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3212 if (skb->ip_summed != CHECKSUM_PARTIAL)
3215 protocol = vlan_get_protocol(skb);
3216 if (protocol == htons(ETH_P_IPV6)) {
3218 prot = ipv6_hdr(skb)->nexthdr;
3221 prot = ip_hdr(skb)->protocol;
3224 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3225 if (inner_ip_hdr(skb)->version == 6) {
3226 rc |= XMIT_CSUM_ENC_V6;
3227 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3228 rc |= XMIT_CSUM_TCP;
3230 rc |= XMIT_CSUM_ENC_V4;
3231 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3232 rc |= XMIT_CSUM_TCP;
3235 if (prot == IPPROTO_TCP)
3236 rc |= XMIT_CSUM_TCP;
3238 if (skb_is_gso_v6(skb)) {
3239 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3240 if (rc & XMIT_CSUM_ENC)
3241 rc |= XMIT_GSO_ENC_V6;
3242 } else if (skb_is_gso(skb)) {
3243 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3244 if (rc & XMIT_CSUM_ENC)
3245 rc |= XMIT_GSO_ENC_V4;
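	/* e.g. a plain (non-tunneled) TSO IPv4/TCP skb ends up with
	 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a tunneled one
	 * additionally carries the _ENC variants of the CSUM and GSO bits.
	 */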
3251 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3252 /* check if packet requires linearization (packet is too fragmented);
3253 no need to check fragmentation if page size > 8K (there will be no
3254 violation of FW restrictions) */
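/* A worked example, assuming MAX_FETCH_BD is 13: the sliding window is
 * 13 - 3 = 10 BDs, and every run of 10 consecutive data BDs must carry at
 * least gso_size bytes; otherwise one MSS would span more BDs than the FW
 * can fetch, so the skb is linearized first.
 */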
3255 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3260 int first_bd_sz = 0;
3262 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3263 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3265 if (xmit_type & XMIT_GSO) {
3266 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3267 /* Check if LSO packet needs to be copied:
3268 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3269 int wnd_size = MAX_FETCH_BD - 3;
3270 /* Number of windows to check */
3271 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3276 /* Headers length */
3277 hlen = (int)(skb_transport_header(skb) - skb->data) +
3280 /* Amount of data (w/o headers) on linear part of SKB*/
3281 first_bd_sz = skb_headlen(skb) - hlen;
3283 wnd_sum = first_bd_sz;
3285 /* Calculate the first sum - it's special */
3286 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3288 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3290 /* If there was data on linear skb data - check it */
3291 if (first_bd_sz > 0) {
3292 if (unlikely(wnd_sum < lso_mss)) {
3297 wnd_sum -= first_bd_sz;
3300 /* Others are easier: run through the frag list and
3301 check all windows */
3302 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3304 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3306 if (unlikely(wnd_sum < lso_mss)) {
3311 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3314 /* in the non-LSO case a too fragmented packet should always
3321 if (unlikely(to_copy))
3322 DP(NETIF_MSG_TX_QUEUED,
3323 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3324 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3325 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3331 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3334 struct ipv6hdr *ipv6;
3336 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3337 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3338 ETH_TX_PARSE_BD_E2_LSO_MSS;
3340 if (xmit_type & XMIT_GSO_ENC_V6)
3341 ipv6 = inner_ipv6_hdr(skb);
3342 else if (xmit_type & XMIT_GSO_V6)
3343 ipv6 = ipv6_hdr(skb);
3347 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3348 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3352 * bnx2x_set_pbd_gso - update PBD in GSO case.
3356 * @xmit_type: xmit flags
3358 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3359 struct eth_tx_parse_bd_e1x *pbd,
3360 struct eth_tx_start_bd *tx_start_bd,
3363 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3364 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3365 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3367 if (xmit_type & XMIT_GSO_V4) {
3368 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3369 pbd->tcp_pseudo_csum =
3370 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3372 0, IPPROTO_TCP, 0));
3374 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3375 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3377 pbd->tcp_pseudo_csum =
3378 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3379 &ipv6_hdr(skb)->daddr,
3380 0, IPPROTO_TCP, 0));
3384 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3388 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3390 * @bp: driver handle
3392 * @parsing_data: data to be updated
3393 * @xmit_type: xmit flags
3395 * 57712/578xx related, when skb has encapsulation
3397 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3398 u32 *parsing_data, u32 xmit_type)
3401 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3402 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3403 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3405 if (xmit_type & XMIT_CSUM_TCP) {
3406 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3407 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3408 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3410 return skb_inner_transport_header(skb) +
3411 inner_tcp_hdrlen(skb) - skb->data;
3414 /* We support checksum offload for TCP and UDP only.
3415 * No need to pass the UDP header length - it's a constant.
3417 return skb_inner_transport_header(skb) +
3418 sizeof(struct udphdr) - skb->data;
3422 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3424 * @bp: driver handle
3426 * @parsing_data: data to be updated
3427 * @xmit_type: xmit flags
3429 * 57712/578xx related
3431 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3432 u32 *parsing_data, u32 xmit_type)
3435 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3436 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3437 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3439 if (xmit_type & XMIT_CSUM_TCP) {
3440 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3441 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3442 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3444 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3446 /* We support checksum offload for TCP and UDP only.
3447 * No need to pass the UDP header length - it's a constant.
3449 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3452 /* set FW indication according to inner or outer protocols if tunneled */
3453 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3454 struct eth_tx_start_bd *tx_start_bd,
3457 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3459 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3460 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3462 if (!(xmit_type & XMIT_CSUM_TCP))
3463 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3467 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3469 * @bp: driver handle
3471 * @pbd: parse BD to be updated
3472 * @xmit_type: xmit flags
3474 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3475 struct eth_tx_parse_bd_e1x *pbd,
3478 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3480 /* for now NS flag is not used in Linux */
3483 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3484 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3486 pbd->ip_hlen_w = (skb_transport_header(skb) -
3487 skb_network_header(skb)) >> 1;
3489 hlen += pbd->ip_hlen_w;
3491 /* We support checksum offload for TCP and UDP only */
3492 if (xmit_type & XMIT_CSUM_TCP)
3493 hlen += tcp_hdrlen(skb) / 2;
3495 hlen += sizeof(struct udphdr) / 2;
3497 pbd->total_hlen_w = cpu_to_le16(hlen);
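	/* e.g. an untagged IPv4/TCP frame with no options gives
	 * (14 + 20 + 20) / 2 = 27 words here
	 */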
3500 if (xmit_type & XMIT_CSUM_TCP) {
3501 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3504 s8 fix = SKB_CS_OFF(skb); /* signed! */
3506 DP(NETIF_MSG_TX_QUEUED,
3507 "hlen %d fix %d csum before fix %x\n",
3508 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3510 /* HW bug: fixup the CSUM */
3511 pbd->tcp_pseudo_csum =
3512 bnx2x_csum_fix(skb_transport_header(skb),
3515 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3516 pbd->tcp_pseudo_csum);
3522 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3523 struct eth_tx_parse_bd_e2 *pbd_e2,
3524 struct eth_tx_parse_2nd_bd *pbd2,
3529 u8 outerip_off, outerip_len = 0;
3531 /* from outer IP to transport */
3532 hlen_w = (skb_inner_transport_header(skb) -
3533 skb_network_header(skb)) >> 1;
3536 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3538 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3540 /* outer IP header info */
3541 if (xmit_type & XMIT_CSUM_V4) {
3542 struct iphdr *iph = ip_hdr(skb);
3543 pbd2->fw_ip_csum_wo_len_flags_frag =
3544 bswab16(csum_fold((~iph->check) -
3545 iph->tot_len - iph->frag_off));
3547 pbd2->fw_ip_hdr_to_payload_w =
3548 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3551 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3553 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3555 if (xmit_type & XMIT_GSO_V4) {
3556 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3558 pbd_e2->data.tunnel_data.pseudo_csum =
3559 bswab16(~csum_tcpudp_magic(
3560 inner_ip_hdr(skb)->saddr,
3561 inner_ip_hdr(skb)->daddr,
3562 0, IPPROTO_TCP, 0));
3564 outerip_len = ip_hdr(skb)->ihl << 1;
3566 pbd_e2->data.tunnel_data.pseudo_csum =
3567 bswab16(~csum_ipv6_magic(
3568 &inner_ipv6_hdr(skb)->saddr,
3569 &inner_ipv6_hdr(skb)->daddr,
3570 0, IPPROTO_TCP, 0));
3573 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3577 (!!(xmit_type & XMIT_CSUM_V6) <<
3578 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3580 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3581 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3582 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3584 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3585 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3586 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3590 /* called with netif_tx_lock
3591 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3592 * netif_wake_queue()
3594 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3596 struct bnx2x *bp = netdev_priv(dev);
3598 struct netdev_queue *txq;
3599 struct bnx2x_fp_txdata *txdata;
3600 struct sw_tx_bd *tx_buf;
3601 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3602 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3603 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3604 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3605 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3606 u32 pbd_e2_parsing_data = 0;
3607 u16 pkt_prod, bd_prod;
3610 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3613 __le16 pkt_size = 0;
3615 u8 mac_type = UNICAST_ADDRESS;
3617 #ifdef BNX2X_STOP_ON_ERROR
3618 if (unlikely(bp->panic))
3619 return NETDEV_TX_BUSY;
3622 txq_index = skb_get_queue_mapping(skb);
3623 txq = netdev_get_tx_queue(dev, txq_index);
3625 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3627 txdata = &bp->bnx2x_txq[txq_index];
3629 /* enable this debug print to view the transmission queue being used
3630 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3631 txq_index, fp_index, txdata_index); */
3633 /* enable this debug print to view the transmission details
3634 DP(NETIF_MSG_TX_QUEUED,
3635 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3636 txdata->cid, fp_index, txdata_index, txdata, fp); */
3638 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3639 skb_shinfo(skb)->nr_frags +
3641 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3642 /* Handle special storage cases separately */
3643 if (txdata->tx_ring_size == 0) {
3644 struct bnx2x_eth_q_stats *q_stats =
3645 bnx2x_fp_qstats(bp, txdata->parent_fp);
3646 q_stats->driver_filtered_tx_pkt++;
3648 return NETDEV_TX_OK;
3650 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3651 netif_tx_stop_queue(txq);
3652 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3654 return NETDEV_TX_BUSY;
3657 DP(NETIF_MSG_TX_QUEUED,
3658 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3659 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3660 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3663 eth = (struct ethhdr *)skb->data;
3665 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3666 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3667 if (is_broadcast_ether_addr(eth->h_dest))
3668 mac_type = BROADCAST_ADDRESS;
3670 mac_type = MULTICAST_ADDRESS;
3673 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3674 /* First, check if we need to linearize the skb (due to FW
3675 restrictions). No need to check fragmentation if page size > 8K
3676 (there will be no violation to FW restrictions) */
3677 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3678 /* Statistics of linearization */
3680 if (skb_linearize(skb) != 0) {
3681 DP(NETIF_MSG_TX_QUEUED,
3682 "SKB linearization failed - silently dropping this SKB\n");
3683 dev_kfree_skb_any(skb);
3684 return NETDEV_TX_OK;
3688 /* Map skb linear data for DMA */
3689 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3690 skb_headlen(skb), DMA_TO_DEVICE);
3691 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3692 DP(NETIF_MSG_TX_QUEUED,
3693 "SKB mapping failed - silently dropping this SKB\n");
3694 dev_kfree_skb_any(skb);
3695 return NETDEV_TX_OK;
3698 Please read carefully. First we use one BD which we mark as start,
3699 then we have a parsing info BD (used for TSO or xsum),
3700 and only then we have the rest of the TSO BDs.
3701 (don't forget to mark the last one as last,
3702 and to unmap only AFTER you write to the BD ...)
3703 And above all, all pbd sizes are in words - NOT DWORDS!
3706 /* get current pkt produced now - advance it just before sending packet
3707 * since mapping of pages may fail and cause packet to be dropped
3709 pkt_prod = txdata->tx_pkt_prod;
3710 bd_prod = TX_BD(txdata->tx_bd_prod);
3712 /* get a tx_buf and first BD
3713 * tx_start_bd may be changed during SPLIT,
3714 * but first_bd will always stay first
3716 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3717 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3718 first_bd = tx_start_bd;
3720 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3722 /* header nbd: indirectly zero other flags! */
3723 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3725 /* remember the first BD of the packet */
3726 tx_buf->first_bd = txdata->tx_bd_prod;
3730 DP(NETIF_MSG_TX_QUEUED,
3731 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3732 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3734 if (vlan_tx_tag_present(skb)) {
3735 tx_start_bd->vlan_or_ethertype =
3736 cpu_to_le16(vlan_tx_tag_get(skb));
3737 tx_start_bd->bd_flags.as_bitfield |=
3738 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3740 /* when transmitting in a vf, start bd must hold the ethertype
3741 * for fw to enforce it
3744 tx_start_bd->vlan_or_ethertype =
3745 cpu_to_le16(ntohs(eth->h_proto));
3747 /* used by FW for packet accounting */
3748 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3751 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3753 /* turn on parsing and get a BD */
3754 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3756 if (xmit_type & XMIT_CSUM)
3757 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3759 if (!CHIP_IS_E1x(bp)) {
3760 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3761 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3763 if (xmit_type & XMIT_CSUM_ENC) {
3764 u16 global_data = 0;
3766 /* Set PBD in enc checksum offload case */
3767 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3768 &pbd_e2_parsing_data,
3771 /* turn on 2nd parsing and get a BD */
3772 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3774 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3776 memset(pbd2, 0, sizeof(*pbd2));
3778 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3779 (skb_inner_network_header(skb) -
3782 if (xmit_type & XMIT_GSO_ENC)
3783 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3787 pbd2->global_data = cpu_to_le16(global_data);
3789 /* add additional parse BD indication to start BD */
3790 SET_FLAG(tx_start_bd->general_data,
3791 ETH_TX_START_BD_PARSE_NBDS, 1);
3792 /* set encapsulation flag in start BD */
3793 SET_FLAG(tx_start_bd->general_data,
3794 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3796 } else if (xmit_type & XMIT_CSUM) {
3797 /* Set PBD in checksum offload case w/o encapsulation */
3798 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3799 &pbd_e2_parsing_data,
3803 /* Add the MACs to the parsing BD if this is a VF */
3805 /* override GRE parameters in BD */
3806 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3807 &pbd_e2->data.mac_addr.src_mid,
3808 &pbd_e2->data.mac_addr.src_lo,
3811 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3812 &pbd_e2->data.mac_addr.dst_mid,
3813 &pbd_e2->data.mac_addr.dst_lo,
3817 SET_FLAG(pbd_e2_parsing_data,
3818 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3820 u16 global_data = 0;
3821 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3822 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3823 /* Set PBD in checksum offload case */
3824 if (xmit_type & XMIT_CSUM)
3825 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3827 SET_FLAG(global_data,
3828 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3829 pbd_e1x->global_data |= cpu_to_le16(global_data);
3832 /* Setup the data pointer of the first BD of the packet */
3833 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3834 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3835 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3836 pkt_size = tx_start_bd->nbytes;
3838 DP(NETIF_MSG_TX_QUEUED,
3839 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3840 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3841 le16_to_cpu(tx_start_bd->nbytes),
3842 tx_start_bd->bd_flags.as_bitfield,
3843 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3845 if (xmit_type & XMIT_GSO) {
3847 DP(NETIF_MSG_TX_QUEUED,
3848 "TSO packet len %d hlen %d total len %d tso size %d\n",
3849 skb->len, hlen, skb_headlen(skb),
3850 skb_shinfo(skb)->gso_size);
3852 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3854 if (unlikely(skb_headlen(skb) > hlen)) {
3856 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3860 if (!CHIP_IS_E1x(bp))
3861 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3864 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3867 /* Set the PBD's parsing_data field if not zero
3868 * (for the chips newer than 57711).
3870 if (pbd_e2_parsing_data)
3871 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3873 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3875 /* Handle fragmented skb */
3876 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3877 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3879 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3880 skb_frag_size(frag), DMA_TO_DEVICE);
3881 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3882 unsigned int pkts_compl = 0, bytes_compl = 0;
3884 DP(NETIF_MSG_TX_QUEUED,
3885 "Unable to map page - dropping packet...\n");
3887 /* we need to unmap all buffers already mapped
3889 * first_bd->nbd needs to be properly updated
3890 * before the call to bnx2x_free_tx_pkt
3892 first_bd->nbd = cpu_to_le16(nbd);
3893 bnx2x_free_tx_pkt(bp, txdata,
3894 TX_BD(txdata->tx_pkt_prod),
3895 &pkts_compl, &bytes_compl);
3896 return NETDEV_TX_OK;
3899 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3900 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3901 if (total_pkt_bd == NULL)
3902 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3904 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3905 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3906 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3907 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3910 DP(NETIF_MSG_TX_QUEUED,
3911 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3912 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3913 le16_to_cpu(tx_data_bd->nbytes));
3916 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3918 /* update with actual num BDs */
3919 first_bd->nbd = cpu_to_le16(nbd);
3921 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3923 /* now send a tx doorbell, counting the next BD
3924 * if the packet contains or ends with it
3926 if (TX_BD_POFF(bd_prod) < nbd)
3929 /* total_pkt_bytes should be set on the first data BD if
3930 * it's not an LSO packet and there is more than one
3931 * data BD. In this case pkt_size is limited by an MTU value.
3932 * However we prefer to set it for an LSO packet (while we don't
3933 * have to) in order to save some CPU cycles in the non-LSO
3934 * case, which we care about much more.
3936 if (total_pkt_bd != NULL)
3937 total_pkt_bd->total_pkt_bytes = pkt_size;
3940 DP(NETIF_MSG_TX_QUEUED,
3941 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3942 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3943 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3944 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3945 le16_to_cpu(pbd_e1x->total_hlen_w));
3947 DP(NETIF_MSG_TX_QUEUED,
3948 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3950 pbd_e2->data.mac_addr.dst_hi,
3951 pbd_e2->data.mac_addr.dst_mid,
3952 pbd_e2->data.mac_addr.dst_lo,
3953 pbd_e2->data.mac_addr.src_hi,
3954 pbd_e2->data.mac_addr.src_mid,
3955 pbd_e2->data.mac_addr.src_lo,
3956 pbd_e2->parsing_data);
3957 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3959 netdev_tx_sent_queue(txq, skb->len);
3961 skb_tx_timestamp(skb);
3963 txdata->tx_pkt_prod++;
3965 * Make sure that the BD data is updated before updating the producer
3966 * since FW might read the BD right after the producer is updated.
3967 * This is only applicable for weak-ordered memory model archs such
3968 * as IA-64. The following barrier is also mandatory since FW
3969 * assumes packets always have BDs.
3973 txdata->tx_db.data.prod += nbd;
3976 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3980 txdata->tx_bd_prod += nbd;
3982 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3983 netif_tx_stop_queue(txq);
3985 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3986 * ordering of set_bit() in netif_tx_stop_queue() and read of
3990 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3991 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3992 netif_tx_wake_queue(txq);
3996 return NETDEV_TX_OK;
4000 * bnx2x_setup_tc - routine to configure net_device for multi tc
4002 * @netdev: net device to configure
4003 * @tc: number of traffic classes to enable
4005 * callback connected to the ndo_setup_tc function pointer
4007 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4009 int cos, prio, count, offset;
4010 struct bnx2x *bp = netdev_priv(dev);
4012 /* setup tc must be called under rtnl lock */
4015 /* no traffic classes requested. Aborting */
4017 netdev_reset_tc(dev);
4021 /* requested to support too many traffic classes */
4022 if (num_tc > bp->max_cos) {
4023 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4024 num_tc, bp->max_cos);
4028 /* declare amount of supported traffic classes */
4029 if (netdev_set_num_tc(dev, num_tc)) {
4030 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4034 /* configure priority to traffic class mapping */
4035 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4036 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4037 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4038 "mapping priority %d to tc %d\n",
4039 prio, bp->prio_to_cos[prio]);
4042 /* Use this configuration to differentiate tc0 from other COSes
4043 This can be used for ets or pfc, and save the effort of setting
4044 up a multi class queue disc or negotiating DCBX with a switch
4045 netdev_set_prio_tc_map(dev, 0, 0);
4046 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4047 for (prio = 1; prio < 16; prio++) {
4048 netdev_set_prio_tc_map(dev, prio, 1);
4049 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4052 /* configure traffic class to transmission queue mapping */
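	/* e.g. with 8 ETH queues and no CNIC queues, tc0 maps to txqs 0-7,
	 * tc1 to 8-15 and tc2 to 16-23
	 */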
4053 for (cos = 0; cos < bp->max_cos; cos++) {
4054 count = BNX2X_NUM_ETH_QUEUES(bp);
4055 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4056 netdev_set_tc_queue(dev, cos, count, offset);
4057 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4058 "mapping tc %d to offset %d count %d\n",
4059 cos, offset, count);
4065 /* called with rtnl_lock */
4066 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4068 struct sockaddr *addr = p;
4069 struct bnx2x *bp = netdev_priv(dev);
4072 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4073 BNX2X_ERR("Requested MAC address is not valid\n");
4077 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4078 !is_zero_ether_addr(addr->sa_data)) {
4079 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4083 if (netif_running(dev)) {
4084 rc = bnx2x_set_eth_mac(bp, false);
4089 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4091 if (netif_running(dev))
4092 rc = bnx2x_set_eth_mac(bp, true);
4097 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4099 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4100 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4105 if (IS_FCOE_IDX(fp_index)) {
4106 memset(sb, 0, sizeof(union host_hc_status_block));
4107 fp->status_blk_mapping = 0;
4110 if (!CHIP_IS_E1x(bp))
4111 BNX2X_PCI_FREE(sb->e2_sb,
4112 bnx2x_fp(bp, fp_index,
4113 status_blk_mapping),
4114 sizeof(struct host_hc_status_block_e2));
4116 BNX2X_PCI_FREE(sb->e1x_sb,
4117 bnx2x_fp(bp, fp_index,
4118 status_blk_mapping),
4119 sizeof(struct host_hc_status_block_e1x));
4123 if (!skip_rx_queue(bp, fp_index)) {
4124 bnx2x_free_rx_bds(fp);
4126 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4127 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4128 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4129 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4130 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4132 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4133 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4134 sizeof(struct eth_fast_path_rx_cqe) *
4138 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4139 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4140 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4141 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4145 if (!skip_tx_queue(bp, fp_index)) {
4146 /* fastpath tx rings: tx_buf tx_desc */
4147 for_each_cos_in_tx_queue(fp, cos) {
4148 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4150 DP(NETIF_MSG_IFDOWN,
4151 "freeing tx memory of fp %d cos %d cid %d\n",
4152 fp_index, cos, txdata->cid);
4154 BNX2X_FREE(txdata->tx_buf_ring);
4155 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4156 txdata->tx_desc_mapping,
4157 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4160 /* end of fastpath */
4163 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4166 for_each_cnic_queue(bp, i)
4167 bnx2x_free_fp_mem_at(bp, i);
4170 void bnx2x_free_fp_mem(struct bnx2x *bp)
4173 for_each_eth_queue(bp, i)
4174 bnx2x_free_fp_mem_at(bp, i);
4177 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4179 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4180 if (!CHIP_IS_E1x(bp)) {
4181 bnx2x_fp(bp, index, sb_index_values) =
4182 (__le16 *)status_blk.e2_sb->sb.index_values;
4183 bnx2x_fp(bp, index, sb_running_index) =
4184 (__le16 *)status_blk.e2_sb->sb.running_index;
4186 bnx2x_fp(bp, index, sb_index_values) =
4187 (__le16 *)status_blk.e1x_sb->sb.index_values;
4188 bnx2x_fp(bp, index, sb_running_index) =
4189 (__le16 *)status_blk.e1x_sb->sb.running_index;
4193 /* Returns the number of actually allocated BDs */
4194 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4197 struct bnx2x *bp = fp->bp;
4198 u16 ring_prod, cqe_ring_prod;
4199 int i, failure_cnt = 0;
4201 fp->rx_comp_cons = 0;
4202 cqe_ring_prod = ring_prod = 0;
4204 /* This routine is called only during init so
4205 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4207 for (i = 0; i < rx_ring_size; i++) {
4208 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4212 ring_prod = NEXT_RX_IDX(ring_prod);
4213 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4214 WARN_ON(ring_prod <= (i - failure_cnt));
4218 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4219 i - failure_cnt, fp->index);
4221 fp->rx_bd_prod = ring_prod;
4222 /* Limit the CQE producer by the CQE ring size */
4223 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4225 fp->rx_pkt = fp->rx_calls = 0;
4227 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4229 return i - failure_cnt;
4232 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4236 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4237 struct eth_rx_cqe_next_page *nextpg;
4239 nextpg = (struct eth_rx_cqe_next_page *)
4240 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4242 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4243 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4245 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4246 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
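	/* The last CQE of each page thus points at the start of the next
	 * page, and the final page wraps back to the first, forming a ring.
	 */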
4250 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4252 union host_hc_status_block *sb;
4253 struct bnx2x_fastpath *fp = &bp->fp[index];
4256 int rx_ring_size = 0;
4258 if (!bp->rx_ring_size &&
4259 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4260 rx_ring_size = MIN_RX_SIZE_NONTPA;
4261 bp->rx_ring_size = rx_ring_size;
4262 } else if (!bp->rx_ring_size) {
4263 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4265 if (CHIP_IS_E3(bp)) {
4266 u32 cfg = SHMEM_RD(bp,
4267 dev_info.port_hw_config[BP_PORT(bp)].
4270 /* Decrease ring size for 1G functions */
4271 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4272 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4276 /* allocate at least the number of buffers required by FW */
4277 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4278 MIN_RX_SIZE_TPA, rx_ring_size);
4280 bp->rx_ring_size = rx_ring_size;
4281 } else /* if rx_ring_size specified - use it */
4282 rx_ring_size = bp->rx_ring_size;
4284 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4287 sb = &bnx2x_fp(bp, index, status_blk);
4289 if (!IS_FCOE_IDX(index)) {
4291 if (!CHIP_IS_E1x(bp))
4292 BNX2X_PCI_ALLOC(sb->e2_sb,
4293 &bnx2x_fp(bp, index, status_blk_mapping),
4294 sizeof(struct host_hc_status_block_e2));
4296 BNX2X_PCI_ALLOC(sb->e1x_sb,
4297 &bnx2x_fp(bp, index, status_blk_mapping),
4298 sizeof(struct host_hc_status_block_e1x));
4301 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4302 * set shortcuts for it.
4304 if (!IS_FCOE_IDX(index))
4305 set_sb_shortcuts(bp, index);
4308 if (!skip_tx_queue(bp, index)) {
4309 /* fastpath tx rings: tx_buf tx_desc */
4310 for_each_cos_in_tx_queue(fp, cos) {
4311 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4314 "allocating tx memory of fp %d cos %d\n",
4317 BNX2X_ALLOC(txdata->tx_buf_ring,
4318 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4319 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4320 &txdata->tx_desc_mapping,
4321 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4326 if (!skip_rx_queue(bp, index)) {
4327 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4328 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4329 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4330 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4331 &bnx2x_fp(bp, index, rx_desc_mapping),
4332 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4334 /* Seed all CQEs by 1s */
4335 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4336 &bnx2x_fp(bp, index, rx_comp_mapping),
4337 sizeof(struct eth_fast_path_rx_cqe) *
4341 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4342 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4343 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4344 &bnx2x_fp(bp, index, rx_sge_mapping),
4345 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4347 bnx2x_set_next_page_rx_bd(fp);
4350 bnx2x_set_next_page_rx_cq(fp);
4353 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4354 if (ring_size < rx_ring_size)
4360 /* handles low memory cases */
4362 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4364 /* FW will drop all packets if the queue is not big enough.
4365 * In these cases we disable the queue.
4366 * Min size is different for OOO, TPA and non-TPA queues
4368 if (ring_size < (fp->disable_tpa ?
4369 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4370 /* release memory allocated for this queue */
4371 bnx2x_free_fp_mem_at(bp, index);
4377 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4381 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4382 /* we will fail load process instead of mark
4390 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4394 /* 1. Allocate FP for leading - fatal if error
4395 * 2. Allocate RSS - fix number of queues if error
4399 if (bnx2x_alloc_fp_mem_at(bp, 0))
4403 for_each_nondefault_eth_queue(bp, i)
4404 if (bnx2x_alloc_fp_mem_at(bp, i))
4407 /* handle memory failures */
4408 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4409 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4412 bnx2x_shrink_eth_fp(bp, delta);
4413 if (CNIC_SUPPORT(bp))
4414 /* move non eth FPs next to last eth FP
4415 * must be done in that order
4416 * FCOE_IDX < FWD_IDX < OOO_IDX
4419 /* move FCoE fp even if NO_FCOE_FLAG is on */
4420 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4421 bp->num_ethernet_queues -= delta;
4422 bp->num_queues = bp->num_ethernet_queues +
4423 bp->num_cnic_queues;
4424 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4425 bp->num_queues + delta, bp->num_queues);
4431 void bnx2x_free_mem_bp(struct bnx2x *bp)
4435 for (i = 0; i < bp->fp_array_size; i++)
4436 kfree(bp->fp[i].tpa_info);
4439 kfree(bp->fp_stats);
4440 kfree(bp->bnx2x_txq);
4441 kfree(bp->msix_table);
4445 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4447 struct bnx2x_fastpath *fp;
4448 struct msix_entry *tbl;
4449 struct bnx2x_ilt *ilt;
4450 int msix_table_size = 0;
4451 int fp_array_size, txq_array_size;
4455 * The biggest MSI-X table we might need is the maximum number of fast
4456 * path IGU SBs plus the default SB (for PF only).
4458 msix_table_size = bp->igu_sb_cnt;
4461 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4463 /* fp array: RSS plus CNIC related L2 queues */
4464 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4465 bp->fp_array_size = fp_array_size;
4466 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4468 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4469 if (!fp)
4470 goto alloc_err;
4471 for (i = 0; i < bp->fp_array_size; i++) {
4472 fp[i].tpa_info =
4473 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4474 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4475 if (!(fp[i].tpa_info))
4476 goto alloc_err;
4477 }
4479 bp->fp = fp;
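/* each fastpath carries its own array of aggregation (TPA) descriptors,
 * one bnx2x_agg_info entry per possible aggregation queue
 */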
4481 /* allocate sp objs */
4482 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4483 GFP_KERNEL);
4484 if (!bp->sp_objs)
4485 goto alloc_err;
4487 /* allocate fp_stats */
4488 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4489 GFP_KERNEL);
4490 if (!bp->fp_stats)
4491 goto alloc_err;
4493 /* Allocate memory for the transmission queues array */
4494 txq_array_size =
4495 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4496 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4498 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4499 GFP_KERNEL);
4500 if (!bp->bnx2x_txq)
4501 goto alloc_err;
4503 /* msix table */
4504 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4505 if (!tbl)
4506 goto alloc_err;
4507 bp->msix_table = tbl;
4509 /* ilt */
4510 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4511 if (!ilt)
4512 goto alloc_err;
4513 bp->ilt = ilt;
4515 return 0;
4516 alloc_err:
4517 bnx2x_free_mem_bp(bp);
4518 return -ENOMEM;
4519 }
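/* bnx2x_reload_if_running() is the common "apply new settings" helper used
 * by the MTU and feature callbacks further below: a full unload/load cycle,
 * but only if the interface is actually up.
 */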
4521 int bnx2x_reload_if_running(struct net_device *dev)
4522 {
4523 struct bnx2x *bp = netdev_priv(dev);
4525 if (unlikely(!netif_running(dev)))
4526 return 0;
4528 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4529 return bnx2x_nic_load(bp, LOAD_NORMAL);
4530 }
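/* bnx2x_get_cur_phy_idx() reports which external PHY is currently active;
 * bnx2x_get_link_cfg_idx() maps that onto the link-configuration index,
 * undoing any configured PHY swap.
 */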
4532 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4533 {
4534 u32 sel_phy_idx = 0;
4535 if (bp->link_params.num_phys <= 1)
4536 return INT_PHY;
4538 if (bp->link_vars.link_up) {
4539 sel_phy_idx = EXT_PHY1;
4540 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4541 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4542 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4543 sel_phy_idx = EXT_PHY2;
4544 } else {
4546 switch (bnx2x_phy_selection(&bp->link_params)) {
4547 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4548 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4549 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4550 sel_phy_idx = EXT_PHY1;
4551 break;
4552 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4553 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4554 sel_phy_idx = EXT_PHY2;
4555 break;
4556 }
4557 }
4559 return sel_phy_idx;
4560 }
4561 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4562 {
4563 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4564 /*
4565 * The selected active PHY is always reported after swapping (when PHY
4566 * swapping is enabled), so when swapping is enabled we need to reverse
4567 * the configuration index.
4568 */
4570 if (bp->link_params.multi_phy_config &
4571 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4572 if (sel_phy_idx == EXT_PHY1)
4573 sel_phy_idx = EXT_PHY2;
4574 else if (sel_phy_idx == EXT_PHY2)
4575 sel_phy_idx = EXT_PHY1;
4576 }
4577 return LINK_CONFIG_IDX(sel_phy_idx);
4578 }
4580 #ifdef NETDEV_FCOE_WWNN
4581 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4582 {
4583 struct bnx2x *bp = netdev_priv(dev);
4584 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4586 switch (type) {
4587 case NETDEV_FCOE_WWNN:
4588 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4589 cp->fcoe_wwn_node_name_lo);
4590 break;
4591 case NETDEV_FCOE_WWPN:
4592 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4593 cp->fcoe_wwn_port_name_lo);
4594 break;
4595 default:
4596 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4597 return -EINVAL;
4598 }
4600 return 0;
4601 }
4602 #endif
4604 /* called with rtnl_lock */
4605 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4606 {
4607 struct bnx2x *bp = netdev_priv(dev);
4609 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4610 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4611 return -EAGAIN;
4612 }
4614 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4615 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4616 BNX2X_ERR("Can't support requested MTU size\n");
4617 return -EINVAL;
4618 }
4620 /* This does not race with packet allocation
4621 * because the actual alloc size is
4622 * only updated as part of load
4623 */
4624 dev->mtu = new_mtu;
4626 return bnx2x_reload_if_running(dev);
4627 }
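/* bnx2x_fix_features()/bnx2x_set_features() back the ndo_fix_features and
 * ndo_set_features callbacks: the former masks out LRO/GRO when Rx checksum
 * offload is off (TPA depends on it), the latter converts the requested
 * features into driver flags and reloads the NIC when required.
 */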
4629 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4630 netdev_features_t features)
4631 {
4632 struct bnx2x *bp = netdev_priv(dev);
4634 /* TPA requires Rx CSUM offloading */
4635 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4636 features &= ~NETIF_F_LRO;
4637 features &= ~NETIF_F_GRO;
4638 }
4640 return features;
4641 }
4643 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4644 {
4645 struct bnx2x *bp = netdev_priv(dev);
4646 u32 flags = bp->flags;
4647 u32 changes;
4648 bool bnx2x_reload = false;
4650 if (features & NETIF_F_LRO)
4651 flags |= TPA_ENABLE_FLAG;
4652 else
4653 flags &= ~TPA_ENABLE_FLAG;
4655 if (features & NETIF_F_GRO)
4656 flags |= GRO_ENABLE_FLAG;
4657 else
4658 flags &= ~GRO_ENABLE_FLAG;
4660 if (features & NETIF_F_LOOPBACK) {
4661 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4662 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4663 bnx2x_reload = true;
4664 }
4665 } else {
4666 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4667 bp->link_params.loopback_mode = LOOPBACK_NONE;
4668 bnx2x_reload = true;
4669 }
4670 }
4672 changes = flags ^ bp->flags;
4674 /* if GRO is changed while LRO is enabled, don't force a reload */
4675 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4676 changes &= ~GRO_ENABLE_FLAG;
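/* (while TPA/LRO is active, hardware aggregation takes precedence anyway,
 * so a GRO toggle can take effect without tearing the device down)
 */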
4678 if (changes)
4679 bnx2x_reload = true;
4681 bp->flags = flags;
4683 if (bnx2x_reload) {
4684 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4685 return bnx2x_reload_if_running(dev);
4686 /* else: bnx2x_nic_load() will be called at end of recovery */
4687 }
4689 return 0;
4690 }
4692 void bnx2x_tx_timeout(struct net_device *dev)
4693 {
4694 struct bnx2x *bp = netdev_priv(dev);
4696 #ifdef BNX2X_STOP_ON_ERROR
4697 if (!bp->panic)
4698 bnx2x_panic();
4699 #endif
4701 smp_mb__before_clear_bit();
4702 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4703 smp_mb__after_clear_bit();
4705 /* This allows the netif to be shut down gracefully before resetting */
4706 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4707 }
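/* bnx2x_suspend()/bnx2x_resume() are the PCI power-management hooks: suspend
 * detaches the netif, unloads the NIC and drops to the requested PCI power
 * state; resume restores PCI state, returns the device to D0 and reloads.
 */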
4709 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4710 {
4711 struct net_device *dev = pci_get_drvdata(pdev);
4712 struct bnx2x *bp;
4714 if (!dev) {
4715 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4716 return -ENODEV;
4717 }
4718 bp = netdev_priv(dev);
4720 rtnl_lock();
4722 pci_save_state(pdev);
4724 if (!netif_running(dev)) {
4725 rtnl_unlock();
4726 return 0;
4727 }
4729 netif_device_detach(dev);
4731 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4733 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4735 rtnl_unlock();
4737 return 0;
4738 }
4740 int bnx2x_resume(struct pci_dev *pdev)
4741 {
4742 struct net_device *dev = pci_get_drvdata(pdev);
4743 struct bnx2x *bp;
4744 int rc;
4746 if (!dev) {
4747 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4748 return -ENODEV;
4749 }
4750 bp = netdev_priv(dev);
4752 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4753 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4754 return -EAGAIN;
4755 }
4757 rtnl_lock();
4759 pci_restore_state(pdev);
4761 if (!netif_running(dev)) {
4762 rtnl_unlock();
4763 return 0;
4764 }
4766 bnx2x_set_power_state(bp, PCI_D0);
4767 netif_device_attach(dev);
4769 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4771 rtnl_unlock();
4773 return rc;
4774 }
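/* bnx2x_set_ctx_validation() stamps CDU validation values, derived from the
 * HW CID and the storm region/connection type, into the ustorm and xstorm
 * parts of an Eth connection context.
 */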
4776 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4777 u32 cid)
4778 {
4779 /* ustorm cxt validation */
4780 cxt->ustorm_ag_context.cdu_usage =
4781 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4782 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4783 /* xcontext validation */
4784 cxt->xstorm_ag_context.cdu_reserved =
4785 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4786 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4787 }
4789 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4790 u8 fw_sb_id, u8 sb_index,
4791 u8 ticks)
4792 {
4793 u32 addr = BAR_CSTRORM_INTMEM +
4794 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4795 REG_WR8(bp, addr, ticks);
4796 DP(NETIF_MSG_IFUP,
4797 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4798 port, fw_sb_id, sb_index, ticks);
4799 }
4801 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4802 u16 fw_sb_id, u8 sb_index,
4803 u8 disable)
4804 {
4805 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4806 u32 addr = BAR_CSTRORM_INTMEM +
4807 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4808 u8 flags = REG_RD8(bp, addr);
4809 /* clear and set */
4810 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4811 flags |= enable_flag;
4812 REG_WR8(bp, addr, flags);
4813 DP(NETIF_MSG_IFUP,
4814 "port %x fw_sb_id %d sb_index %d disable %d\n",
4815 port, fw_sb_id, sb_index, disable);
4816 }
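/* bnx2x_update_coalesce_sb_index() converts the requested interval from
 * microseconds into BNX2X_BTR-sized ticks for the status-block timeout and
 * turns host coalescing off for that index when asked to, or when the
 * interval is zero.
 */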
4818 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4819 u8 sb_index, u8 disable, u16 usec)
4820 {
4821 int port = BP_PORT(bp);
4822 u8 ticks = usec / BNX2X_BTR;
4824 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4826 disable = disable ? 1 : (usec ? 0 : 1);
4827 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4828 }