1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/tcp.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_checksum.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
30 #include "bnx2x_sp.h"
31
32 /**
33  * bnx2x_move_fp - move content of the fastpath structure.
34  *
35  * @bp:         driver handle
36  * @from:       source FP index
37  * @to:         destination FP index
38  *
39  * Makes sure the contents of bp->fp[to].napi are kept
40  * intact. This is done by first copying the napi struct from
41  * the target to the source, and then memcpy'ing the entire
42  * source onto the target. txdata pointers and related
43  * content are updated as well.
44  */
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46 {
47         struct bnx2x_fastpath *from_fp = &bp->fp[from];
48         struct bnx2x_fastpath *to_fp = &bp->fp[to];
49         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53         int old_max_eth_txqs, new_max_eth_txqs;
54         int old_txdata_index = 0, new_txdata_index = 0;
55
56         /* Copy the NAPI object as it has been already initialized */
57         from_fp->napi = to_fp->napi;
58
59         /* Move bnx2x_fastpath contents */
60         memcpy(to_fp, from_fp, sizeof(*to_fp));
61         to_fp->index = to;
62
63         /* move sp_objs contents as well, as their indices match fp ones */
64         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66         /* move fp_stats contents as well, as their indices match fp ones */
67         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69         /* Update txdata pointers in fp and move txdata content accordingly:
70          * Each fp consumes 'max_cos' txdata structures, so the index should be
71          * decremented by max_cos x delta.
72          */
73
74         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76                                 (bp)->max_cos;
77         if (from == FCOE_IDX(bp)) {
78                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80         }
81
82         memcpy(&bp->bnx2x_txq[new_txdata_index],
83                &bp->bnx2x_txq[old_txdata_index],
84                sizeof(struct bnx2x_fp_txdata));
85         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86 }
87
88 /**
89  * bnx2x_fill_fw_str - Fill buffer with FW version string.
90  *
91  * @bp:        driver handle
92  * @buf:       character buffer to fill with the fw name
93  * @buf_len:   length of the above buffer
94  *
95  */
96 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97 {
98         if (IS_PF(bp)) {
99                 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101                 phy_fw_ver[0] = '\0';
102                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103                                              phy_fw_ver, PHY_FW_VER_LEN);
104                 strlcpy(buf, bp->fw_ver, buf_len);
105         snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
106                          "bc %d.%d.%d%s%s",
107                          (bp->common.bc_ver & 0xff0000) >> 16,
108                          (bp->common.bc_ver & 0xff00) >> 8,
109                          (bp->common.bc_ver & 0xff),
110                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111         } else {
112                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
113         }
114 }
115
116 /**
117  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
118  *
119  * @bp: driver handle
120  * @delta:      number of eth queues which were not allocated
121  */
122 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
123 {
124         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125
126         /* Queue pointer cannot be re-set on an fp-basis, as moving the pointer
127          * backward along the array could cause memory to be overwritten
128          */
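        /* Repack the cos > 0 txdata entries into their new slots now that
         * only (old_eth_num - delta) eth queues remain, and re-point each
         * fp's txdata_ptr at the moved entry.
         */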
129         for (cos = 1; cos < bp->max_cos; cos++) {
130                 for (i = 0; i < old_eth_num - delta; i++) {
131                         struct bnx2x_fastpath *fp = &bp->fp[i];
132                         int new_idx = cos * (old_eth_num - delta) + i;
133
134                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135                                sizeof(struct bnx2x_fp_txdata));
136                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
137                 }
138         }
139 }
140
141 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
142
143 /* free skb in the packet ring at pos idx
144  * return idx of last bd freed
145  */
146 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147                              u16 idx, unsigned int *pkts_compl,
148                              unsigned int *bytes_compl)
149 {
150         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151         struct eth_tx_start_bd *tx_start_bd;
152         struct eth_tx_bd *tx_data_bd;
153         struct sk_buff *skb = tx_buf->skb;
154         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
155         int nbd;
156
157         /* prefetch skb end pointer to speed up dev_kfree_skb() */
158         prefetch(&skb->end);
159
160         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
161            txdata->txq_index, idx, tx_buf, skb);
162
163         /* unmap first bd */
164         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
165         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
167
168
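        /* nbd counts the BDs that still need to be walked after the start BD:
         * the parse BD, an optional TSO split header BD and the frag BDs.
         */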
169         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170 #ifdef BNX2X_STOP_ON_ERROR
171         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172                 BNX2X_ERR("BAD nbd!\n");
173                 bnx2x_panic();
174         }
175 #endif
176         new_cons = nbd + tx_buf->first_bd;
177
178         /* Get the next bd */
179         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181         /* Skip a parse bd... */
182         --nbd;
183         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185         /* ...and the TSO split header bd since they have no mapping */
186         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187                 --nbd;
188                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189         }
190
191         /* now free frags */
192         while (nbd > 0) {
193
194                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197                 if (--nbd)
198                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199         }
200
201         /* release skb */
202         WARN_ON(!skb);
203         if (likely(skb)) {
204                 (*pkts_compl)++;
205                 (*bytes_compl) += skb->len;
206         }
207
208         dev_kfree_skb_any(skb);
209         tx_buf->first_bd = 0;
210         tx_buf->skb = NULL;
211
212         return new_cons;
213 }
214
215 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
216 {
217         struct netdev_queue *txq;
218         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219         unsigned int pkts_compl = 0, bytes_compl = 0;
220
221 #ifdef BNX2X_STOP_ON_ERROR
222         if (unlikely(bp->panic))
223                 return -1;
224 #endif
225
226         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228         sw_cons = txdata->tx_pkt_cons;
229
230         while (sw_cons != hw_cons) {
231                 u16 pkt_cons;
232
233                 pkt_cons = TX_BD(sw_cons);
234
235                 DP(NETIF_MSG_TX_DONE,
236                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
237                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
238
239                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240                                             &pkts_compl, &bytes_compl);
241
242                 sw_cons++;
243         }
244
245         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
247         txdata->tx_pkt_cons = sw_cons;
248         txdata->tx_bd_cons = bd_cons;
249
250         /* Need to make the tx_bd_cons update visible to start_xmit()
251          * before checking for netif_tx_queue_stopped().  Without the
252          * memory barrier, there is a small possibility that
253          * start_xmit() will miss it and cause the queue to be stopped
254          * forever.
255          * On the other hand we need an rmb() here to ensure the proper
256          * ordering of bit testing in the following
257          * netif_tx_queue_stopped(txq) call.
258          */
259         smp_mb();
260
261         if (unlikely(netif_tx_queue_stopped(txq))) {
262                 /* Taking tx_lock() is needed to prevent re-enabling the queue
263                  * while it's empty. This could happen if rx_action() gets
264                  * suspended in bnx2x_tx_int() after the condition before
265                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266                  *
267                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
268                  * sends some packets consuming the whole queue again->
269                  * stops the queue
270                  */
271
272                 __netif_tx_lock(txq, smp_processor_id());
273
274                 if ((netif_tx_queue_stopped(txq)) &&
275                     (bp->state == BNX2X_STATE_OPEN) &&
276                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277                         netif_tx_wake_queue(txq);
278
279                 __netif_tx_unlock(txq);
280         }
281         return 0;
282 }
283
284 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285                                              u16 idx)
286 {
287         u16 last_max = fp->last_max_sge;
288
289         if (SUB_S16(idx, last_max) > 0)
290                 fp->last_max_sge = idx;
291 }
292
293 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294                                          u16 sge_len,
295                                          struct eth_end_agg_rx_cqe *cqe)
296 {
297         struct bnx2x *bp = fp->bp;
298         u16 last_max, last_elem, first_elem;
299         u16 delta = 0;
300         u16 i;
301
302         if (!sge_len)
303                 return;
304
305         /* First mark all used pages */
306         for (i = 0; i < sge_len; i++)
307                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
309
310         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
312
313         /* Here we assume that the last SGE index is the biggest */
314         prefetch((void *)(fp->sge_mask));
315         bnx2x_update_last_max_sge(fp,
316                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
317
318         last_max = RX_SGE(fp->last_max_sge);
319         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
321
322         /* If ring is not full */
323         if (last_elem + 1 != first_elem)
324                 last_elem++;
325
326         /* Now update the prod */
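        /* Sweep mask elements starting at the current producer: an element
         * whose bits are all cleared means all of its SGEs were consumed, so
         * re-arm it and advance the producer by BIT_VEC64_ELEM_SZ; stop at
         * the first element that still has unconsumed SGEs.
         */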
327         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328                 if (likely(fp->sge_mask[i]))
329                         break;
330
331                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332                 delta += BIT_VEC64_ELEM_SZ;
333         }
334
335         if (delta > 0) {
336                 fp->rx_sge_prod += delta;
337                 /* clear page-end entries */
338                 bnx2x_clear_sge_mask_next_elems(fp);
339         }
340
341         DP(NETIF_MSG_RX_STATUS,
342            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
343            fp->last_max_sge, fp->rx_sge_prod);
344 }
345
346 /* Get the Toeplitz hash value for the skb from the
347  * CQE (calculated by HW).
348  */
349 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350                             const struct eth_fast_path_rx_cqe *cqe,
351                             bool *l4_rxhash)
352 {
353         /* Get Toeplitz hash from CQE */
354         if ((bp->dev->features & NETIF_F_RXHASH) &&
355             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356                 enum eth_rss_hash_type htype;
357
358                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359                 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360                              (htype == TCP_IPV6_HASH_TYPE);
361                 return le32_to_cpu(cqe->rss_hash_result);
362         }
363         *l4_rxhash = false;
364         return 0;
365 }
366
367 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
368                             u16 cons, u16 prod,
369                             struct eth_fast_path_rx_cqe *cqe)
370 {
371         struct bnx2x *bp = fp->bp;
372         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375         dma_addr_t mapping;
376         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
378
379         /* print error if current state != stop */
380         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
383         /* Try to map an empty data buffer from the aggregation info  */
384         mapping = dma_map_single(&bp->pdev->dev,
385                                  first_buf->data + NET_SKB_PAD,
386                                  fp->rx_buf_size, DMA_FROM_DEVICE);
387         /*
388          *  ...if it fails - move the skb from the consumer to the producer
389          *  and set the current aggregation state as ERROR to drop it
390          *  when TPA_STOP arrives.
391          */
392
393         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394                 /* Move the BD from the consumer to the producer */
395                 bnx2x_reuse_rx_data(fp, cons, prod);
396                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397                 return;
398         }
399
400         /* move empty data from pool to prod */
401         prod_rx_buf->data = first_buf->data;
402         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
403         /* point prod_bd to new data */
404         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
407         /* move partial skb from cons to pool (don't unmap yet) */
408         *first_buf = *cons_rx_buf;
409
410         /* mark bin state as START */
411         tpa_info->parsing_flags =
412                 le16_to_cpu(cqe->pars_flags.flags);
413         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414         tpa_info->tpa_state = BNX2X_TPA_START;
415         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416         tpa_info->placement_offset = cqe->placement_offset;
417         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418         if (fp->mode == TPA_MODE_GRO) {
419                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
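                /* full_page: the largest multiple of the segment size that
                 * fits in an SGE page, so each SGE holds whole GRO segments
                 */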
420                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
421                 tpa_info->gro_size = gro_size;
422         }
423
424 #ifdef BNX2X_STOP_ON_ERROR
425         fp->tpa_queue_used |= (1 << queue);
426 #ifdef _ASM_GENERIC_INT_L64_H
427         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428 #else
429         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
430 #endif
431            fp->tpa_queue_used);
432 #endif
433 }
434
435 /* Timestamp option length allowed for TPA aggregation:
436  *
437  *              nop nop kind length echo val
438  */
439 #define TPA_TSTAMP_OPT_LEN      12
440 /**
441  * bnx2x_set_gro_params - compute GRO values
442  *
443  * @skb:                packet skb
444  * @parsing_flags:      parsing flags from the START CQE
445  * @len_on_bd:          total length of the first packet for the
446  *                      aggregation.
447  * @pkt_len:            length of all segments
448  *
449  * Approximates the MSS for this aggregation using its first
450  * packet, and computes the number of aggregated segments and
451  * the gso_type.
452  */
453 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454                                  u16 len_on_bd, unsigned int pkt_len,
455                                  u16 num_of_coalesced_segs)
456 {
457         /* TPA aggregation won't have either IP options or TCP options
458          * other than timestamp or IPv6 extension headers.
459          */
460         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
463             PRS_FLAG_OVERETH_IPV6) {
464                 hdrs_len += sizeof(struct ipv6hdr);
465                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466         } else {
467                 hdrs_len += sizeof(struct iphdr);
468                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469         }
470
471         /* Check if there was a TCP timestamp; if there was, it will
472          * always be 12 bytes long: nop nop kind length echo val.
473          *
474          * Otherwise the FW would have closed the aggregation.
475          */
476         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477                 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
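        /* MSS approximation: payload of the first coalesced packet, i.e. the
         * length on the BD minus the Ethernet/IP/TCP (and timestamp) headers
         */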
479         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482          * to skb_shinfo(skb)->gso_segs
483          */
484         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
485 }
486
487 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488                               struct bnx2x_fastpath *fp, u16 index)
489 {
490         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
493         dma_addr_t mapping;
494
495         if (unlikely(page == NULL)) {
496                 BNX2X_ERR("Can't alloc sge\n");
497                 return -ENOMEM;
498         }
499
500         mapping = dma_map_page(&bp->pdev->dev, page, 0,
501                                SGE_PAGES, DMA_FROM_DEVICE);
502         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503                 __free_pages(page, PAGES_PER_SGE_SHIFT);
504                 BNX2X_ERR("Can't map sge\n");
505                 return -ENOMEM;
506         }
507
508         sw_buf->page = page;
509         dma_unmap_addr_set(sw_buf, mapping, mapping);
510
511         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
513
514         return 0;
515 }
516
517 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
518                                struct bnx2x_agg_info *tpa_info,
519                                u16 pages,
520                                struct sk_buff *skb,
521                                struct eth_end_agg_rx_cqe *cqe,
522                                u16 cqe_idx)
523 {
524         struct sw_rx_page *rx_pg, old_rx_pg;
525         u32 i, frag_len, frag_size;
526         int err, j, frag_id = 0;
527         u16 len_on_bd = tpa_info->len_on_bd;
528         u16 full_page = 0, gro_size = 0;
529
530         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
531
532         if (fp->mode == TPA_MODE_GRO) {
533                 gro_size = tpa_info->gro_size;
534                 full_page = tpa_info->full_page;
535         }
536
537         /* This is needed in order to enable forwarding support */
538         if (frag_size)
539                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540                                      le16_to_cpu(cqe->pkt_len),
541                                      le16_to_cpu(cqe->num_of_coalesced_segs));
542
543 #ifdef BNX2X_STOP_ON_ERROR
544         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
545                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546                           pages, cqe_idx);
547                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
548                 bnx2x_panic();
549                 return -EINVAL;
550         }
551 #endif
552
553         /* Run through the SGL and compose the fragmented skb */
554         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
555                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
556
557                 /* FW gives the indices of the SGE as if the ring is an array
558                    (meaning that "next" element will consume 2 indices) */
559                 if (fp->mode == TPA_MODE_GRO)
560                         frag_len = min_t(u32, frag_size, (u32)full_page);
561                 else /* LRO */
562                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
563
564                 rx_pg = &fp->rx_page_ring[sge_idx];
565                 old_rx_pg = *rx_pg;
566
567                 /* If we fail to allocate a substitute page, we simply stop
568                    where we are and drop the whole packet */
569                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570                 if (unlikely(err)) {
571                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
572                         return err;
573                 }
574
575                 /* Unmap the page as we are going to pass it to the stack */
576                 dma_unmap_page(&bp->pdev->dev,
577                                dma_unmap_addr(&old_rx_pg, mapping),
578                                SGE_PAGES, DMA_FROM_DEVICE);
579                 /* Add one frag and update the appropriate fields in the skb */
580                 if (fp->mode == TPA_MODE_LRO)
581                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
582                 else { /* GRO */
583                         int rem;
584                         int offset = 0;
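                        /* Split the SGE page into gro_size-sized frags; every
                         * additional frag referencing the same page needs its
                         * own page reference (get_page).
                         */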
585                         for (rem = frag_len; rem > 0; rem -= gro_size) {
586                                 int len = rem > gro_size ? gro_size : rem;
587                                 skb_fill_page_desc(skb, frag_id++,
588                                                    old_rx_pg.page, offset, len);
589                                 if (offset)
590                                         get_page(old_rx_pg.page);
591                                 offset += len;
592                         }
593                 }
594
595                 skb->data_len += frag_len;
596                 skb->truesize += SGE_PAGES;
597                 skb->len += frag_len;
598
599                 frag_size -= frag_len;
600         }
601
602         return 0;
603 }
604
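/* rx buffers come from the page-fragment allocator when fp->rx_frag_size is
 * set and from kmalloc otherwise; free and allocate them accordingly.
 */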
605 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606 {
607         if (fp->rx_frag_size)
608                 put_page(virt_to_head_page(data));
609         else
610                 kfree(data);
611 }
612
613 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614 {
615         if (fp->rx_frag_size)
616                 return netdev_alloc_frag(fp->rx_frag_size);
617
618         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
619 }
620
621 #ifdef CONFIG_INET
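/* Prepare an aggregated skb for tcp_gro_complete(): set the transport header
 * behind the IP header and seed th->check from the pseudo-header.
 */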
622 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623 {
624         const struct iphdr *iph = ip_hdr(skb);
625         struct tcphdr *th;
626
627         skb_set_transport_header(skb, sizeof(struct iphdr));
628         th = tcp_hdr(skb);
629
630         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631                                   iph->saddr, iph->daddr, 0);
632 }
633
634 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635 {
636         struct ipv6hdr *iph = ipv6_hdr(skb);
637         struct tcphdr *th;
638
639         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
640         th = tcp_hdr(skb);
641
642         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643                                   &iph->saddr, &iph->daddr, 0);
644 }
645
646 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
647                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
648 {
649         skb_set_network_header(skb, 0);
650         gro_func(bp, skb);
651         tcp_gro_complete(skb);
652 }
653 #endif
654
655 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
656                                struct sk_buff *skb)
657 {
658 #ifdef CONFIG_INET
659         if (skb_shinfo(skb)->gso_size) {
660                 switch (be16_to_cpu(skb->protocol)) {
661                 case ETH_P_IP:
662                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
663                         break;
664                 case ETH_P_IPV6:
665                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
666                         break;
667                 default:
668                         BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
669                                   be16_to_cpu(skb->protocol));
670                 }
671         }
672 #endif
673         napi_gro_receive(&fp->napi, skb);
674 }
675
676 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
677                            struct bnx2x_agg_info *tpa_info,
678                            u16 pages,
679                            struct eth_end_agg_rx_cqe *cqe,
680                            u16 cqe_idx)
681 {
682         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
683         u8 pad = tpa_info->placement_offset;
684         u16 len = tpa_info->len_on_bd;
685         struct sk_buff *skb = NULL;
686         u8 *new_data, *data = rx_buf->data;
687         u8 old_tpa_state = tpa_info->tpa_state;
688
689         tpa_info->tpa_state = BNX2X_TPA_STOP;
690
691         /* If there was an error during the handling of the TPA_START -
692          * drop this aggregation.
693          */
694         if (old_tpa_state == BNX2X_TPA_ERROR)
695                 goto drop;
696
697         /* Try to allocate the new data */
698         new_data = bnx2x_frag_alloc(fp);
699         /* Unmap skb in the pool anyway, as we are going to change
700            pool entry status to BNX2X_TPA_STOP even if new skb allocation
701            fails. */
702         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
703                          fp->rx_buf_size, DMA_FROM_DEVICE);
704         if (likely(new_data))
705                 skb = build_skb(data, fp->rx_frag_size);
706
707         if (likely(skb)) {
708 #ifdef BNX2X_STOP_ON_ERROR
709                 if (pad + len > fp->rx_buf_size) {
710                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
711                                   pad, len, fp->rx_buf_size);
712                         bnx2x_panic();
713                         return;
714                 }
715 #endif
716
717                 skb_reserve(skb, pad + NET_SKB_PAD);
718                 skb_put(skb, len);
719                 skb->rxhash = tpa_info->rxhash;
720                 skb->l4_rxhash = tpa_info->l4_rxhash;
721
722                 skb->protocol = eth_type_trans(skb, bp->dev);
723                 skb->ip_summed = CHECKSUM_UNNECESSARY;
724
725                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
726                                          skb, cqe, cqe_idx)) {
727                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
728                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
729                         bnx2x_gro_receive(bp, fp, skb);
730                 } else {
731                         DP(NETIF_MSG_RX_STATUS,
732                            "Failed to allocate new pages - dropping packet!\n");
733                         dev_kfree_skb_any(skb);
734                 }
735
736
737                 /* put new data in bin */
738                 rx_buf->data = new_data;
739
740                 return;
741         }
742         bnx2x_frag_free(fp, new_data);
743 drop:
744         /* drop the packet and keep the buffer in the bin */
745         DP(NETIF_MSG_RX_STATUS,
746            "Failed to allocate or map a new skb - dropping packet!\n");
747         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
748 }
749
750 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
751                                struct bnx2x_fastpath *fp, u16 index)
752 {
753         u8 *data;
754         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
755         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
756         dma_addr_t mapping;
757
758         data = bnx2x_frag_alloc(fp);
759         if (unlikely(data == NULL))
760                 return -ENOMEM;
761
762         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
763                                  fp->rx_buf_size,
764                                  DMA_FROM_DEVICE);
765         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
766                 bnx2x_frag_free(fp, data);
767                 BNX2X_ERR("Can't map rx data\n");
768                 return -ENOMEM;
769         }
770
771         rx_buf->data = data;
772         dma_unmap_addr_set(rx_buf, mapping, mapping);
773
774         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
775         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
776
777         return 0;
778 }
779
780 static
781 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
782                                  struct bnx2x_fastpath *fp,
783                                  struct bnx2x_eth_q_stats *qstats)
784 {
785         /* Do nothing if no L4 csum validation was done.
786          * We do not check whether IP csum was validated. For IPv4 we assume
787          * that if the card got as far as validating the L4 csum, it also
788          * validated the IP csum. IPv6 has no IP csum.
789          */
790         if (cqe->fast_path_cqe.status_flags &
791             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
792                 return;
793
794         /* If L4 validation was done, check if an error was found. */
795
796         if (cqe->fast_path_cqe.type_error_flags &
797             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
798              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
799                 qstats->hw_csum_err++;
800         else
801                 skb->ip_summed = CHECKSUM_UNNECESSARY;
802 }
803
804 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
805 {
806         struct bnx2x *bp = fp->bp;
807         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
808         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
809         int rx_pkt = 0;
810
811 #ifdef BNX2X_STOP_ON_ERROR
812         if (unlikely(bp->panic))
813                 return 0;
814 #endif
815
816         /* The CQ "next element" is the same size as a regular element,
817            that's why it's ok here */
818         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
819         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
820                 hw_comp_cons++;
821
822         bd_cons = fp->rx_bd_cons;
823         bd_prod = fp->rx_bd_prod;
824         bd_prod_fw = bd_prod;
825         sw_comp_cons = fp->rx_comp_cons;
826         sw_comp_prod = fp->rx_comp_prod;
827
828         /* Memory barrier necessary as speculative reads of the rx
829          * buffer can be ahead of the index in the status block
830          */
831         rmb();
832
833         DP(NETIF_MSG_RX_STATUS,
834            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
835            fp->index, hw_comp_cons, sw_comp_cons);
836
837         while (sw_comp_cons != hw_comp_cons) {
838                 struct sw_rx_bd *rx_buf = NULL;
839                 struct sk_buff *skb;
840                 union eth_rx_cqe *cqe;
841                 struct eth_fast_path_rx_cqe *cqe_fp;
842                 u8 cqe_fp_flags;
843                 enum eth_rx_cqe_type cqe_fp_type;
844                 u16 len, pad, queue;
845                 u8 *data;
846                 bool l4_rxhash;
847
848 #ifdef BNX2X_STOP_ON_ERROR
849                 if (unlikely(bp->panic))
850                         return 0;
851 #endif
852
853                 comp_ring_cons = RCQ_BD(sw_comp_cons);
854                 bd_prod = RX_BD(bd_prod);
855                 bd_cons = RX_BD(bd_cons);
856
857                 cqe = &fp->rx_comp_ring[comp_ring_cons];
858                 cqe_fp = &cqe->fast_path_cqe;
859                 cqe_fp_flags = cqe_fp->type_error_flags;
860                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
861
862                 DP(NETIF_MSG_RX_STATUS,
863                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
864                    CQE_TYPE(cqe_fp_flags),
865                    cqe_fp_flags, cqe_fp->status_flags,
866                    le32_to_cpu(cqe_fp->rss_hash_result),
867                    le16_to_cpu(cqe_fp->vlan_tag),
868                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
869
870                 /* is this a slowpath msg? */
871                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
872                         bnx2x_sp_event(fp, cqe);
873                         goto next_cqe;
874                 }
875
876                 rx_buf = &fp->rx_buf_ring[bd_cons];
877                 data = rx_buf->data;
878
879                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
880                         struct bnx2x_agg_info *tpa_info;
881                         u16 frag_size, pages;
882 #ifdef BNX2X_STOP_ON_ERROR
883                         /* sanity check */
884                         if (fp->disable_tpa &&
885                             (CQE_TYPE_START(cqe_fp_type) ||
886                              CQE_TYPE_STOP(cqe_fp_type)))
887                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
888                                           CQE_TYPE(cqe_fp_type));
889 #endif
890
891                         if (CQE_TYPE_START(cqe_fp_type)) {
892                                 u16 queue = cqe_fp->queue_index;
893                                 DP(NETIF_MSG_RX_STATUS,
894                                    "calling tpa_start on queue %d\n",
895                                    queue);
896
897                                 bnx2x_tpa_start(fp, queue,
898                                                 bd_cons, bd_prod,
899                                                 cqe_fp);
900
901                                 goto next_rx;
902
903                         }
904                         queue = cqe->end_agg_cqe.queue_index;
905                         tpa_info = &fp->tpa_info[queue];
906                         DP(NETIF_MSG_RX_STATUS,
907                            "calling tpa_stop on queue %d\n",
908                            queue);
909
910                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
911                                     tpa_info->len_on_bd;
912
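                        /* Work out how many SGE pages hold the rest of
                         * the aggregation (GRO packs full_page bytes per
                         * SGE).
                         */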
913                         if (fp->mode == TPA_MODE_GRO)
914                                 pages = (frag_size + tpa_info->full_page - 1) /
915                                          tpa_info->full_page;
916                         else
917                                 pages = SGE_PAGE_ALIGN(frag_size) >>
918                                         SGE_PAGE_SHIFT;
919
920                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
921                                        &cqe->end_agg_cqe, comp_ring_cons);
922 #ifdef BNX2X_STOP_ON_ERROR
923                         if (bp->panic)
924                                 return 0;
925 #endif
926
927                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
928                         goto next_cqe;
929                 }
930                 /* non TPA */
931                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
932                 pad = cqe_fp->placement_offset;
933                 dma_sync_single_for_cpu(&bp->pdev->dev,
934                                         dma_unmap_addr(rx_buf, mapping),
935                                         pad + RX_COPY_THRESH,
936                                         DMA_FROM_DEVICE);
937                 pad += NET_SKB_PAD;
938                 prefetch(data + pad); /* speed up eth_type_trans() */
939                 /* is this an error packet? */
940                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
941                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
942                            "ERROR  flags %x  rx packet %u\n",
943                            cqe_fp_flags, sw_comp_cons);
944                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
945                         goto reuse_rx;
946                 }
947
948                 /* Since we don't have a jumbo ring
949                  * copy small packets if mtu > 1500
950                  */
951                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
952                     (len <= RX_COPY_THRESH)) {
953                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
954                         if (skb == NULL) {
955                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
956                                    "ERROR  packet dropped because of alloc failure\n");
957                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
958                                 goto reuse_rx;
959                         }
960                         memcpy(skb->data, data + pad, len);
961                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
962                 } else {
963                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
964                                 dma_unmap_single(&bp->pdev->dev,
965                                                  dma_unmap_addr(rx_buf, mapping),
966                                                  fp->rx_buf_size,
967                                                  DMA_FROM_DEVICE);
968                                 skb = build_skb(data, fp->rx_frag_size);
969                                 if (unlikely(!skb)) {
970                                         bnx2x_frag_free(fp, data);
971                                         bnx2x_fp_qstats(bp, fp)->
972                                                         rx_skb_alloc_failed++;
973                                         goto next_rx;
974                                 }
975                                 skb_reserve(skb, pad);
976                         } else {
977                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
978                                    "ERROR  packet dropped because of alloc failure\n");
979                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
980 reuse_rx:
981                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
982                                 goto next_rx;
983                         }
984                 }
985
986                 skb_put(skb, len);
987                 skb->protocol = eth_type_trans(skb, bp->dev);
988
989                 /* Set Toeplitz hash for a non-LRO skb */
990                 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
991                 skb->l4_rxhash = l4_rxhash;
992
993                 skb_checksum_none_assert(skb);
994
995                 if (bp->dev->features & NETIF_F_RXCSUM)
996                         bnx2x_csum_validate(skb, cqe, fp,
997                                             bnx2x_fp_qstats(bp, fp));
998
999                 skb_record_rx_queue(skb, fp->rx_queue);
1000
1001                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1002                     PARSING_FLAGS_VLAN)
1003                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1004                                                le16_to_cpu(cqe_fp->vlan_tag));
1005                 napi_gro_receive(&fp->napi, skb);
1006
1007
1008 next_rx:
1009                 rx_buf->data = NULL;
1010
1011                 bd_cons = NEXT_RX_IDX(bd_cons);
1012                 bd_prod = NEXT_RX_IDX(bd_prod);
1013                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1014                 rx_pkt++;
1015 next_cqe:
1016                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1017                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1018
1019                 if (rx_pkt == budget)
1020                         break;
1021         } /* while */
1022
1023         fp->rx_bd_cons = bd_cons;
1024         fp->rx_bd_prod = bd_prod_fw;
1025         fp->rx_comp_cons = sw_comp_cons;
1026         fp->rx_comp_prod = sw_comp_prod;
1027
1028         /* Update producers */
1029         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1030                              fp->rx_sge_prod);
1031
1032         fp->rx_pkt += rx_pkt;
1033         fp->rx_calls++;
1034
1035         return rx_pkt;
1036 }
1037
1038 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1039 {
1040         struct bnx2x_fastpath *fp = fp_cookie;
1041         struct bnx2x *bp = fp->bp;
1042         u8 cos;
1043
1044         DP(NETIF_MSG_INTR,
1045            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1046            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1047
1048         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1049
1050 #ifdef BNX2X_STOP_ON_ERROR
1051         if (unlikely(bp->panic))
1052                 return IRQ_HANDLED;
1053 #endif
1054
1055         /* Handle Rx and Tx according to MSI-X vector */
1056         prefetch(fp->rx_cons_sb);
1057
1058         for_each_cos_in_tx_queue(fp, cos)
1059                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1060
1061         prefetch(&fp->sb_running_index[SM_RX_ID]);
1062         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1063
1064         return IRQ_HANDLED;
1065 }
1066
1067 /* HW Lock for shared dual port PHYs */
1068 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1069 {
1070         mutex_lock(&bp->port.phy_mutex);
1071
1072         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1073 }
1074
1075 void bnx2x_release_phy_lock(struct bnx2x *bp)
1076 {
1077         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1078
1079         mutex_unlock(&bp->port.phy_mutex);
1080 }
1081
1082 /* calculates MF speed according to the current line speed and MF configuration */
1083 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1084 {
1085         u16 line_speed = bp->link_vars.line_speed;
1086         if (IS_MF(bp)) {
1087                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1088                                                    bp->mf_config[BP_VN(bp)]);
1089
1090                 /* Calculate the current MAX line speed limit for the MF
1091                  * devices
1092                  */
1093                 if (IS_MF_SI(bp))
1094                         line_speed = (line_speed * maxCfg) / 100;
1095                 else { /* SD mode */
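                        /* maxCfg is expressed in units of 100 Mbps */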
1096                         u16 vn_max_rate = maxCfg * 100;
1097
1098                         if (vn_max_rate < line_speed)
1099                                 line_speed = vn_max_rate;
1100                 }
1101         }
1102
1103         return line_speed;
1104 }
1105
1106 /**
1107  * bnx2x_fill_report_data - fill link report data to report
1108  *
1109  * @bp:         driver handle
1110  * @data:       link state to update
1111  *
1112  * It uses non-atomic bit operations because it is called under the mutex.
1113  */
1114 static void bnx2x_fill_report_data(struct bnx2x *bp,
1115                                    struct bnx2x_link_report_data *data)
1116 {
1117         u16 line_speed = bnx2x_get_mf_speed(bp);
1118
1119         memset(data, 0, sizeof(*data));
1120
1121         /* Fill the report data: effective line speed */
1122         data->line_speed = line_speed;
1123
1124         /* Link is down */
1125         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1126                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1127                           &data->link_report_flags);
1128
1129         /* Full DUPLEX */
1130         if (bp->link_vars.duplex == DUPLEX_FULL)
1131                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1132
1133         /* Rx Flow Control is ON */
1134         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1135                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1136
1137         /* Tx Flow Control is ON */
1138         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1139                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1140 }
1141
1142 /**
1143  * bnx2x_link_report - report link status to OS.
1144  *
1145  * @bp:         driver handle
1146  *
1147  * Calls __bnx2x_link_report() under the same locking scheme
1148  * as the link/PHY state managing code to ensure consistent link
1149  * reporting.
1150  */
1151
1152 void bnx2x_link_report(struct bnx2x *bp)
1153 {
1154         bnx2x_acquire_phy_lock(bp);
1155         __bnx2x_link_report(bp);
1156         bnx2x_release_phy_lock(bp);
1157 }
1158
1159 /**
1160  * __bnx2x_link_report - report link status to OS.
1161  *
1162  * @bp:         driver handle
1163  *
1164  * Non-atomic implementation.
1165  * Should be called under the phy_lock.
1166  */
1167 void __bnx2x_link_report(struct bnx2x *bp)
1168 {
1169         struct bnx2x_link_report_data cur_data;
1170
1171         /* reread mf_cfg */
1172         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1173                 bnx2x_read_mf_cfg(bp);
1174
1175         /* Read the current link report info */
1176         bnx2x_fill_report_data(bp, &cur_data);
1177
1178         /* Don't report link down or exactly the same link status twice */
1179         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1180             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1181                       &bp->last_reported_link.link_report_flags) &&
1182              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1183                       &cur_data.link_report_flags)))
1184                 return;
1185
1186         bp->link_cnt++;
1187
1188         /* We are going to report new link parameters now -
1189          * remember the current data for the next time.
1190          */
1191         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1192
1193         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1194                      &cur_data.link_report_flags)) {
1195                 netif_carrier_off(bp->dev);
1196                 netdev_err(bp->dev, "NIC Link is Down\n");
1197                 return;
1198         } else {
1199                 const char *duplex;
1200                 const char *flow;
1201
1202                 netif_carrier_on(bp->dev);
1203
1204                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1205                                        &cur_data.link_report_flags))
1206                         duplex = "full";
1207                 else
1208                         duplex = "half";
1209
1210                 /* Handle the FC at the end so that only these flags could
1211                  * possibly be set. This way we can easily check whether any
1212                  * FC is enabled.
1213                  */
1214                 if (cur_data.link_report_flags) {
1215                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1216                                      &cur_data.link_report_flags)) {
1217                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1218                                      &cur_data.link_report_flags))
1219                                         flow = "ON - receive & transmit";
1220                                 else
1221                                         flow = "ON - receive";
1222                         } else {
1223                                 flow = "ON - transmit";
1224                         }
1225                 } else {
1226                         flow = "none";
1227                 }
1228                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1229                             cur_data.line_speed, duplex, flow);
1230         }
1231 }
1232
1233 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1234 {
1235         int i;
1236
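        /* The last two entries of each SGE ring page are reserved as a
         * "next page" pointer; chain every page to the one after it
         * (wrapping back to the first page).
         */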
1237         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1238                 struct eth_rx_sge *sge;
1239
1240                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1241                 sge->addr_hi =
1242                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1243                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1244
1245                 sge->addr_lo =
1246                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1247                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1248         }
1249 }
1250
1251 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1252                                 struct bnx2x_fastpath *fp, int last)
1253 {
1254         int i;
1255
1256         for (i = 0; i < last; i++) {
1257                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1258                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1259                 u8 *data = first_buf->data;
1260
1261                 if (data == NULL) {
1262                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1263                         continue;
1264                 }
1265                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1266                         dma_unmap_single(&bp->pdev->dev,
1267                                          dma_unmap_addr(first_buf, mapping),
1268                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1269                 bnx2x_frag_free(fp, data);
1270                 first_buf->data = NULL;
1271         }
1272 }
1273
1274 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1275 {
1276         int j;
1277
1278         for_each_rx_queue_cnic(bp, j) {
1279                 struct bnx2x_fastpath *fp = &bp->fp[j];
1280
1281                 fp->rx_bd_cons = 0;
1282
1283                 /* Activate BD ring */
1284                 /* Warning!
1285                  * this will generate an interrupt (to the TSTORM);
1286                  * it must only be done after the chip is initialized
1287                  */
1288                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289                                      fp->rx_sge_prod);
1290         }
1291 }
1292
1293 void bnx2x_init_rx_rings(struct bnx2x *bp)
1294 {
1295         int func = BP_FUNC(bp);
1296         u16 ring_prod;
1297         int i, j;
1298
1299         /* Allocate TPA resources */
1300         for_each_eth_queue(bp, j) {
1301                 struct bnx2x_fastpath *fp = &bp->fp[j];
1302
1303                 DP(NETIF_MSG_IFUP,
1304                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1305
1306                 if (!fp->disable_tpa) {
1307                         /* Fill the per-aggregation pool */
1308                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1309                                 struct bnx2x_agg_info *tpa_info =
1310                                         &fp->tpa_info[i];
1311                                 struct sw_rx_bd *first_buf =
1312                                         &tpa_info->first_buf;
1313
1314                                 first_buf->data = bnx2x_frag_alloc(fp);
1315                                 if (!first_buf->data) {
1316                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1317                                                   j);
1318                                         bnx2x_free_tpa_pool(bp, fp, i);
1319                                         fp->disable_tpa = 1;
1320                                         break;
1321                                 }
1322                                 dma_unmap_addr_set(first_buf, mapping, 0);
1323                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1324                         }
1325
1326                         /* "next page" elements initialization */
1327                         bnx2x_set_next_page_sgl(fp);
1328
1329                         /* set SGEs bit mask */
1330                         bnx2x_init_sge_ring_bit_mask(fp);
1331
1332                         /* Allocate SGEs and initialize the ring elements */
1333                         for (i = 0, ring_prod = 0;
1334                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1335
1336                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1337                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1338                                                   i);
1339                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1340                                                   j);
1341                                         /* Cleanup already allocated elements */
1342                                         bnx2x_free_rx_sge_range(bp, fp,
1343                                                                 ring_prod);
1344                                         bnx2x_free_tpa_pool(bp, fp,
1345                                                             MAX_AGG_QS(bp));
1346                                         fp->disable_tpa = 1;
1347                                         ring_prod = 0;
1348                                         break;
1349                                 }
1350                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1351                         }
1352
1353                         fp->rx_sge_prod = ring_prod;
1354                 }
1355         }
1356
1357         for_each_eth_queue(bp, j) {
1358                 struct bnx2x_fastpath *fp = &bp->fp[j];
1359
1360                 fp->rx_bd_cons = 0;
1361
1362                 /* Activate BD ring */
1363                 /* Warning!
1364                  * this will generate an interrupt (to the TSTORM);
1365                  * it must only be done after the chip is initialized
1366                  */
1367                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1368                                      fp->rx_sge_prod);
1369
1370                 if (j != 0)
1371                         continue;
1372
1373                 if (CHIP_IS_E1(bp)) {
1374                         REG_WR(bp, BAR_USTRORM_INTMEM +
1375                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1376                                U64_LO(fp->rx_comp_mapping));
1377                         REG_WR(bp, BAR_USTRORM_INTMEM +
1378                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1379                                U64_HI(fp->rx_comp_mapping));
1380                 }
1381         }
1382 }
1383
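/* Free the pending Tx packets of a single fastpath: walk every CoS Tx ring,
 * release all packets queued between the SW consumer and producer, and reset
 * the corresponding netdev Tx queue (BQL) state.
 */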
1384 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1385 {
1386         u8 cos;
1387         struct bnx2x *bp = fp->bp;
1388
1389         for_each_cos_in_tx_queue(fp, cos) {
1390                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1391                 unsigned pkts_compl = 0, bytes_compl = 0;
1392
1393                 u16 sw_prod = txdata->tx_pkt_prod;
1394                 u16 sw_cons = txdata->tx_pkt_cons;
1395
1396                 while (sw_cons != sw_prod) {
1397                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1398                                           &pkts_compl, &bytes_compl);
1399                         sw_cons++;
1400                 }
1401
1402                 netdev_tx_reset_queue(
1403                         netdev_get_tx_queue(bp->dev,
1404                                             txdata->txq_index));
1405         }
1406 }
1407
1408 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1409 {
1410         int i;
1411
1412         for_each_tx_queue_cnic(bp, i) {
1413                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1414         }
1415 }
1416
1417 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1418 {
1419         int i;
1420
1421         for_each_eth_queue(bp, i) {
1422                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1423         }
1424 }
1425
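/* Unmap and free every Rx buffer of a fastpath's Rx BD ring (no-op if the
 * ring was never allocated).
 */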
1426 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1427 {
1428         struct bnx2x *bp = fp->bp;
1429         int i;
1430
1431         /* ring wasn't allocated */
1432         if (fp->rx_buf_ring == NULL)
1433                 return;
1434
1435         for (i = 0; i < NUM_RX_BD; i++) {
1436                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1437                 u8 *data = rx_buf->data;
1438
1439                 if (data == NULL)
1440                         continue;
1441                 dma_unmap_single(&bp->pdev->dev,
1442                                  dma_unmap_addr(rx_buf, mapping),
1443                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1444
1445                 rx_buf->data = NULL;
1446                 bnx2x_frag_free(fp, data);
1447         }
1448 }
1449
1450 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1451 {
1452         int j;
1453
1454         for_each_rx_queue_cnic(bp, j) {
1455                 bnx2x_free_rx_bds(&bp->fp[j]);
1456         }
1457 }
1458
1459 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1460 {
1461         int j;
1462
1463         for_each_eth_queue(bp, j) {
1464                 struct bnx2x_fastpath *fp = &bp->fp[j];
1465
1466                 bnx2x_free_rx_bds(fp);
1467
1468                 if (!fp->disable_tpa)
1469                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1470         }
1471 }
1472
1473 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1474 {
1475         bnx2x_free_tx_skbs_cnic(bp);
1476         bnx2x_free_rx_skbs_cnic(bp);
1477 }
1478
1479 void bnx2x_free_skbs(struct bnx2x *bp)
1480 {
1481         bnx2x_free_tx_skbs(bp);
1482         bnx2x_free_rx_skbs(bp);
1483 }
1484
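/* Update the MAX bandwidth field of the multi-function configuration and,
 * if the value actually changed, notify the MCP via the SET_MF_BW command.
 */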
1485 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1486 {
1487         /* load old values */
1488         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1489
1490         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1491                 /* leave all but MAX value */
1492                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1493
1494                 /* set new MAX value */
1495                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1496                                 & FUNC_MF_CFG_MAX_BW_MASK;
1497
1498                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499         }
1500 }
1501
1502 /**
1503  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1504  *
1505  * @bp:         driver handle
1506  * @nvecs:      number of vectors to be released
1507  */
1508 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1509 {
1510         int i, offset = 0;
1511
1512         if (nvecs == offset)
1513                 return;
1514
1515         /* VFs don't have a default SB */
1516         if (IS_PF(bp)) {
1517                 free_irq(bp->msix_table[offset].vector, bp->dev);
1518                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1519                    bp->msix_table[offset].vector);
1520                 offset++;
1521         }
1522
1523         if (CNIC_SUPPORT(bp)) {
1524                 if (nvecs == offset)
1525                         return;
1526                 offset++;
1527         }
1528
1529         for_each_eth_queue(bp, i) {
1530                 if (nvecs == offset)
1531                         return;
1532                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1533                    i, bp->msix_table[offset].vector);
1534
1535                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1536         }
1537 }
1538
1539 void bnx2x_free_irq(struct bnx2x *bp)
1540 {
1541         if (bp->flags & USING_MSIX_FLAG &&
1542             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1543                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1544
1545                 /* vfs don't have a default status block */
1546                 if (IS_PF(bp))
1547                         nvecs++;
1548
1549                 bnx2x_free_msix_irqs(bp, nvecs);
1550         } else {
1551                 free_irq(bp->dev->irq, bp->dev);
1552         }
1553 }
1554
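/* Populate the MSI-X table and enable MSI-X.
 *
 * The vector layout is: an optional slowpath vector (PF only), an optional
 * CNIC vector, then one vector per ETH queue. If the full set cannot be
 * granted, the function retries with however many vectors the PCI core
 * reports as available (shrinking the number of ETH queues accordingly),
 * falls back to a single vector, or gives up on MSI-X entirely.
 */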
1555 int bnx2x_enable_msix(struct bnx2x *bp)
1556 {
1557         int msix_vec = 0, i, rc;
1558
1559         /* VFs don't have a default status block */
1560         if (IS_PF(bp)) {
1561                 bp->msix_table[msix_vec].entry = msix_vec;
1562                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1563                                bp->msix_table[0].entry);
1564                 msix_vec++;
1565         }
1566
1567         /* CNIC requires an MSI-X vector for itself */
1568         if (CNIC_SUPPORT(bp)) {
1569                 bp->msix_table[msix_vec].entry = msix_vec;
1570                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1571                                msix_vec, bp->msix_table[msix_vec].entry);
1572                 msix_vec++;
1573         }
1574
1575         /* We need separate vectors for ETH queues only (not FCoE) */
1576         for_each_eth_queue(bp, i) {
1577                 bp->msix_table[msix_vec].entry = msix_vec;
1578                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1579                                msix_vec, msix_vec, i);
1580                 msix_vec++;
1581         }
1582
1583         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1584            msix_vec);
1585
1586         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1587
1588         /*
1589          * reconfigure number of tx/rx queues according to available
1590          * MSI-X vectors
1591          */
1592         if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1593                 /* how many fewer vectors will we have? */
1594                 int diff = msix_vec - rc;
1595
1596                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1597
1598                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1599
1600                 if (rc) {
1601                         BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1602                         goto no_msix;
1603                 }
1604                 /*
1605                  * decrease number of queues by number of unallocated entries
1606                  */
1607                 bp->num_ethernet_queues -= diff;
1608                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1609
1610                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1611                                bp->num_queues);
1612         } else if (rc > 0) {
1613                 /* Get by with single vector */
1614                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1615                 if (rc) {
1616                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617                                        rc);
1618                         goto no_msix;
1619                 }
1620
1621                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1622                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1623
1624                 BNX2X_DEV_INFO("set number of queues to 1\n");
1625                 bp->num_ethernet_queues = 1;
1626                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1627         } else if (rc < 0) {
1628                 BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1629                 goto no_msix;
1630         }
1631
1632         bp->flags |= USING_MSIX_FLAG;
1633
1634         return 0;
1635
1636 no_msix:
1637         /* fall back to INTx if there is not enough memory */
1638         if (rc == -ENOMEM)
1639                 bp->flags |= DISABLE_MSI_FLAG;
1640
1641         return rc;
1642 }
1643
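/* Request the previously enabled MSI-X vectors: the slowpath vector (PF
 * only), then - skipping the CNIC slot - one vector per ETH fastpath.
 * If a fastpath request fails, the vectors requested so far are released.
 */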
1644 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1645 {
1646         int i, rc, offset = 0;
1647
1648         /* no default status block for vf */
1649         if (IS_PF(bp)) {
1650                 rc = request_irq(bp->msix_table[offset++].vector,
1651                                  bnx2x_msix_sp_int, 0,
1652                                  bp->dev->name, bp->dev);
1653                 if (rc) {
1654                         BNX2X_ERR("request sp irq failed\n");
1655                         return -EBUSY;
1656                 }
1657         }
1658
1659         if (CNIC_SUPPORT(bp))
1660                 offset++;
1661
1662         for_each_eth_queue(bp, i) {
1663                 struct bnx2x_fastpath *fp = &bp->fp[i];
1664                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1665                          bp->dev->name, i);
1666
1667                 rc = request_irq(bp->msix_table[offset].vector,
1668                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1669                 if (rc) {
1670                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1671                               bp->msix_table[offset].vector, rc);
1672                         bnx2x_free_msix_irqs(bp, offset);
1673                         return -EBUSY;
1674                 }
1675
1676                 offset++;
1677         }
1678
1679         i = BNX2X_NUM_ETH_QUEUES(bp);
1680         if (IS_PF(bp)) {
1681                 offset = 1 + CNIC_SUPPORT(bp);
1682                 netdev_info(bp->dev,
1683                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1684                             bp->msix_table[0].vector,
1685                             0, bp->msix_table[offset].vector,
1686                             i - 1, bp->msix_table[offset + i - 1].vector);
1687         } else {
1688                 offset = CNIC_SUPPORT(bp);
1689                 netdev_info(bp->dev,
1690                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1691                             0, bp->msix_table[offset].vector,
1692                             i - 1, bp->msix_table[offset + i - 1].vector);
1693         }
1694         return 0;
1695 }
1696
1697 int bnx2x_enable_msi(struct bnx2x *bp)
1698 {
1699         int rc;
1700
1701         rc = pci_enable_msi(bp->pdev);
1702         if (rc) {
1703                 BNX2X_DEV_INFO("MSI is not attainable\n");
1704                 return -1;
1705         }
1706         bp->flags |= USING_MSI_FLAG;
1707
1708         return 0;
1709 }
1710
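/* Request the single interrupt line used in non-multi-vector modes: the
 * first MSI-X vector, the MSI vector, or the shared legacy INTx line,
 * depending on bp->flags.
 */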
1711 static int bnx2x_req_irq(struct bnx2x *bp)
1712 {
1713         unsigned long flags;
1714         unsigned int irq;
1715
1716         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1717                 flags = 0;
1718         else
1719                 flags = IRQF_SHARED;
1720
1721         if (bp->flags & USING_MSIX_FLAG)
1722                 irq = bp->msix_table[0].vector;
1723         else
1724                 irq = bp->pdev->irq;
1725
1726         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1727 }
1728
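/* Hook up interrupts according to the negotiated mode: per-queue MSI-X
 * vectors when available, otherwise a single MSI-X/MSI/INTx interrupt.
 */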
1729 int bnx2x_setup_irqs(struct bnx2x *bp)
1730 {
1731         int rc = 0;
1732         if (bp->flags & USING_MSIX_FLAG &&
1733             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1734                 rc = bnx2x_req_msix_irqs(bp);
1735                 if (rc)
1736                         return rc;
1737         } else {
1738                 rc = bnx2x_req_irq(bp);
1739                 if (rc) {
1740                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1741                         return rc;
1742                 }
1743                 if (bp->flags & USING_MSI_FLAG) {
1744                         bp->dev->irq = bp->pdev->irq;
1745                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1746                                     bp->dev->irq);
1747                 }
1748                 if (bp->flags & USING_MSIX_FLAG) {
1749                         bp->dev->irq = bp->msix_table[0].vector;
1750                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1751                                     bp->dev->irq);
1752                 }
1753         }
1754
1755         return 0;
1756 }
1757
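/* NAPI enable/disable helpers for the regular ETH queues and for the
 * CNIC (FCoE/iSCSI) queues.
 */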
1758 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1759 {
1760         int i;
1761
1762         for_each_rx_queue_cnic(bp, i)
1763                 napi_enable(&bnx2x_fp(bp, i, napi));
1764 }
1765
1766 static void bnx2x_napi_enable(struct bnx2x *bp)
1767 {
1768         int i;
1769
1770         for_each_eth_queue(bp, i)
1771                 napi_enable(&bnx2x_fp(bp, i, napi));
1772 }
1773
1774 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1775 {
1776         int i;
1777
1778         for_each_rx_queue_cnic(bp, i)
1779                 napi_disable(&bnx2x_fp(bp, i, napi));
1780 }
1781
1782 static void bnx2x_napi_disable(struct bnx2x *bp)
1783 {
1784         int i;
1785
1786         for_each_eth_queue(bp, i)
1787                 napi_disable(&bnx2x_fp(bp, i, napi));
1788 }
1789
1790 void bnx2x_netif_start(struct bnx2x *bp)
1791 {
1792         if (netif_running(bp->dev)) {
1793                 bnx2x_napi_enable(bp);
1794                 if (CNIC_LOADED(bp))
1795                         bnx2x_napi_enable_cnic(bp);
1796                 bnx2x_int_enable(bp);
1797                 if (bp->state == BNX2X_STATE_OPEN)
1798                         netif_tx_wake_all_queues(bp->dev);
1799         }
1800 }
1801
1802 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1803 {
1804         bnx2x_int_disable_sync(bp, disable_hw);
1805         bnx2x_napi_disable(bp);
1806         if (CNIC_LOADED(bp))
1807                 bnx2x_napi_disable_cnic(bp);
1808 }
1809
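/* Tx queue selection: when CNIC/FCoE is loaded, FCoE and FIP frames (with or
 * without a VLAN tag) are steered to the dedicated FCoE ring; everything else
 * is hashed over the ETH queues.
 */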
1810 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1811 {
1812         struct bnx2x *bp = netdev_priv(dev);
1813
1814         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1815                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1816                 u16 ether_type = ntohs(hdr->h_proto);
1817
1818                 /* Skip VLAN tag if present */
1819                 if (ether_type == ETH_P_8021Q) {
1820                         struct vlan_ethhdr *vhdr =
1821                                 (struct vlan_ethhdr *)skb->data;
1822
1823                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1824                 }
1825
1826                 /* If ethertype is FCoE or FIP - use FCoE ring */
1827                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1828                         return bnx2x_fcoe_tx(bp, txq_index);
1829         }
1830
1831         /* select a non-FCoE queue */
1832         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1833 }
1834
1835 void bnx2x_set_num_queues(struct bnx2x *bp)
1836 {
1837         /* RSS queues */
1838         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1839
1840         /* override in STORAGE SD modes */
1841         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1842                 bp->num_ethernet_queues = 1;
1843
1844         /* Add special queues */
1845         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1846         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1847
1848         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1849 }
1850
1851 /**
1852  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1853  *
1854  * @bp:         Driver handle
1855  *
1856  * We currently support at most 16 Tx queues for each CoS, thus we will
1857  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1858  * bp->max_cos.
1859  *
1860  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1861  * index after all ETH L2 indices.
1862  *
1863  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1864  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1865  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1866  *
1867  * The proper configuration of skb->queue_mapping is handled by
1868  * bnx2x_select_queue() and __skb_tx_hash().
1869  *
1870  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1871  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1872  */
1873 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1874 {
1875         int rc, tx, rx;
1876
1877         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1878         rx = BNX2X_NUM_ETH_QUEUES(bp);
1879
1880         /* account for fcoe queue */
1881         if (include_cnic && !NO_FCOE(bp)) {
1882                 rx++;
1883                 tx++;
1884         }
1885
1886         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1887         if (rc) {
1888                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1889                 return rc;
1890         }
1891         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1892         if (rc) {
1893                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1894                 return rc;
1895         }
1896
1897         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1898                           tx, rx);
1899
1900         return rc;
1901 }
1902
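/* Compute the Rx buffer size of every queue as: FW alignment start +
 * IP header alignment padding + Ethernet overhead + MTU + FW alignment end,
 * then decide whether the buffer (plus NET_SKB_PAD) still fits into a page,
 * in which case page fragments are used; otherwise rx_frag_size is cleared
 * and full allocations are used. With a standard 1500 byte MTU the result
 * typically still fits a 4K page, so fragments are the common case.
 */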
1903 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1904 {
1905         int i;
1906
1907         for_each_queue(bp, i) {
1908                 struct bnx2x_fastpath *fp = &bp->fp[i];
1909                 u32 mtu;
1910
1911                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1912                 if (IS_FCOE_IDX(i))
1913                         /*
1914                          * Although no IP frames are expected to arrive on
1915                          * this ring, we still want to add an
1916                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1917                          * overrun attack.
1918                          */
1919                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1920                 else
1921                         mtu = bp->dev->mtu;
1922                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1923                                   IP_HEADER_ALIGNMENT_PADDING +
1924                                   ETH_OVREHEAD +
1925                                   mtu +
1926                                   BNX2X_FW_RX_ALIGN_END;
1927                 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
1928                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1929                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1930                 else
1931                         fp->rx_frag_size = 0;
1932         }
1933 }
1934
1935 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1936 {
1937         int i;
1938         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1939
1940         /* Prepare the initial contents of the indirection table if RSS is
1941          * enabled
1942          */
1943         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1944                 bp->rss_conf_obj.ind_table[i] =
1945                         bp->fp->cl_id +
1946                         ethtool_rxfh_indir_default(i, num_eth_queues);
1947
1948         /*
1949          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1950          * per-port, so if explicit configuration is needed, do it only
1951          * for a PMF.
1952          *
1953          * For 57712 and newer on the other hand it's a per-function
1954          * configuration.
1955          */
1956         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1957 }
1958
1959 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1960                         bool config_hash)
1961 {
1962         struct bnx2x_config_rss_params params = {NULL};
1963
1964         /* Although RSS is meaningless when there is a single HW queue, we
1965          * still need it enabled in order to have HW Rx hash generated.
1966          *
1967          * if (!is_eth_multi(bp))
1968          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1969          */
1970
1971         params.rss_obj = rss_obj;
1972
1973         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1974
1975         __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1976
1977         /* RSS configuration */
1978         __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1979         __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1980         __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1981         __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1982         if (rss_obj->udp_rss_v4)
1983                 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1984         if (rss_obj->udp_rss_v6)
1985                 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1986
1987         /* Hash bits */
1988         params.rss_result_mask = MULTI_MASK;
1989
1990         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1991
1992         if (config_hash) {
1993                 /* RSS keys */
1994                 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1995                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1996         }
1997
1998         return bnx2x_config_rss(bp, &params);
1999 }
2000
2001 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2002 {
2003         struct bnx2x_func_state_params func_params = {NULL};
2004
2005         /* Prepare parameters for function state transitions */
2006         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2007
2008         func_params.f_obj = &bp->func_obj;
2009         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2010
2011         func_params.params.hw_init.load_phase = load_code;
2012
2013         return bnx2x_func_state_change(bp, &func_params);
2014 }
2015
2016 /*
2017  * Cleans the objects that have internal lists without sending
2018  * ramrods. Should be run when interrupts are disabled.
2019  */
2020 void bnx2x_squeeze_objects(struct bnx2x *bp)
2021 {
2022         int rc;
2023         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2024         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2025         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2026
2027         /***************** Cleanup MACs' object first *************************/
2028
2029         /* Wait for completion of the requested commands */
2030         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2031         /* Perform a dry cleanup */
2032         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2033
2034         /* Clean ETH primary MAC */
2035         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2036         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2037                                  &ramrod_flags);
2038         if (rc != 0)
2039                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2040
2041         /* Cleanup UC list */
2042         vlan_mac_flags = 0;
2043         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2044         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2045                                  &ramrod_flags);
2046         if (rc != 0)
2047                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2048
2049         /***************** Now clean mcast object *****************************/
2050         rparam.mcast_obj = &bp->mcast_obj;
2051         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2052
2053         /* Add a DEL command... */
2054         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2055         if (rc < 0)
2056                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2057                           rc);
2058
2059         /* ...and wait until all pending commands are cleared */
2060         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2061         while (rc != 0) {
2062                 if (rc < 0) {
2063                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2064                                   rc);
2065                         return;
2066                 }
2067
2068                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2069         }
2070 }
2071
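/* Error-exit helpers for the load paths below: in normal builds they record
 * the failure (error state / cnic not loaded) and jump to the given unwind
 * label; with BNX2X_STOP_ON_ERROR defined they set the driver's panic flag
 * and return -EBUSY instead.
 */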
2072 #ifndef BNX2X_STOP_ON_ERROR
2073 #define LOAD_ERROR_EXIT(bp, label) \
2074         do { \
2075                 (bp)->state = BNX2X_STATE_ERROR; \
2076                 goto label; \
2077         } while (0)
2078
2079 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2080         do { \
2081                 bp->cnic_loaded = false; \
2082                 goto label; \
2083         } while (0)
2084 #else /*BNX2X_STOP_ON_ERROR*/
2085 #define LOAD_ERROR_EXIT(bp, label) \
2086         do { \
2087                 (bp)->state = BNX2X_STATE_ERROR; \
2088                 (bp)->panic = 1; \
2089                 return -EBUSY; \
2090         } while (0)
2091 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2092         do { \
2093                 bp->cnic_loaded = false; \
2094                 (bp)->panic = 1; \
2095                 return -EBUSY; \
2096         } while (0)
2097 #endif /*BNX2X_STOP_ON_ERROR*/
2098
2099 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2100 {
2101         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2102                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2103         return;
2104 }
2105
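/* Allocate a single DMA buffer for FW statistics: the request structure
 * (header + command groups) is placed first and the data/counters area
 * directly after it; shortcut pointers into both parts are set up below.
 */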
2106 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2107 {
2108         int num_groups, vf_headroom = 0;
2109         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2110
2111         /* number of queues for statistics is number of eth queues + FCoE */
2112         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2113
2114         /* Total number of FW statistics requests =
2115          * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2116          * and fcoe l2 queue) stats + num of queues (which includes another 1
2117          * for fcoe l2 queue if applicable)
2118          */
2119         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2120
2121         /* vf stats appear in the request list, but their data is allocated by
2122          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2123          * it is used to determine where to place the vf stats queries in the
2124          * request struct
2125          */
2126         if (IS_SRIOV(bp))
2127                 vf_headroom = bnx2x_vf_headroom(bp);
2128
2129         /* Request is built from stats_query_header and an array of
2130          * stats_query_cmd_group each of which contains
2131          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2132          * configured in the stats_query_header.
2133          */
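        /* Illustration (numbers assumed, not taken from this file): if
         * STATS_QUERY_CMD_COUNT were 16 and fw_stats_num + vf_headroom
         * were 18, the lines below would yield 18 / 16 = 1 full group
         * plus 1 partial group, i.e. num_groups = 2.
         */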
2134         num_groups =
2135                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2136                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2137                  1 : 0));
2138
2139         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2140            bp->fw_stats_num, vf_headroom, num_groups);
2141         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2142                 num_groups * sizeof(struct stats_query_cmd_group);
2143
2144         /* Data for statistics requests + stats_counter
2145          * stats_counter holds per-STORM counters that are incremented
2146          * when STORM has finished with the current request.
2147          * memory for FCoE offloaded statistics is counted anyway,
2148          * even if they will not be sent.
2149          * VF stats are not accounted for here as the data of VF stats is stored
2150          * in memory allocated by the VF, not here.
2151          */
2152         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2153                 sizeof(struct per_pf_stats) +
2154                 sizeof(struct fcoe_statistics_params) +
2155                 sizeof(struct per_queue_stats) * num_queue_stats +
2156                 sizeof(struct stats_counter);
2157
2158         BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2159                         bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2160
2161         /* Set shortcuts */
2162         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2163         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2164         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2165                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2166         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2167                 bp->fw_stats_req_sz;
2168
2169         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2170            U64_HI(bp->fw_stats_req_mapping),
2171            U64_LO(bp->fw_stats_req_mapping));
2172         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2173            U64_HI(bp->fw_stats_data_mapping),
2174            U64_LO(bp->fw_stats_data_mapping));
2175         return 0;
2176
2177 alloc_mem_err:
2178         bnx2x_free_fw_stats_mem(bp);
2179         BNX2X_ERR("Can't allocate FW stats memory\n");
2180         return -ENOMEM;
2181 }
2182
2183 /* send load request to mcp and analyze response */
2184 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2185 {
2186         u32 param;
2187
2188         /* init fw_seq */
2189         bp->fw_seq =
2190                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2191                  DRV_MSG_SEQ_NUMBER_MASK);
2192         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2193
2194         /* Get current FW pulse sequence */
2195         bp->fw_drv_pulse_wr_seq =
2196                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2197                  DRV_PULSE_SEQ_MASK);
2198         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2199
2200         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2201
2202         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2203                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2204
2205         /* load request */
2206         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2207
2208         /* if mcp fails to respond we must abort */
2209         if (!(*load_code)) {
2210                 BNX2X_ERR("MCP response failure, aborting\n");
2211                 return -EBUSY;
2212         }
2213
2214         /* If mcp refused (e.g. other port is in diagnostic mode) we
2215          * must abort
2216          */
2217         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2218                 BNX2X_ERR("MCP refused load request, aborting\n");
2219                 return -EBUSY;
2220         }
2221         return 0;
2222 }
2223
2224 /* check whether another PF has already loaded FW to chip. In
2225  * virtualized environments a pf from another VM may have already
2226  * initialized the device including loading FW
2227  */
2228 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2229 {
2230         /* is another pf loaded on this engine? */
2231         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2232             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2233                 /* build my FW version dword */
2234                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2235                         (BCM_5710_FW_MINOR_VERSION << 8) +
2236                         (BCM_5710_FW_REVISION_VERSION << 16) +
2237                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2238
2239                 /* read loaded FW from chip */
2240                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2241
2242                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2243                    loaded_fw, my_fw);
2244
2245                 /* abort nic load if version mismatch */
2246                 if (my_fw != loaded_fw) {
2247                         BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2248                                   loaded_fw, my_fw);
2249                         return -EBUSY;
2250                 }
2251         }
2252         return 0;
2253 }
2254
2255 /* returns the "mcp load_code" according to global load_count array */
2256 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2257 {
2258         int path = BP_PATH(bp);
2259
2260         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2261            path, load_count[path][0], load_count[path][1],
2262            load_count[path][2]);
2263         load_count[path][0]++;
2264         load_count[path][1 + port]++;
2265         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2266            path, load_count[path][0], load_count[path][1],
2267            load_count[path][2]);
2268         if (load_count[path][0] == 1)
2269                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2270         else if (load_count[path][1 + port] == 1)
2271                 return FW_MSG_CODE_DRV_LOAD_PORT;
2272         else
2273                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2274 }
2275
2276 /* mark PMF if applicable */
2277 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2278 {
2279         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2280             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2281             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2282                 bp->port.pmf = 1;
2283                 /* We need the barrier to ensure the ordering between the
2284                  * writing to bp->port.pmf here and reading it from the
2285                  * bnx2x_periodic_task().
2286                  */
2287                 smp_mb();
2288         } else {
2289                 bp->port.pmf = 0;
2290         }
2291
2292         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2293 }
2294
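/* Advertise DCC and AFEX support to the MFW via shmem2 (only for the
 * function that performed the COMMON part of the init) and invalidate the
 * AFEX default VLAN tag.
 */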
2295 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2296 {
2297         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2298              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2299             (bp->common.shmem2_base)) {
2300                 if (SHMEM2_HAS(bp, dcc_support))
2301                         SHMEM2_WR(bp, dcc_support,
2302                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2303                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2304                 if (SHMEM2_HAS(bp, afex_driver_support))
2305                         SHMEM2_WR(bp, afex_driver_support,
2306                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2307         }
2308
2309         /* Set AFEX default VLAN tag to an invalid value */
2310         bp->afex_def_vlan_tag = -1;
2311 }
2312
2313 /**
2314  * bnx2x_bz_fp - zero content of the fastpath structure.
2315  *
2316  * @bp:         driver handle
2317  * @index:      fastpath index to be zeroed
2318  *
2319  * Makes sure the contents of the bp->fp[index].napi is kept
2320  * intact.
2321  */
2322 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2323 {
2324         struct bnx2x_fastpath *fp = &bp->fp[index];
2325
2326         int cos;
2327         struct napi_struct orig_napi = fp->napi;
2328         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2329         /* bzero bnx2x_fastpath contents */
2330         if (fp->tpa_info)
2331                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2332                        sizeof(struct bnx2x_agg_info));
2333         memset(fp, 0, sizeof(*fp));
2334
2335         /* Restore the NAPI object as it has been already initialized */
2336         fp->napi = orig_napi;
2337         fp->tpa_info = orig_tpa_info;
2338         fp->bp = bp;
2339         fp->index = index;
2340         if (IS_ETH_FP(fp))
2341                 fp->max_cos = bp->max_cos;
2342         else
2343                 /* Special queues support only one CoS */
2344                 fp->max_cos = 1;
2345
2346         /* Init txdata pointers */
2347         if (IS_FCOE_FP(fp))
2348                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2349         if (IS_ETH_FP(fp))
2350                 for_each_cos_in_tx_queue(fp, cos)
2351                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2352                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2353
2354         /*
2355          * Set the TPA flag for each queue. The TPA flag determines the queue's
2356          * minimal size, so it must be set prior to queue memory allocation.
2357          */
2358         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2359                                   (bp->flags & GRO_ENABLE_FLAG &&
2360                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
2361         if (bp->flags & TPA_ENABLE_FLAG)
2362                 fp->mode = TPA_MODE_LRO;
2363         else if (bp->flags & GRO_ENABLE_FLAG)
2364                 fp->mode = TPA_MODE_GRO;
2365
2366         /* We don't want TPA on an FCoE L2 ring */
2367         if (IS_FCOE_FP(fp))
2368                 fp->disable_tpa = 1;
2369 }
2370
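/* Load the CNIC (FCoE/iSCSI) part of the driver: allocate CNIC memory and
 * fastpath resources, account for the extra queues, enable CNIC NAPI,
 * initialize the HW/SW state and, on a PF, start the timer scan and set up
 * the CNIC queues before notifying the CNIC driver that it may start.
 */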
2371 int bnx2x_load_cnic(struct bnx2x *bp)
2372 {
2373         int i, rc, port = BP_PORT(bp);
2374
2375         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2376
2377         mutex_init(&bp->cnic_mutex);
2378
2379         if (IS_PF(bp)) {
2380                 rc = bnx2x_alloc_mem_cnic(bp);
2381                 if (rc) {
2382                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2383                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2384                 }
2385         }
2386
2387         rc = bnx2x_alloc_fp_mem_cnic(bp);
2388         if (rc) {
2389                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2390                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2391         }
2392
2393         /* Update the number of queues with the cnic queues */
2394         rc = bnx2x_set_real_num_queues(bp, 1);
2395         if (rc) {
2396                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2397                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2398         }
2399
2400         /* Add all CNIC NAPI objects */
2401         bnx2x_add_all_napi_cnic(bp);
2402         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2403         bnx2x_napi_enable_cnic(bp);
2404
2405         rc = bnx2x_init_hw_func_cnic(bp);
2406         if (rc)
2407                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2408
2409         bnx2x_nic_init_cnic(bp);
2410
2411         if (IS_PF(bp)) {
2412                 /* Enable Timer scan */
2413                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2414
2415                 /* setup cnic queues */
2416                 for_each_cnic_queue(bp, i) {
2417                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2418                         if (rc) {
2419                                 BNX2X_ERR("Queue setup failed\n");
2420                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2421                         }
2422                 }
2423         }
2424
2425         /* Initialize Rx filter. */
2426         netif_addr_lock_bh(bp->dev);
2427         bnx2x_set_rx_mode(bp->dev);
2428         netif_addr_unlock_bh(bp->dev);
2429
2430         /* re-read iscsi info */
2431         bnx2x_get_iscsi_info(bp);
2432         bnx2x_setup_cnic_irq_info(bp);
2433         bnx2x_setup_cnic_info(bp);
2434         bp->cnic_loaded = true;
2435         if (bp->state == BNX2X_STATE_OPEN)
2436                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2437
2438
2439         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2440
2441         return 0;
2442
2443 #ifndef BNX2X_STOP_ON_ERROR
2444 load_error_cnic2:
2445         /* Disable Timer scan */
2446         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2447
2448 load_error_cnic1:
2449         bnx2x_napi_disable_cnic(bp);
2450         /* Update the number of queues without the cnic queues */
2451         rc = bnx2x_set_real_num_queues(bp, 0);
2452         if (rc)
2453                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2454 load_error_cnic0:
2455         BNX2X_ERR("CNIC-related load failed\n");
2456         bnx2x_free_fp_mem_cnic(bp);
2457         bnx2x_free_mem_cnic(bp);
2458         return rc;
2459 #endif /* ! BNX2X_STOP_ON_ERROR */
2460 }
2461
2462 /* must be called with rtnl_lock */
2463 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2464 {
2465         int port = BP_PORT(bp);
2466         int i, rc = 0, load_code = 0;
2467
2468         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2469         DP(NETIF_MSG_IFUP,
2470            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2471
2472 #ifdef BNX2X_STOP_ON_ERROR
2473         if (unlikely(bp->panic)) {
2474                 BNX2X_ERR("Can't load NIC when there is panic\n");
2475                 return -EPERM;
2476         }
2477 #endif
2478
2479         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2480
2481         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2482         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2483                 &bp->last_reported_link.link_report_flags);
2484
2485         if (IS_PF(bp))
2486                 /* must be called before memory allocation and HW init */
2487                 bnx2x_ilt_set_info(bp);
2488
2489         /*
2490          * Zero fastpath structures preserving invariants like napi, which are
2491          * allocated only once, fp index, max_cos, bp pointer.
2492          * Also set fp->disable_tpa and txdata_ptr.
2493          */
2494         DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2495         for_each_queue(bp, i)
2496                 bnx2x_bz_fp(bp, i);
2497         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2498                                   bp->num_cnic_queues) *
2499                                   sizeof(struct bnx2x_fp_txdata));
2500
2501         bp->fcoe_init = false;
2502
2503         /* Set the receive queues buffer size */
2504         bnx2x_set_rx_buf_size(bp);
2505
2506         if (IS_PF(bp)) {
2507                 rc = bnx2x_alloc_mem(bp);
2508                 if (rc) {
2509                         BNX2X_ERR("Unable to allocate bp memory\n");
2510                         return rc;
2511                 }
2512         }
2513
2514         /* Allocate memory for FW statistics */
2515         if (bnx2x_alloc_fw_stats_mem(bp))
2516                 LOAD_ERROR_EXIT(bp, load_error0);
2517
2518         /* needs to be done after alloc mem, since it's self-adjusting to the amount
2519          * of memory available for RSS queues
2520          */
2521         rc = bnx2x_alloc_fp_mem(bp);
2522         if (rc) {
2523                 BNX2X_ERR("Unable to allocate memory for fps\n");
2524                 LOAD_ERROR_EXIT(bp, load_error0);
2525         }
2526
2527         /* request pf to initialize status blocks */
2528         if (IS_VF(bp)) {
2529                 rc = bnx2x_vfpf_init(bp);
2530                 if (rc)
2531                         LOAD_ERROR_EXIT(bp, load_error0);
2532         }
2533
2534         /* Since bnx2x_alloc_mem() may update
2535          * bp->num_queues, bnx2x_set_real_num_queues() should always
2536          * come after it. At this stage cnic queues are not counted.
2537          */
2538         rc = bnx2x_set_real_num_queues(bp, 0);
2539         if (rc) {
2540                 BNX2X_ERR("Unable to set real_num_queues\n");
2541                 LOAD_ERROR_EXIT(bp, load_error0);
2542         }
2543
2544         /* Configure multi-CoS mappings in the kernel.
2545          * This configuration may be overridden by a multi-class queue discipline
2546          * or by a dcbx negotiation result.
2547          */
2548         bnx2x_setup_tc(bp->dev, bp->max_cos);
2549
2550         /* Add all NAPI objects */
2551         bnx2x_add_all_napi(bp);
2552         DP(NETIF_MSG_IFUP, "napi added\n");
2553         bnx2x_napi_enable(bp);
2554
2555         if (IS_PF(bp)) {
2556                 /* set pf load just before approaching the MCP */
2557                 bnx2x_set_pf_load(bp);
2558
2559                 /* if mcp exists send load request and analyze response */
2560                 if (!BP_NOMCP(bp)) {
2561                         /* attempt to load pf */
2562                         rc = bnx2x_nic_load_request(bp, &load_code);
2563                         if (rc)
2564                                 LOAD_ERROR_EXIT(bp, load_error1);
2565
2566                         /* what did mcp say? */
2567                         rc = bnx2x_nic_load_analyze_req(bp, load_code);
2568                         if (rc) {
2569                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2570                                 LOAD_ERROR_EXIT(bp, load_error2);
2571                         }
2572                 } else {
2573                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2574                 }
2575
2576                 /* mark pmf if applicable */
2577                 bnx2x_nic_load_pmf(bp, load_code);
2578
2579                 /* Init Function state controlling object */
2580                 bnx2x__init_func_obj(bp);
2581
2582                 /* Initialize HW */
2583                 rc = bnx2x_init_hw(bp, load_code);
2584                 if (rc) {
2585                         BNX2X_ERR("HW init failed, aborting\n");
2586                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2587                         LOAD_ERROR_EXIT(bp, load_error2);
2588                 }
2589         }
2590
2591         bnx2x_pre_irq_nic_init(bp);
2592
2593         /* Connect to IRQs */
2594         rc = bnx2x_setup_irqs(bp);
2595         if (rc) {
2596                 BNX2X_ERR("setup irqs failed\n");
2597                 if (IS_PF(bp))
2598                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2599                 LOAD_ERROR_EXIT(bp, load_error2);
2600         }
2601
2602         /* Init per-function objects */
2603         if (IS_PF(bp)) {
2604                 /* Setup NIC internals and enable interrupts */
2605                 bnx2x_post_irq_nic_init(bp, load_code);
2606
2607                 bnx2x_init_bp_objs(bp);
2608                 bnx2x_iov_nic_init(bp);
2609
2610                 /* Set AFEX default VLAN tag to an invalid value */
2611                 bp->afex_def_vlan_tag = -1;
2612                 bnx2x_nic_load_afex_dcc(bp, load_code);
2613                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2614                 rc = bnx2x_func_start(bp);
2615                 if (rc) {
2616                         BNX2X_ERR("Function start failed!\n");
2617                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2618
2619                         LOAD_ERROR_EXIT(bp, load_error3);
2620                 }
2621
2622                 /* Send LOAD_DONE command to MCP */
2623                 if (!BP_NOMCP(bp)) {
2624                         load_code = bnx2x_fw_command(bp,
2625                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2626                         if (!load_code) {
2627                                 BNX2X_ERR("MCP response failure, aborting\n");
2628                                 rc = -EBUSY;
2629                                 LOAD_ERROR_EXIT(bp, load_error3);
2630                         }
2631                 }
2632
2633                 /* initialize FW coalescing state machines in RAM */
2634                 bnx2x_update_coalesce(bp);
2635
2636                 /* setup the leading queue */
2637                 rc = bnx2x_setup_leading(bp);
2638                 if (rc) {
2639                         BNX2X_ERR("Setup leading failed!\n");
2640                         LOAD_ERROR_EXIT(bp, load_error3);
2641                 }
2642
2643                 /* set up the rest of the queues */
2644                 for_each_nondefault_eth_queue(bp, i) {
2645                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2646                         if (rc) {
2647                                 BNX2X_ERR("Queue setup failed\n");
2648                                 LOAD_ERROR_EXIT(bp, load_error3);
2649                         }
2650                 }
2651
2652                 /* setup rss */
2653                 rc = bnx2x_init_rss_pf(bp);
2654                 if (rc) {
2655                         BNX2X_ERR("PF RSS init failed\n");
2656                         LOAD_ERROR_EXIT(bp, load_error3);
2657                 }
2658
2659         } else { /* vf */
2660                 for_each_eth_queue(bp, i) {
2661                         rc = bnx2x_vfpf_setup_q(bp, i);
2662                         if (rc) {
2663                                 BNX2X_ERR("Queue setup failed\n");
2664                                 LOAD_ERROR_EXIT(bp, load_error3);
2665                         }
2666                 }
2667         }
2668
2669         /* Now when Clients are configured we are ready to work */
2670         bp->state = BNX2X_STATE_OPEN;
2671
2672         /* Configure a ucast MAC */
2673         if (IS_PF(bp))
2674                 rc = bnx2x_set_eth_mac(bp, true);
2675         else /* vf */
2676                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2677                                            true);
2678         if (rc) {
2679                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2680                 LOAD_ERROR_EXIT(bp, load_error3);
2681         }
2682
2683         if (IS_PF(bp) && bp->pending_max) {
2684                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2685                 bp->pending_max = 0;
2686         }
2687
2688         if (bp->port.pmf) {
2689                 rc = bnx2x_initial_phy_init(bp, load_mode);
2690                 if (rc)
2691                         LOAD_ERROR_EXIT(bp, load_error3);
2692         }
2693         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2694
2695         /* Start fast path */
2696
2697         /* Initialize Rx filter. */
2698         netif_addr_lock_bh(bp->dev);
2699         bnx2x_set_rx_mode(bp->dev);
2700         netif_addr_unlock_bh(bp->dev);
2701
2702         /* Start the Tx */
2703         switch (load_mode) {
2704         case LOAD_NORMAL:
2705                 /* Tx queues should only be re-enabled */
2706                 netif_tx_wake_all_queues(bp->dev);
2707                 break;
2708
2709         case LOAD_OPEN:
2710                 netif_tx_start_all_queues(bp->dev);
2711                 smp_mb__after_clear_bit();
2712                 break;
2713
2714         case LOAD_DIAG:
2715         case LOAD_LOOPBACK_EXT:
2716                 bp->state = BNX2X_STATE_DIAG;
2717                 break;
2718
2719         default:
2720                 break;
2721         }
2722
2723         if (bp->port.pmf)
2724                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2725         else
2726                 bnx2x__link_status_update(bp);
2727
2728         /* start the timer */
2729         mod_timer(&bp->timer, jiffies + bp->current_interval);
2730
2731         if (CNIC_ENABLED(bp))
2732                 bnx2x_load_cnic(bp);
2733
2734         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2735                 /* mark driver is loaded in shmem2 */
2736                 u32 val;
2737                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2738                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2739                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2740                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2741         }
2742
2743         /* Wait for all pending SP commands to complete */
2744         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2745                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2746                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2747                 return -EBUSY;
2748         }
2749
2750         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2751         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2752                 bnx2x_dcbx_init(bp, false);
2753
2754         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2755
2756         return 0;
2757
2758 #ifndef BNX2X_STOP_ON_ERROR
2759 load_error3:
2760         if (IS_PF(bp)) {
2761                 bnx2x_int_disable_sync(bp, 1);
2762
2763                 /* Clean queueable objects */
2764                 bnx2x_squeeze_objects(bp);
2765         }
2766
2767         /* Free SKBs, SGEs, TPA pool and driver internals */
2768         bnx2x_free_skbs(bp);
2769         for_each_rx_queue(bp, i)
2770                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2771
2772         /* Release IRQs */
2773         bnx2x_free_irq(bp);
2774 load_error2:
2775         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2776                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2777                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2778         }
2779
2780         bp->port.pmf = 0;
2781 load_error1:
2782         bnx2x_napi_disable(bp);
2783         bnx2x_del_all_napi(bp);
2784
2785         /* clear pf_load status, as it was already set */
2786         if (IS_PF(bp))
2787                 bnx2x_clear_pf_load(bp);
2788 load_error0:
2789         bnx2x_free_fp_mem(bp);
2790         bnx2x_free_fw_stats_mem(bp);
2791         bnx2x_free_mem(bp);
2792
2793         return rc;
2794 #endif /* ! BNX2X_STOP_ON_ERROR */
2795 }
2796
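/* Wait until every Tx fastpath (on every CoS) has completed its pending
 * packets; the error code from bnx2x_clean_tx_queue() is returned if a
 * queue fails to drain.
 */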
2797 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2798 {
2799         u8 rc = 0, cos, i;
2800
2801         /* Wait until tx fastpath tasks complete */
2802         for_each_tx_queue(bp, i) {
2803                 struct bnx2x_fastpath *fp = &bp->fp[i];
2804
2805                 for_each_cos_in_tx_queue(fp, cos)
2806                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2807                 if (rc)
2808                         return rc;
2809         }
2810         return 0;
2811 }
2812
2813 /* must be called with rtnl_lock */
2814 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2815 {
2816         int i;
2817         bool global = false;
2818
2819         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2820
2821         /* mark driver is unloaded in shmem2 */
2822         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2823                 u32 val;
2824                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2825                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2826                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2827         }
2828
2829         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2830             (bp->state == BNX2X_STATE_CLOSED ||
2831              bp->state == BNX2X_STATE_ERROR)) {
2832                 /* We can get here if the driver has been unloaded
2833                  * during parity error recovery and is either waiting for a
2834                  * leader to complete or for other functions to unload and
2835                  * then ifdown has been issued. In this case we want to
2836                  * unload and let other functions complete the recovery
2837                  * process.
2838                  */
2839                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2840                 bp->is_leader = 0;
2841                 bnx2x_release_leader_lock(bp);
2842                 smp_mb();
2843
2844                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2845                 BNX2X_ERR("Can't unload in closed or error state\n");
2846                 return -EINVAL;
2847         }
2848
2849         /* Nothing to do during unload if previous bnx2x_nic_load()
2850          * has not completed successfully - all resources are released.
2851          *
2852          * We can get here only after an unsuccessful ndo_* callback, during
2853          * which the dev->IFF_UP flag is still on.
2854          */
2855         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2856                 return 0;
2857
2858         /* It's important to set the bp->state to the value different from
2859          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2860          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2861          */
2862         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2863         smp_mb();
2864
2865         if (CNIC_LOADED(bp))
2866                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2867
2868         /* Stop Tx */
2869         bnx2x_tx_disable(bp);
2870         netdev_reset_tc(bp->dev);
2871
2872         bp->rx_mode = BNX2X_RX_MODE_NONE;
2873
2874         del_timer_sync(&bp->timer);
2875
2876         if (IS_PF(bp)) {
2877                 /* Set ALWAYS_ALIVE bit in shmem */
2878                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2879                 bnx2x_drv_pulse(bp);
2880                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2881                 bnx2x_save_statistics(bp);
2882         }
2883
2884         /* wait till consumers catch up with producers in all queues */
2885         bnx2x_drain_tx_queues(bp);
2886
2887         /* if VF, indicate to PF that this function is going down (PF will
2888          * delete sp elements and clear initializations)
2889          */
2890         if (IS_VF(bp))
2891                 bnx2x_vfpf_close_vf(bp);
2892         else if (unload_mode != UNLOAD_RECOVERY)
2893                 /* if this is a normal/close unload, we need to clean up the chip */
2894                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2895         else {
2896                 /* Send the UNLOAD_REQUEST to the MCP */
2897                 bnx2x_send_unload_req(bp, unload_mode);
2898
2899                 /*
2900                  * Prevent transactions to host from the functions on the
2901                  * engine that doesn't reset global blocks in case of global
2902                  * attention once global blocks are reset and gates are opened
2903                  * (the engine whose leader will perform the recovery
2904                  * last).
2905                  */
2906                 if (!CHIP_IS_E1x(bp))
2907                         bnx2x_pf_disable(bp);
2908
2909                 /* Disable HW interrupts, NAPI */
2910                 bnx2x_netif_stop(bp, 1);
2911                 /* Delete all NAPI objects */
2912                 bnx2x_del_all_napi(bp);
2913                 if (CNIC_LOADED(bp))
2914                         bnx2x_del_all_napi_cnic(bp);
2915                 /* Release IRQs */
2916                 bnx2x_free_irq(bp);
2917
2918                 /* Report UNLOAD_DONE to MCP */
2919                 bnx2x_send_unload_done(bp, false);
2920         }
2921
2922         /*
2923          * At this stage no more interrupts will arrive so we may safely clean
2924          * the queueable objects here in case they failed to get cleaned so far.
2925          */
2926         if (IS_PF(bp))
2927                 bnx2x_squeeze_objects(bp);
2928
2929         /* There should be no more pending SP commands at this stage */
2930         bp->sp_state = 0;
2931
2932         bp->port.pmf = 0;
2933
2934         /* Free SKBs, SGEs, TPA pool and driver internals */
2935         bnx2x_free_skbs(bp);
2936         if (CNIC_LOADED(bp))
2937                 bnx2x_free_skbs_cnic(bp);
2938         for_each_rx_queue(bp, i)
2939                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2940
2941         bnx2x_free_fp_mem(bp);
2942         if (CNIC_LOADED(bp))
2943                 bnx2x_free_fp_mem_cnic(bp);
2944
2945         if (IS_PF(bp)) {
2946                 if (CNIC_LOADED(bp))
2947                         bnx2x_free_mem_cnic(bp);
2948                 bnx2x_free_mem(bp);
2949         }
2950         bp->state = BNX2X_STATE_CLOSED;
2951         bp->cnic_loaded = false;
2952
2953         /* Check if there are pending parity attentions. If there are - set
2954          * RECOVERY_IN_PROGRESS.
2955          */
2956         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2957                 bnx2x_set_reset_in_progress(bp);
2958
2959                 /* Set RESET_IS_GLOBAL if needed */
2960                 if (global)
2961                         bnx2x_set_reset_global(bp);
2962         }
2963
2964
2965         /* The last driver must disable the "close the gate" functionality if
2966          * there is no parity attention or "process kill" pending.
2967          */
2968         if (IS_PF(bp) &&
2969             !bnx2x_clear_pf_load(bp) &&
2970             bnx2x_reset_is_done(bp, BP_PATH(bp)))
2971                 bnx2x_disable_close_the_gate(bp);
2972
2973         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2974
2975         return 0;
2976 }
2977
2978 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2979 {
2980         u16 pmcsr;
2981
2982         /* If there is no power capability, silently succeed */
2983         if (!bp->pm_cap) {
2984                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2985                 return 0;
2986         }
2987
2988         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2989
2990         switch (state) {
2991         case PCI_D0:
2992                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2993                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2994                                        PCI_PM_CTRL_PME_STATUS));
2995
2996                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2997                         /* delay required during transition out of D3hot */
2998                         msleep(20);
2999                 break;
3000
3001         case PCI_D3hot:
3002                 /* If there are other clients above, don't
3003                    shut down the power */
3004                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3005                         return 0;
3006                 /* Don't shut down the power for emulation and FPGA */
3007                 if (CHIP_REV_IS_SLOW(bp))
3008                         return 0;
3009
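                     /* PCI_PM_CTRL_STATE_MASK covers the PowerState field of
                      * PMCSR; writing 3 there selects D3hot (D0 = 0, D1 = 1,
                      * D2 = 2, D3hot = 3 per the PCI PM specification).
                      */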
3010                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3011                 pmcsr |= 3;
3012
3013                 if (bp->wol)
3014                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3015
3016                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3017                                       pmcsr);
3018
3019                 /* No more memory access after this point until
3020                  * device is brought back to D0.
3021                  */
3022                 break;
3023
3024         default:
3025                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3026                 return -EINVAL;
3027         }
3028         return 0;
3029 }
3030
3031 /*
3032  * net_device service functions
3033  */
3034 int bnx2x_poll(struct napi_struct *napi, int budget)
3035 {
3036         int work_done = 0;
3037         u8 cos;
3038         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3039                                                  napi);
3040         struct bnx2x *bp = fp->bp;
3041
3042         while (1) {
3043 #ifdef BNX2X_STOP_ON_ERROR
3044                 if (unlikely(bp->panic)) {
3045                         napi_complete(napi);
3046                         return 0;
3047                 }
3048 #endif
3049
3050                 for_each_cos_in_tx_queue(fp, cos)
3051                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3052                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3053
3054                 if (bnx2x_has_rx_work(fp)) {
3055                         work_done += bnx2x_rx_int(fp, budget - work_done);
3056
3057                         /* must not complete if we consumed full budget */
3058                         if (work_done >= budget)
3059                                 break;
3060                 }
3061
3062                 /* Fall out from the NAPI loop if needed */
3063                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3064
3065                         /* No need to update SB for FCoE L2 ring as long as
3066                          * it's connected to the default SB and the SB
3067                          * has been updated when NAPI was scheduled.
3068                          */
3069                         if (IS_FCOE_FP(fp)) {
3070                                 napi_complete(napi);
3071                                 break;
3072                         }
3073                         bnx2x_update_fpsb_idx(fp);
3074                         /* bnx2x_has_rx_work() reads the status block,
3075                          * thus we need to ensure that status block indices
3076                          * have been actually read (bnx2x_update_fpsb_idx)
3077                          * prior to this check (bnx2x_has_rx_work) so that
3078                          * we won't write the "newer" value of the status block
3079                          * to IGU (if there was a DMA right after
3080                          * bnx2x_has_rx_work and if there is no rmb, the memory
3081                          * reading (bnx2x_update_fpsb_idx) may be postponed
3082                          * to right before bnx2x_ack_sb). In this case there
3083                          * will never be another interrupt until there is
3084                          * another update of the status block, while there
3085                          * is still unhandled work.
3086                          */
3087                         rmb();
3088
3089                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3090                                 napi_complete(napi);
3091                                 /* Re-enable interrupts */
3092                                 DP(NETIF_MSG_RX_STATUS,
3093                                    "Update index to %d\n", fp->fp_hc_idx);
3094                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3095                                              le16_to_cpu(fp->fp_hc_idx),
3096                                              IGU_INT_ENABLE, 1);
3097                                 break;
3098                         }
3099                 }
3100         }
3101
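             /* Note: returning work_done == budget here (without calling
              * napi_complete()) keeps this NAPI instance scheduled; interrupts
              * are only re-enabled via bnx2x_ack_sb() above once both Rx and
              * Tx work is drained and the status block index was re-read.
              */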
3102         return work_done;
3103 }
3104
3105 /* we split the first BD into headers and data BDs
3106  * to ease the pain of our fellow microcode engineers;
3107  * we use one mapping for both BDs
3108  */
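     /* Example (illustrative numbers): for a TSO skb whose linear part holds
      * 1514 bytes with hlen = 66 bytes of headers, the start BD is trimmed to
      * nbytes = 66 and the new data BD covers the remaining 1448 bytes at
      * mapping + 66.
      */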
3109 static u16 bnx2x_tx_split(struct bnx2x *bp,
3110                           struct bnx2x_fp_txdata *txdata,
3111                           struct sw_tx_bd *tx_buf,
3112                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3113                           u16 bd_prod)
3114 {
3115         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3116         struct eth_tx_bd *d_tx_bd;
3117         dma_addr_t mapping;
3118         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3119
3120         /* first fix first BD */
3121         h_tx_bd->nbytes = cpu_to_le16(hlen);
3122
3123         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3124            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3125
3126         /* now get a new data BD
3127          * (after the pbd) and fill it */
3128         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3129         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3130
3131         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3132                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3133
3134         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3135         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3136         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3137
3138         /* this marks the BD as one that has no individual mapping */
3139         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3140
3141         DP(NETIF_MSG_TX_QUEUED,
3142            "TSO split data size is %d (%x:%x)\n",
3143            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3144
3145         /* update tx_bd */
3146         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3147
3148         return bd_prod;
3149 }
3150
3151 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3152 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
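     /* bnx2x_csum_fix: when fix > 0 the partial checksum of the 'fix' bytes
      * preceding t_header is subtracted from csum, when fix < 0 the partial
      * checksum of the -fix bytes starting at t_header is added to it; the
      * folded one's-complement result is returned byte-swapped for the
      * parsing BD.
      */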
3153 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3154 {
3155         __sum16 tsum = (__force __sum16) csum;
3156
3157         if (fix > 0)
3158                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3159                                   csum_partial(t_header - fix, fix, 0)));
3160
3161         else if (fix < 0)
3162                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3163                                   csum_partial(t_header, -fix, 0)));
3164
3165         return bswab16(tsum);
3166 }
3167
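     /* Classify the skb for the Tx path. For example (derived from the checks
      * below), a plain TCPv4 TSO frame yields XMIT_GSO_V4 | XMIT_CSUM_V4 |
      * XMIT_CSUM_TCP, while an encapsulated (tunneled) frame additionally
      * carries the corresponding XMIT_*_ENC_* bits.
      */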
3168 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3169 {
3170         u32 rc;
3171         __u8 prot = 0;
3172         __be16 protocol;
3173
3174         if (skb->ip_summed != CHECKSUM_PARTIAL)
3175                 return XMIT_PLAIN;
3176
3177         protocol = vlan_get_protocol(skb);
3178         if (protocol == htons(ETH_P_IPV6)) {
3179                 rc = XMIT_CSUM_V6;
3180                 prot = ipv6_hdr(skb)->nexthdr;
3181         } else {
3182                 rc = XMIT_CSUM_V4;
3183                 prot = ip_hdr(skb)->protocol;
3184         }
3185
3186         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3187                 if (inner_ip_hdr(skb)->version == 6) {
3188                         rc |= XMIT_CSUM_ENC_V6;
3189                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3190                                 rc |= XMIT_CSUM_TCP;
3191                 } else {
3192                         rc |= XMIT_CSUM_ENC_V4;
3193                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3194                                 rc |= XMIT_CSUM_TCP;
3195                 }
3196         }
3197         if (prot == IPPROTO_TCP)
3198                 rc |= XMIT_CSUM_TCP;
3199
3200         if (skb_is_gso_v6(skb)) {
3201                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3202                 if (rc & XMIT_CSUM_ENC)
3203                         rc |= XMIT_GSO_ENC_V6;
3204         } else if (skb_is_gso(skb)) {
3205                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3206                 if (rc & XMIT_CSUM_ENC)
3207                         rc |= XMIT_GSO_ENC_V4;
3208         }
3209
3210         return rc;
3211 }
3212
3213 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3214 /* check if packet requires linearization (packet is too fragmented)
3215    no need to check fragmentation if page size > 8K (there will be no
3216    violation of FW restrictions) */
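     /* Illustration (hypothetical value): if MAX_FETCH_BD were 13, wnd_size
      * would be 10, and an LSO skb is sent as-is only if every window of 10
      * consecutive BDs (linear part included) carries at least gso_size
      * bytes; otherwise it is linearized.
      */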
3217 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3218                              u32 xmit_type)
3219 {
3220         int to_copy = 0;
3221         int hlen = 0;
3222         int first_bd_sz = 0;
3223
3224         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3225         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3226
3227                 if (xmit_type & XMIT_GSO) {
3228                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3229                         /* Check if LSO packet needs to be copied:
3230                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3231                         int wnd_size = MAX_FETCH_BD - 3;
3232                         /* Number of windows to check */
3233                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3234                         int wnd_idx = 0;
3235                         int frag_idx = 0;
3236                         u32 wnd_sum = 0;
3237
3238                         /* Headers length */
3239                         hlen = (int)(skb_transport_header(skb) - skb->data) +
3240                                 tcp_hdrlen(skb);
3241
3242                         /* Amount of data (w/o headers) on linear part of SKB*/
3243                         first_bd_sz = skb_headlen(skb) - hlen;
3244
3245                         wnd_sum  = first_bd_sz;
3246
3247                         /* Calculate the first sum - it's special */
3248                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3249                                 wnd_sum +=
3250                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3251
3252                         /* If there was data on linear skb data - check it */
3253                         if (first_bd_sz > 0) {
3254                                 if (unlikely(wnd_sum < lso_mss)) {
3255                                         to_copy = 1;
3256                                         goto exit_lbl;
3257                                 }
3258
3259                                 wnd_sum -= first_bd_sz;
3260                         }
3261
3262                         /* Others are easier: run through the frag list and
3263                            check all windows */
3264                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3265                                 wnd_sum +=
3266                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3267
3268                                 if (unlikely(wnd_sum < lso_mss)) {
3269                                         to_copy = 1;
3270                                         break;
3271                                 }
3272                                 wnd_sum -=
3273                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3274                         }
3275                 } else {
3276                         /* in the non-LSO case, a too-fragmented packet
3277                            should always be linearized */
3278                         to_copy = 1;
3279                 }
3280         }
3281
3282 exit_lbl:
3283         if (unlikely(to_copy))
3284                 DP(NETIF_MSG_TX_QUEUED,
3285                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3286                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3287                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3288
3289         return to_copy;
3290 }
3291 #endif
3292
3293 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3294                                  u32 xmit_type)
3295 {
3296         struct ipv6hdr *ipv6;
3297
3298         *parsing_data |= (skb_shinfo(skb)->gso_size <<
3299                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3300                               ETH_TX_PARSE_BD_E2_LSO_MSS;
3301
3302         if (xmit_type & XMIT_GSO_ENC_V6)
3303                 ipv6 = inner_ipv6_hdr(skb);
3304         else if (xmit_type & XMIT_GSO_V6)
3305                 ipv6 = ipv6_hdr(skb);
3306         else
3307                 ipv6 = NULL;
3308
3309         if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3310                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3311 }
3312
3313 /**
3314  * bnx2x_set_pbd_gso - update PBD in GSO case.
3315  *
3316  * @skb:        packet skb
3317  * @pbd:        parse BD
3318  * @xmit_type:  xmit flags
3319  */
3320 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3321                               struct eth_tx_parse_bd_e1x *pbd,
3322                               struct eth_tx_start_bd *tx_start_bd,
3323                               u32 xmit_type)
3324 {
3325         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3326         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3327         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3328
3329         if (xmit_type & XMIT_GSO_V4) {
3330                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3331                 pbd->tcp_pseudo_csum =
3332                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3333                                                    ip_hdr(skb)->daddr,
3334                                                    0, IPPROTO_TCP, 0));
3335
3336                 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3337                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3338         } else {
3339                 pbd->tcp_pseudo_csum =
3340                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3341                                                  &ipv6_hdr(skb)->daddr,
3342                                                  0, IPPROTO_TCP, 0));
3343         }
3344
3345         pbd->global_data |=
3346                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3347 }
3348
3349 /**
3350  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3351  *
3352  * @bp:                 driver handle
3353  * @skb:                packet skb
3354  * @parsing_data:       data to be updated
3355  * @xmit_type:          xmit flags
3356  *
3357  * 57712/578xx related, when skb has encapsulation
3358  */
3359 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3360                                  u32 *parsing_data, u32 xmit_type)
3361 {
3362         *parsing_data |=
3363                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3364                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3365                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3366
3367         if (xmit_type & XMIT_CSUM_TCP) {
3368                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3369                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3370                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3371
3372                 return skb_inner_transport_header(skb) +
3373                         inner_tcp_hdrlen(skb) - skb->data;
3374         }
3375
3376         /* We support checksum offload for TCP and UDP only.
3377          * No need to pass the UDP header length - it's a constant.
3378          */
3379         return skb_inner_transport_header(skb) +
3380                 sizeof(struct udphdr) - skb->data;
3381 }
3382
3383 /**
3384  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3385  *
3386  * @bp:                 driver handle
3387  * @skb:                packet skb
3388  * @parsing_data:       data to be updated
3389  * @xmit_type:          xmit flags
3390  *
3391  * 57712/578xx related
3392  */
3393 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3394                                 u32 *parsing_data, u32 xmit_type)
3395 {
3396         *parsing_data |=
3397                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3398                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3399                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3400
3401         if (xmit_type & XMIT_CSUM_TCP) {
3402                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3403                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3404                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3405
3406                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3407         }
3408         /* We support checksum offload for TCP and UDP only.
3409          * No need to pass the UDP header length - it's a constant.
3410          */
3411         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3412 }
3413
3414 /* set FW indication according to inner or outer protocols if tunneled */
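     /* e.g. a UDP/IPv6 checksum offload request ends up with L4_CSUM, IPV6
      * and IS_UDP set in the start BD flags below (L4_CSUM is always set,
      * IS_UDP whenever the L4 protocol is not TCP).
      */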
3415 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3416                                struct eth_tx_start_bd *tx_start_bd,
3417                                u32 xmit_type)
3418 {
3419         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3420
3421         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3422                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3423
3424         if (!(xmit_type & XMIT_CSUM_TCP))
3425                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3426 }
3427
3428 /**
3429  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3430  *
3431  * @bp:         driver handle
3432  * @skb:        packet skb
3433  * @pbd:        parse BD to be updated
3434  * @xmit_type:  xmit flags
3435  */
3436 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3437                              struct eth_tx_parse_bd_e1x *pbd,
3438                              u32 xmit_type)
3439 {
3440         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3441
3442         /* for now NS flag is not used in Linux */
3443         pbd->global_data =
3444                 cpu_to_le16(hlen |
3445                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3446                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3447
3448         pbd->ip_hlen_w = (skb_transport_header(skb) -
3449                         skb_network_header(skb)) >> 1;
3450
3451         hlen += pbd->ip_hlen_w;
3452
3453         /* We support checksum offload for TCP and UDP only */
3454         if (xmit_type & XMIT_CSUM_TCP)
3455                 hlen += tcp_hdrlen(skb) / 2;
3456         else
3457                 hlen += sizeof(struct udphdr) / 2;
3458
3459         pbd->total_hlen_w = cpu_to_le16(hlen);
3460         hlen = hlen*2;
3461
3462         if (xmit_type & XMIT_CSUM_TCP) {
3463                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3464
3465         } else {
3466                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3467
3468                 DP(NETIF_MSG_TX_QUEUED,
3469                    "hlen %d  fix %d  csum before fix %x\n",
3470                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3471
3472                 /* HW bug: fixup the CSUM */
3473                 pbd->tcp_pseudo_csum =
3474                         bnx2x_csum_fix(skb_transport_header(skb),
3475                                        SKB_CS(skb), fix);
3476
3477                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3478                    pbd->tcp_pseudo_csum);
3479         }
3480
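             /* Worked example (illustrative): a 14-byte Ethernet header,
              * 20-byte IPv4 header and 20-byte TCP header give hlen =
              * 7 + 10 + 10 words, so total_hlen_w = 27 and 54 bytes are
              * returned below.
              */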
3481         return hlen;
3482 }
3483
3484 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3485                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3486                                       struct eth_tx_parse_2nd_bd *pbd2,
3487                                       u16 *global_data,
3488                                       u32 xmit_type)
3489 {
3490         u16 hlen_w = 0;
3491         u8 outerip_off, outerip_len = 0;
3492         /* from outer IP to transport */
3493         hlen_w = (skb_inner_transport_header(skb) -
3494                   skb_network_header(skb)) >> 1;
3495
3496         /* transport len */
3497         if (xmit_type & XMIT_CSUM_TCP)
3498                 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3499         else
3500                 hlen_w += sizeof(struct udphdr) >> 1;
3501
3502         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3503
3504         if (xmit_type & XMIT_CSUM_ENC_V4) {
3505                 struct iphdr *iph = ip_hdr(skb);
3506                 pbd2->fw_ip_csum_wo_len_flags_frag =
3507                         bswab16(csum_fold((~iph->check) -
3508                                           iph->tot_len - iph->frag_off));
3509         } else {
3510                 pbd2->fw_ip_hdr_to_payload_w =
3511                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3512         }
3513
3514         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3515
3516         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3517
3518         if (xmit_type & XMIT_GSO_V4) {
3519                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3520
3521                 pbd_e2->data.tunnel_data.pseudo_csum =
3522                         bswab16(~csum_tcpudp_magic(
3523                                         inner_ip_hdr(skb)->saddr,
3524                                         inner_ip_hdr(skb)->daddr,
3525                                         0, IPPROTO_TCP, 0));
3526
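                     /* iphdr->ihl counts 32-bit words; shifting left by one
                      * gives the outer IP header length in the 16-bit words
                      * expected by the 2nd parsing BD.
                      */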
3527                 outerip_len = ip_hdr(skb)->ihl << 1;
3528         } else {
3529                 pbd_e2->data.tunnel_data.pseudo_csum =
3530                         bswab16(~csum_ipv6_magic(
3531                                         &inner_ipv6_hdr(skb)->saddr,
3532                                         &inner_ipv6_hdr(skb)->daddr,
3533                                         0, IPPROTO_TCP, 0));
3534         }
3535
3536         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3537
3538         *global_data |=
3539                 outerip_off |
3540                 (!!(xmit_type & XMIT_CSUM_V6) <<
3541                         ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3542                 (outerip_len <<
3543                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3544                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3545                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3546
3547         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3548                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3549                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3550         }
3551 }
3552
3553 /* called with netif_tx_lock
3554  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3555  * netif_wake_queue()
3556  */
3557 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3558 {
3559         struct bnx2x *bp = netdev_priv(dev);
3560
3561         struct netdev_queue *txq;
3562         struct bnx2x_fp_txdata *txdata;
3563         struct sw_tx_bd *tx_buf;
3564         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3565         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3566         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3567         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3568         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3569         u32 pbd_e2_parsing_data = 0;
3570         u16 pkt_prod, bd_prod;
3571         int nbd, txq_index;
3572         dma_addr_t mapping;
3573         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3574         int i;
3575         u8 hlen = 0;
3576         __le16 pkt_size = 0;
3577         struct ethhdr *eth;
3578         u8 mac_type = UNICAST_ADDRESS;
3579
3580 #ifdef BNX2X_STOP_ON_ERROR
3581         if (unlikely(bp->panic))
3582                 return NETDEV_TX_BUSY;
3583 #endif
3584
3585         txq_index = skb_get_queue_mapping(skb);
3586         txq = netdev_get_tx_queue(dev, txq_index);
3587
3588         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3589
3590         txdata = &bp->bnx2x_txq[txq_index];
3591
3592         /* enable this debug print to view the transmission queue being used
3593         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3594            txq_index, fp_index, txdata_index); */
3595
3596         /* enable this debug print to view the transmission details
3597         DP(NETIF_MSG_TX_QUEUED,
3598            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3599            txdata->cid, fp_index, txdata_index, txdata, fp); */
3600
3601         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3602                         skb_shinfo(skb)->nr_frags +
3603                         BDS_PER_TX_PKT +
3604                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3605                 /* Handle special storage cases separately */
3606                 if (txdata->tx_ring_size == 0) {
3607                         struct bnx2x_eth_q_stats *q_stats =
3608                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3609                         q_stats->driver_filtered_tx_pkt++;
3610                         dev_kfree_skb(skb);
3611                         return NETDEV_TX_OK;
3612                 }
3613                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3614                 netif_tx_stop_queue(txq);
3615                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3616
3617                 return NETDEV_TX_BUSY;
3618         }
3619
3620         DP(NETIF_MSG_TX_QUEUED,
3621            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3622            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3623            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3624            skb->len);
3625
3626         eth = (struct ethhdr *)skb->data;
3627
3628         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3629         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3630                 if (is_broadcast_ether_addr(eth->h_dest))
3631                         mac_type = BROADCAST_ADDRESS;
3632                 else
3633                         mac_type = MULTICAST_ADDRESS;
3634         }
3635
3636 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3637         /* First, check if we need to linearize the skb (due to FW
3638            restrictions). No need to check fragmentation if page size > 8K
3639            (there will be no violation of FW restrictions) */
3640         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3641                 /* Statistics of linearization */
3642                 bp->lin_cnt++;
3643                 if (skb_linearize(skb) != 0) {
3644                         DP(NETIF_MSG_TX_QUEUED,
3645                            "SKB linearization failed - silently dropping this SKB\n");
3646                         dev_kfree_skb_any(skb);
3647                         return NETDEV_TX_OK;
3648                 }
3649         }
3650 #endif
3651         /* Map skb linear data for DMA */
3652         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3653                                  skb_headlen(skb), DMA_TO_DEVICE);
3654         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3655                 DP(NETIF_MSG_TX_QUEUED,
3656                    "SKB mapping failed - silently dropping this SKB\n");
3657                 dev_kfree_skb_any(skb);
3658                 return NETDEV_TX_OK;
3659         }
3660         /*
3661         Please read carefully. First we use one BD which we mark as start,
3662         then we have a parsing info BD (used for TSO or xsum),
3663         and only then we have the rest of the TSO BDs.
3664         (don't forget to mark the last one as last,
3665         and to unmap only AFTER you write to the BD ...)
3666         And above all, all pbd sizes are in words - NOT DWORDS!
3667         */
3668
3669         /* get current pkt produced now - advance it just before sending packet
3670          * since mapping of pages may fail and cause packet to be dropped
3671          */
3672         pkt_prod = txdata->tx_pkt_prod;
3673         bd_prod = TX_BD(txdata->tx_bd_prod);
3674
3675         /* get a tx_buf and first BD
3676          * tx_start_bd may be changed during SPLIT,
3677          * but first_bd will always stay first
3678          */
3679         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3680         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3681         first_bd = tx_start_bd;
3682
3683         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3684
3685         /* header nbd: indirectly zero other flags! */
3686         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3687
3688         /* remember the first BD of the packet */
3689         tx_buf->first_bd = txdata->tx_bd_prod;
3690         tx_buf->skb = skb;
3691         tx_buf->flags = 0;
3692
3693         DP(NETIF_MSG_TX_QUEUED,
3694            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3695            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3696
3697         if (vlan_tx_tag_present(skb)) {
3698                 tx_start_bd->vlan_or_ethertype =
3699                     cpu_to_le16(vlan_tx_tag_get(skb));
3700                 tx_start_bd->bd_flags.as_bitfield |=
3701                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3702         } else {
3703                 /* when transmitting in a vf, start bd must hold the ethertype
3704                  * for fw to enforce it
3705                  */
3706                 if (IS_VF(bp))
3707                         tx_start_bd->vlan_or_ethertype =
3708                                 cpu_to_le16(ntohs(eth->h_proto));
3709                 else
3710                         /* used by FW for packet accounting */
3711                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3712         }
3713
3714         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3715
3716         /* turn on parsing and get a BD */
3717         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3718
3719         if (xmit_type & XMIT_CSUM)
3720                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3721
3722         if (!CHIP_IS_E1x(bp)) {
3723                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3724                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3725
3726                 if (xmit_type & XMIT_CSUM_ENC) {
3727                         u16 global_data = 0;
3728
3729                         /* Set PBD in enc checksum offload case */
3730                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3731                                                       &pbd_e2_parsing_data,
3732                                                       xmit_type);
3733
3734                         /* turn on 2nd parsing and get a BD */
3735                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3736
3737                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3738
3739                         memset(pbd2, 0, sizeof(*pbd2));
3740
3741                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3742                                 (skb_inner_network_header(skb) -
3743                                  skb->data) >> 1;
3744
3745                         if (xmit_type & XMIT_GSO_ENC)
3746                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3747                                                           &global_data,
3748                                                           xmit_type);
3749
3750                         pbd2->global_data = cpu_to_le16(global_data);
3751
3752                         /* add additional parsing BD indication to start BD */
3753                         SET_FLAG(tx_start_bd->general_data,
3754                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3755                         /* set encapsulation flag in start BD */
3756                         SET_FLAG(tx_start_bd->general_data,
3757                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3758                         nbd++;
3759                 } else if (xmit_type & XMIT_CSUM) {
3760                         /* Set PBD in checksum offload case w/o encapsulation */
3761                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3762                                                      &pbd_e2_parsing_data,
3763                                                      xmit_type);
3764                 }
3765
3766                 /* Add the MACs to the parsing BD if this is a VF */
3767                 if (IS_VF(bp)) {
3768                         /* override GRE parameters in BD */
3769                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3770                                               &pbd_e2->data.mac_addr.src_mid,
3771                                               &pbd_e2->data.mac_addr.src_lo,
3772                                               eth->h_source);
3773
3774                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3775                                               &pbd_e2->data.mac_addr.dst_mid,
3776                                               &pbd_e2->data.mac_addr.dst_lo,
3777                                               eth->h_dest);
3778                 }
3779
3780                 SET_FLAG(pbd_e2_parsing_data,
3781                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3782         } else {
3783                 u16 global_data = 0;
3784                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3785                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3786                 /* Set PBD in checksum offload case */
3787                 if (xmit_type & XMIT_CSUM)
3788                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3789
3790                 SET_FLAG(global_data,
3791                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3792                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3793         }
3794
3795         /* Setup the data pointer of the first BD of the packet */
3796         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3797         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3798         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3799         pkt_size = tx_start_bd->nbytes;
3800
3801         DP(NETIF_MSG_TX_QUEUED,
3802            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
3803            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3804            le16_to_cpu(tx_start_bd->nbytes),
3805            tx_start_bd->bd_flags.as_bitfield,
3806            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3807
3808         if (xmit_type & XMIT_GSO) {
3809
3810                 DP(NETIF_MSG_TX_QUEUED,
3811                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3812                    skb->len, hlen, skb_headlen(skb),
3813                    skb_shinfo(skb)->gso_size);
3814
3815                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3816
3817                 if (unlikely(skb_headlen(skb) > hlen)) {
3818                         nbd++;
3819                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3820                                                  &tx_start_bd, hlen,
3821                                                  bd_prod);
3822                 }
3823                 if (!CHIP_IS_E1x(bp))
3824                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3825                                              xmit_type);
3826                 else
3827                         bnx2x_set_pbd_gso(skb, pbd_e1x, tx_start_bd,
3828                                           xmit_type);
3829         }
3830
3831         /* Set the PBD's parsing_data field if not zero
3832          * (for the chips newer than 57711).
3833          */
3834         if (pbd_e2_parsing_data)
3835                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3836
3837         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3838
3839         /* Handle fragmented skb */
3840         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3841                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3842
3843                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3844                                            skb_frag_size(frag), DMA_TO_DEVICE);
3845                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3846                         unsigned int pkts_compl = 0, bytes_compl = 0;
3847
3848                         DP(NETIF_MSG_TX_QUEUED,
3849                            "Unable to map page - dropping packet...\n");
3850
3851                         /* we need to unmap all buffers already mapped
3852                          * for this SKB;
3853                          * first_bd->nbd needs to be properly updated
3854                          * before the call to bnx2x_free_tx_pkt
3855                          */
3856                         first_bd->nbd = cpu_to_le16(nbd);
3857                         bnx2x_free_tx_pkt(bp, txdata,
3858                                           TX_BD(txdata->tx_pkt_prod),
3859                                           &pkts_compl, &bytes_compl);
3860                         return NETDEV_TX_OK;
3861                 }
3862
3863                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3864                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3865                 if (total_pkt_bd == NULL)
3866                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3867
3868                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3869                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3870                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3871                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3872                 nbd++;
3873
3874                 DP(NETIF_MSG_TX_QUEUED,
3875                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3876                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3877                    le16_to_cpu(tx_data_bd->nbytes));
3878         }
3879
3880         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3881
3882         /* update with actual num BDs */
3883         first_bd->nbd = cpu_to_le16(nbd);
3884
3885         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3886
3887         /* now send a tx doorbell, counting the next BD
3888          * if the packet contains or ends with it
3889          */
3890         if (TX_BD_POFF(bd_prod) < nbd)
3891                 nbd++;
3892
3893         /* total_pkt_bytes should be set on the first data BD if
3894          * it's not an LSO packet and there is more than one
3895          * data BD. In this case pkt_size is limited by an MTU value.
3896          * However we prefer to set it for an LSO packet (while we don't
3897          * have to) in order to save some CPU cycles in the non-LSO
3898          * case, which we care about much more.
3899          */
3900         if (total_pkt_bd != NULL)
3901                 total_pkt_bd->total_pkt_bytes = pkt_size;
3902
3903         if (pbd_e1x)
3904                 DP(NETIF_MSG_TX_QUEUED,
3905                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3906                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3907                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3908                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3909                     le16_to_cpu(pbd_e1x->total_hlen_w));
3910         if (pbd_e2)
3911                 DP(NETIF_MSG_TX_QUEUED,
3912                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3913                    pbd_e2,
3914                    pbd_e2->data.mac_addr.dst_hi,
3915                    pbd_e2->data.mac_addr.dst_mid,
3916                    pbd_e2->data.mac_addr.dst_lo,
3917                    pbd_e2->data.mac_addr.src_hi,
3918                    pbd_e2->data.mac_addr.src_mid,
3919                    pbd_e2->data.mac_addr.src_lo,
3920                    pbd_e2->parsing_data);
3921         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3922
3923         netdev_tx_sent_queue(txq, skb->len);
3924
3925         skb_tx_timestamp(skb);
3926
3927         txdata->tx_pkt_prod++;
3928         /*
3929          * Make sure that the BD data is updated before updating the producer
3930          * since FW might read the BD right after the producer is updated.
3931          * This is only applicable for weak-ordered memory model archs such
3932          * as IA-64. The following barrier is also mandatory since FW
3933          * assumes packets must have BDs.
3934          */
3935         wmb();
3936
3937         txdata->tx_db.data.prod += nbd;
3938         barrier();
3939
3940         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3941
3942         mmiowb();
3943
3944         txdata->tx_bd_prod += nbd;
3945
3946         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3947                 netif_tx_stop_queue(txq);
3948
3949                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3950                  * ordering of set_bit() in netif_tx_stop_queue() and read of
3951                  * fp->bd_tx_cons */
3952                 smp_mb();
3953
3954                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3955                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3956                         netif_tx_wake_queue(txq);
3957         }
3958         txdata->tx_pkt++;
3959
3960         return NETDEV_TX_OK;
3961 }
3962
3963 /**
3964  * bnx2x_setup_tc - routine to configure net_device for multi tc
3965  *
3966  * @netdev: net device to configure
3967  * @tc: number of traffic classes to enable
3968  *
3969  * callback connected to the ndo_setup_tc function pointer
3970  */
3971 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3972 {
3973         int cos, prio, count, offset;
3974         struct bnx2x *bp = netdev_priv(dev);
3975
3976         /* setup tc must be called under rtnl lock */
3977         ASSERT_RTNL();
3978
3979         /* no traffic classes requested. aborting */
3980         if (!num_tc) {
3981                 netdev_reset_tc(dev);
3982                 return 0;
3983         }
3984
3985         /* requested to support too many traffic classes */
3986         if (num_tc > bp->max_cos) {
3987                 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3988                           num_tc, bp->max_cos);
3989                 return -EINVAL;
3990         }
3991
3992         /* declare amount of supported traffic classes */
3993         if (netdev_set_num_tc(dev, num_tc)) {
3994                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3995                 return -EINVAL;
3996         }
3997
3998         /* configure priority to traffic class mapping */
3999         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4000                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4001                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4002                    "mapping priority %d to tc %d\n",
4003                    prio, bp->prio_to_cos[prio]);
4004         }
4005
4006
4007         /* Use this configuration to differentiate tc0 from other COSes.
4008            This can be used for ETS or PFC, and saves the effort of setting
4009            up a multi-class queue disc or negotiating DCBX with a switch:
4010         netdev_set_prio_tc_map(dev, 0, 0);
4011         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4012         for (prio = 1; prio < 16; prio++) {
4013                 netdev_set_prio_tc_map(dev, prio, 1);
4014                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4015         } */
4016
4017         /* configure traffic class to transmission queue mapping */
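             /* e.g. (illustrative) with num_tc = 2 and 8 ETH/non-CNIC queues,
              * tc0 maps to queues 0-7 and tc1 to queues 8-15
              * (offset = cos * 8).
              */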
4018         for (cos = 0; cos < bp->max_cos; cos++) {
4019                 count = BNX2X_NUM_ETH_QUEUES(bp);
4020                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4021                 netdev_set_tc_queue(dev, cos, count, offset);
4022                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4023                    "mapping tc %d to offset %d count %d\n",
4024                    cos, offset, count);
4025         }
4026
4027         return 0;
4028 }
4029
4030 /* called with rtnl_lock */
4031 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4032 {
4033         struct sockaddr *addr = p;
4034         struct bnx2x *bp = netdev_priv(dev);
4035         int rc = 0;
4036
4037         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4038                 BNX2X_ERR("Requested MAC address is not valid\n");
4039                 return -EINVAL;
4040         }
4041
4042         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4043             !is_zero_ether_addr(addr->sa_data)) {
4044                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4045                 return -EINVAL;
4046         }
4047
4048         if (netif_running(dev))  {
4049                 rc = bnx2x_set_eth_mac(bp, false);
4050                 if (rc)
4051                         return rc;
4052         }
4053
4054         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4055
4056         if (netif_running(dev))
4057                 rc = bnx2x_set_eth_mac(bp, true);
4058
4059         return rc;
4060 }
4061
4062 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4063 {
4064         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4065         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4066         u8 cos;
4067
4068         /* Common */
4069
4070         if (IS_FCOE_IDX(fp_index)) {
4071                 memset(sb, 0, sizeof(union host_hc_status_block));
4072                 fp->status_blk_mapping = 0;
4073         } else {
4074                 /* status blocks */
4075                 if (!CHIP_IS_E1x(bp))
4076                         BNX2X_PCI_FREE(sb->e2_sb,
4077                                        bnx2x_fp(bp, fp_index,
4078                                                 status_blk_mapping),
4079                                        sizeof(struct host_hc_status_block_e2));
4080                 else
4081                         BNX2X_PCI_FREE(sb->e1x_sb,
4082                                        bnx2x_fp(bp, fp_index,
4083                                                 status_blk_mapping),
4084                                        sizeof(struct host_hc_status_block_e1x));
4085         }
4086
4087         /* Rx */
4088         if (!skip_rx_queue(bp, fp_index)) {
4089                 bnx2x_free_rx_bds(fp);
4090
4091                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4092                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4093                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4094                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4095                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4096
4097                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4098                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4099                                sizeof(struct eth_fast_path_rx_cqe) *
4100                                NUM_RCQ_BD);
4101
4102                 /* SGE ring */
4103                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4104                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4105                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4106                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4107         }
4108
4109         /* Tx */
4110         if (!skip_tx_queue(bp, fp_index)) {
4111                 /* fastpath tx rings: tx_buf tx_desc */
4112                 for_each_cos_in_tx_queue(fp, cos) {
4113                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4114
4115                         DP(NETIF_MSG_IFDOWN,
4116                            "freeing tx memory of fp %d cos %d cid %d\n",
4117                            fp_index, cos, txdata->cid);
4118
4119                         BNX2X_FREE(txdata->tx_buf_ring);
4120                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4121                                 txdata->tx_desc_mapping,
4122                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4123                 }
4124         }
4125         /* end of fastpath */
4126 }
4127
4128 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4129 {
4130         int i;
4131         for_each_cnic_queue(bp, i)
4132                 bnx2x_free_fp_mem_at(bp, i);
4133 }
4134
4135 void bnx2x_free_fp_mem(struct bnx2x *bp)
4136 {
4137         int i;
4138         for_each_eth_queue(bp, i)
4139                 bnx2x_free_fp_mem_at(bp, i);
4140 }
4141
4142 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4143 {
4144         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4145         if (!CHIP_IS_E1x(bp)) {
4146                 bnx2x_fp(bp, index, sb_index_values) =
4147                         (__le16 *)status_blk.e2_sb->sb.index_values;
4148                 bnx2x_fp(bp, index, sb_running_index) =
4149                         (__le16 *)status_blk.e2_sb->sb.running_index;
4150         } else {
4151                 bnx2x_fp(bp, index, sb_index_values) =
4152                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4153                 bnx2x_fp(bp, index, sb_running_index) =
4154                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4155         }
4156 }
4157
4158 /* Returns the number of actually allocated BDs */
4159 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4160                               int rx_ring_size)
4161 {
4162         struct bnx2x *bp = fp->bp;
4163         u16 ring_prod, cqe_ring_prod;
4164         int i, failure_cnt = 0;
4165
4166         fp->rx_comp_cons = 0;
4167         cqe_ring_prod = ring_prod = 0;
4168
4169         /* This routine is called only during fp init, so
4170          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4171          */
4172         for (i = 0; i < rx_ring_size; i++) {
4173                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4174                         failure_cnt++;
4175                         continue;
4176                 }
4177                 ring_prod = NEXT_RX_IDX(ring_prod);
4178                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4179                 WARN_ON(ring_prod <= (i - failure_cnt));
4180         }
4181
4182         if (failure_cnt)
4183                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4184                           i - failure_cnt, fp->index);
4185
4186         fp->rx_bd_prod = ring_prod;
4187         /* Limit the CQE producer by the CQE ring size */
4188         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4189                                cqe_ring_prod);
4190         fp->rx_pkt = fp->rx_calls = 0;
4191
4192         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4193
4194         return i - failure_cnt;
4195 }
4196
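/**
 * bnx2x_set_next_page_rx_cq - chain the RCQ pages into a ring.
 *
 * @fp:         fastpath handle
 *
 * The last CQE of each RCQ page is rewritten as a "next page" element
 * holding the DMA address of the following page; the modulo on
 * NUM_RCQ_RINGS wraps the final page back to the first one.
 */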
4197 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4198 {
4199         int i;
4200
4201         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4202                 struct eth_rx_cqe_next_page *nextpg;
4203
4204                 nextpg = (struct eth_rx_cqe_next_page *)
4205                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4206                 nextpg->addr_hi =
4207                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4208                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4209                 nextpg->addr_lo =
4210                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4211                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4212         }
4213 }
4214
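/**
 * bnx2x_alloc_fp_mem_at - allocate all memory needed by a single fastpath.
 *
 * @bp:         driver handle
 * @index:      fastpath index
 *
 * Allocates the status block (for non-FCoE queues), the per-CoS Tx rings
 * and the Rx descriptor, completion and SGE rings, then fills the Rx ring
 * with buffers.  If fewer buffers than requested could be allocated but
 * still at least the FW minimum, the queue is kept with the smaller ring;
 * below the minimum its memory is released and -ENOMEM is returned.
 */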
4215 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4216 {
4217         union host_hc_status_block *sb;
4218         struct bnx2x_fastpath *fp = &bp->fp[index];
4219         int ring_size = 0;
4220         u8 cos;
4221         int rx_ring_size = 0;
4222
4223         if (!bp->rx_ring_size &&
4224             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4225                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4226                 bp->rx_ring_size = rx_ring_size;
4227         } else if (!bp->rx_ring_size) {
4228                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4229
4230                 if (CHIP_IS_E3(bp)) {
4231                         u32 cfg = SHMEM_RD(bp,
4232                                            dev_info.port_hw_config[BP_PORT(bp)].
4233                                            default_cfg);
4234
4235                         /* Decrease ring size for 1G functions */
4236                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4237                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4238                                 rx_ring_size /= 10;
4239                 }
4240
4241                 /* allocate at least the number of buffers required by FW */
4242                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4243                                      MIN_RX_SIZE_TPA, rx_ring_size);
4244
4245                 bp->rx_ring_size = rx_ring_size;
4246         } else /* if rx_ring_size specified - use it */
4247                 rx_ring_size = bp->rx_ring_size;
4248
4249         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4250
4251         /* Common */
4252         sb = &bnx2x_fp(bp, index, status_blk);
4253
4254         if (!IS_FCOE_IDX(index)) {
4255                 /* status blocks */
4256                 if (!CHIP_IS_E1x(bp))
4257                         BNX2X_PCI_ALLOC(sb->e2_sb,
4258                                 &bnx2x_fp(bp, index, status_blk_mapping),
4259                                 sizeof(struct host_hc_status_block_e2));
4260                 else
4261                         BNX2X_PCI_ALLOC(sb->e1x_sb,
4262                                 &bnx2x_fp(bp, index, status_blk_mapping),
4263                             sizeof(struct host_hc_status_block_e1x));
4264         }
4265
4266         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4267          * set shortcuts for it.
4268          */
4269         if (!IS_FCOE_IDX(index))
4270                 set_sb_shortcuts(bp, index);
4271
4272         /* Tx */
4273         if (!skip_tx_queue(bp, index)) {
4274                 /* fastpath tx rings: tx_buf tx_desc */
4275                 for_each_cos_in_tx_queue(fp, cos) {
4276                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4277
4278                         DP(NETIF_MSG_IFUP,
4279                            "allocating tx memory of fp %d cos %d\n",
4280                            index, cos);
4281
4282                         BNX2X_ALLOC(txdata->tx_buf_ring,
4283                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4284                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4285                                 &txdata->tx_desc_mapping,
4286                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4287                 }
4288         }
4289
4290         /* Rx */
4291         if (!skip_rx_queue(bp, index)) {
4292                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4293                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4294                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4295                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4296                                 &bnx2x_fp(bp, index, rx_desc_mapping),
4297                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4298
4299                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4300                                 &bnx2x_fp(bp, index, rx_comp_mapping),
4301                                 sizeof(struct eth_fast_path_rx_cqe) *
4302                                 NUM_RCQ_BD);
4303
4304                 /* SGE ring */
4305                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4306                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4307                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4308                                 &bnx2x_fp(bp, index, rx_sge_mapping),
4309                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4310                 /* RX BD ring */
4311                 bnx2x_set_next_page_rx_bd(fp);
4312
4313                 /* CQ ring */
4314                 bnx2x_set_next_page_rx_cq(fp);
4315
4316                 /* BDs */
4317                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4318                 if (ring_size < rx_ring_size)
4319                         goto alloc_mem_err;
4320         }
4321
4322         return 0;
4323
4324 /* handles low memory cases */
4325 alloc_mem_err:
4326         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4327                                                 index, ring_size);
4328         /* FW will drop all packets if the queue is not big enough;
4329          * in that case we disable the queue.
4330          * The minimum size differs for OOO, TPA and non-TPA queues.
4331          */
4332         if (ring_size < (fp->disable_tpa ?
4333                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4334                         /* release memory allocated for this queue */
4335                         bnx2x_free_fp_mem_at(bp, index);
4336                         return -ENOMEM;
4337         }
4338         return 0;
4339 }
4340
4341 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4342 {
4343         if (!NO_FCOE(bp))
4344                 /* FCoE */
4345                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4346                         /* we will fail the load process instead of
4347                          * marking NO_FCOE_FLAG
4348                          */
4349                         return -ENOMEM;
4350
4351         return 0;
4352 }
4353
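/**
 * bnx2x_alloc_fp_mem - allocate fastpath memory for all eth queues.
 *
 * @bp:         driver handle
 *
 * Failure on the leading queue is fatal.  Failure on an RSS queue is
 * handled by shrinking the number of eth queues to what was actually
 * allocated and, when CNIC is supported, sliding the FCoE fastpath down
 * so that it stays right after the last remaining eth queue.
 */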
4354 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4355 {
4356         int i;
4357
4358         /* 1. Allocate FP for leading - fatal if error
4359          * 2. Allocate RSS - fix number of queues if error
4360          */
4361
4362         /* leading */
4363         if (bnx2x_alloc_fp_mem_at(bp, 0))
4364                 return -ENOMEM;
4365
4366         /* RSS */
4367         for_each_nondefault_eth_queue(bp, i)
4368                 if (bnx2x_alloc_fp_mem_at(bp, i))
4369                         break;
4370
4371         /* handle memory failures */
4372         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4373                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4374
4375                 WARN_ON(delta < 0);
4376                 bnx2x_shrink_eth_fp(bp, delta);
4377                 if (CNIC_SUPPORT(bp))
4378                         /* move non-eth FPs next to the last eth FP;
4379                          * must be done in this order:
4380                          * FCOE_IDX < FWD_IDX < OOO_IDX
4381                          */
4382
4383                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4384                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4385                 bp->num_ethernet_queues -= delta;
4386                 bp->num_queues = bp->num_ethernet_queues +
4387                                  bp->num_cnic_queues;
4388                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4389                           bp->num_queues + delta, bp->num_queues);
4390         }
4391
4392         return 0;
4393 }
4394
4395 void bnx2x_free_mem_bp(struct bnx2x *bp)
4396 {
4397         int i;
4398
4399         for (i = 0; i < bp->fp_array_size; i++)
4400                 kfree(bp->fp[i].tpa_info);
4401         kfree(bp->fp);
4402         kfree(bp->sp_objs);
4403         kfree(bp->fp_stats);
4404         kfree(bp->bnx2x_txq);
4405         kfree(bp->msix_table);
4406         kfree(bp->ilt);
4407 }
4408
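/**
 * bnx2x_alloc_mem_bp - allocate the per-adapter bookkeeping arrays.
 *
 * @bp:         driver handle
 *
 * Allocates the fastpath array (including per-fp TPA aggregation info),
 * the matching sp_objs and fp_stats arrays, the Tx queue array, the MSI-X
 * entry table and the ILT descriptor.  On any failure everything already
 * allocated is released via bnx2x_free_mem_bp() and -ENOMEM is returned.
 */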
4409 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4410 {
4411         struct bnx2x_fastpath *fp;
4412         struct msix_entry *tbl;
4413         struct bnx2x_ilt *ilt;
4414         int msix_table_size = 0;
4415         int fp_array_size, txq_array_size;
4416         int i;
4417
4418         /*
4419          * The biggest MSI-X table we might need is the maximum number of fast
4420          * path IGU SBs plus the default SB (for the PF only).
4421          */
4422         msix_table_size = bp->igu_sb_cnt;
4423         if (IS_PF(bp))
4424                 msix_table_size++;
4425         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4426
4427         /* fp array: RSS plus CNIC related L2 queues */
4428         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4429         bp->fp_array_size = fp_array_size;
4430         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4431
4432         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4433         if (!fp)
4434                 goto alloc_err;
4435         for (i = 0; i < bp->fp_array_size; i++) {
4436                 fp[i].tpa_info =
4437                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4438                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4439                 if (!(fp[i].tpa_info))
4440                         goto alloc_err;
4441         }
4442
4443         bp->fp = fp;
4444
4445         /* allocate sp objs */
4446         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4447                               GFP_KERNEL);
4448         if (!bp->sp_objs)
4449                 goto alloc_err;
4450
4451         /* allocate fp_stats */
4452         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4453                                GFP_KERNEL);
4454         if (!bp->fp_stats)
4455                 goto alloc_err;
4456
4457         /* Allocate memory for the transmission queues array */
4458         txq_array_size =
4459                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4460         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4461
4462         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4463                                 GFP_KERNEL);
4464         if (!bp->bnx2x_txq)
4465                 goto alloc_err;
4466
4467         /* msix table */
4468         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4469         if (!tbl)
4470                 goto alloc_err;
4471         bp->msix_table = tbl;
4472
4473         /* ilt */
4474         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4475         if (!ilt)
4476                 goto alloc_err;
4477         bp->ilt = ilt;
4478
4479         return 0;
4480 alloc_err:
4481         bnx2x_free_mem_bp(bp);
4482         return -ENOMEM;
4483
4484 }
4485
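/* Unload and immediately reload the NIC if the interface is running; a
 * no-op otherwise.  Typically invoked under rtnl_lock, e.g. from the ndo
 * callbacks further down in this file.
 */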
4486 int bnx2x_reload_if_running(struct net_device *dev)
4487 {
4488         struct bnx2x *bp = netdev_priv(dev);
4489
4490         if (unlikely(!netif_running(dev)))
4491                 return 0;
4492
4493         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4494         return bnx2x_nic_load(bp, LOAD_NORMAL);
4495 }
4496
4497 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4498 {
4499         u32 sel_phy_idx = 0;
4500         if (bp->link_params.num_phys <= 1)
4501                 return INT_PHY;
4502
4503         if (bp->link_vars.link_up) {
4504                 sel_phy_idx = EXT_PHY1;
4505                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4506                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4507                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4508                         sel_phy_idx = EXT_PHY2;
4509         } else {
4510
4511                 switch (bnx2x_phy_selection(&bp->link_params)) {
4512                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4513                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4514                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4515                        sel_phy_idx = EXT_PHY1;
4516                        break;
4517                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4518                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4519                        sel_phy_idx = EXT_PHY2;
4520                        break;
4521                 }
4522         }
4523
4524         return sel_phy_idx;
4525
4526 }
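/* Map the currently active PHY to its link configuration index.  The
 * active index returned by bnx2x_get_cur_phy_idx() is the post-swap one,
 * while the configuration appears to be indexed by the pre-swap position,
 * hence the reversal below when PORT_HW_CFG_PHY_SWAPPED_ENABLED is set
 * (e.g. an active EXT_PHY2 maps to LINK_CONFIG_IDX(EXT_PHY1)).
 */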
4527 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4528 {
4529         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4530         /*
4531          * The selected active PHY is always the one after swapping (in case
4532          * PHY swapping is enabled), so when swapping is enabled we need to
4533          * reverse the configuration index.
4534          */
4535
4536         if (bp->link_params.multi_phy_config &
4537             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4538                 if (sel_phy_idx == EXT_PHY1)
4539                         sel_phy_idx = EXT_PHY2;
4540                 else if (sel_phy_idx == EXT_PHY2)
4541                         sel_phy_idx = EXT_PHY1;
4542         }
4543         return LINK_CONFIG_IDX(sel_phy_idx);
4544 }
4545
4546 #ifdef NETDEV_FCOE_WWNN
4547 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4548 {
4549         struct bnx2x *bp = netdev_priv(dev);
4550         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4551
4552         switch (type) {
4553         case NETDEV_FCOE_WWNN:
4554                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4555                                 cp->fcoe_wwn_node_name_lo);
4556                 break;
4557         case NETDEV_FCOE_WWPN:
4558                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4559                                 cp->fcoe_wwn_port_name_lo);
4560                 break;
4561         default:
4562                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4563                 return -EINVAL;
4564         }
4565
4566         return 0;
4567 }
4568 #endif
4569
4570 /* called with rtnl_lock */
4571 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4572 {
4573         struct bnx2x *bp = netdev_priv(dev);
4574
4575         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4576                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4577                 return -EAGAIN;
4578         }
4579
4580         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4581             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4582                 BNX2X_ERR("Can't support requested MTU size\n");
4583                 return -EINVAL;
4584         }
4585
4586         /* This does not race with packet allocation
4587          * because the actual alloc size is
4588          * only updated as part of load
4589          */
4590         dev->mtu = new_mtu;
4591
4592         return bnx2x_reload_if_running(dev);
4593 }
4594
4595 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4596                                      netdev_features_t features)
4597 {
4598         struct bnx2x *bp = netdev_priv(dev);
4599
4600         /* TPA requires Rx CSUM offloading */
4601         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4602                 features &= ~NETIF_F_LRO;
4603                 features &= ~NETIF_F_GRO;
4604         }
4605
4606         return features;
4607 }
4608
4609 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4610 {
4611         struct bnx2x *bp = netdev_priv(dev);
4612         u32 flags = bp->flags;
4613         u32 changes;
4614         bool bnx2x_reload = false;
4615
4616         if (features & NETIF_F_LRO)
4617                 flags |= TPA_ENABLE_FLAG;
4618         else
4619                 flags &= ~TPA_ENABLE_FLAG;
4620
4621         if (features & NETIF_F_GRO)
4622                 flags |= GRO_ENABLE_FLAG;
4623         else
4624                 flags &= ~GRO_ENABLE_FLAG;
4625
4626         if (features & NETIF_F_LOOPBACK) {
4627                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4628                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
4629                         bnx2x_reload = true;
4630                 }
4631         } else {
4632                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4633                         bp->link_params.loopback_mode = LOOPBACK_NONE;
4634                         bnx2x_reload = true;
4635                 }
4636         }
4637
4638         changes = flags ^ bp->flags;
4639
4640         /* if GRO is changed while LRO is enabled, don't force a reload */
4641         if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4642                 changes &= ~GRO_ENABLE_FLAG;
4643
4644         if (changes)
4645                 bnx2x_reload = true;
4646
4647         bp->flags = flags;
4648
4649         if (bnx2x_reload) {
4650                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4651                         return bnx2x_reload_if_running(dev);
4652                 /* else: bnx2x_nic_load() will be called at end of recovery */
4653         }
4654
4655         return 0;
4656 }
4657
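/* Tx timeout handler (presumably wired up as .ndo_tx_timeout in
 * bnx2x_main.c): the actual reset is deferred to the sp_rtnl worker by
 * setting BNX2X_SP_RTNL_TX_TIMEOUT, so it runs in process context rather
 * than here.
 */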
4658 void bnx2x_tx_timeout(struct net_device *dev)
4659 {
4660         struct bnx2x *bp = netdev_priv(dev);
4661
4662 #ifdef BNX2X_STOP_ON_ERROR
4663         if (!bp->panic)
4664                 bnx2x_panic();
4665 #endif
4666
4667         smp_mb__before_clear_bit();
4668         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4669         smp_mb__after_clear_bit();
4670
4671         /* This allows the netif to be shut down gracefully before resetting */
4672         schedule_delayed_work(&bp->sp_rtnl_task, 0);
4673 }
4674
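/* PCI suspend/resume handlers: suspend detaches the net device and
 * unloads the NIC with UNLOAD_CLOSE before dropping to the requested PCI
 * power state; resume restores D0, re-attaches the device and reloads
 * the NIC (unless parity recovery is still in progress).
 */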
4675 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4676 {
4677         struct net_device *dev = pci_get_drvdata(pdev);
4678         struct bnx2x *bp;
4679
4680         if (!dev) {
4681                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4682                 return -ENODEV;
4683         }
4684         bp = netdev_priv(dev);
4685
4686         rtnl_lock();
4687
4688         pci_save_state(pdev);
4689
4690         if (!netif_running(dev)) {
4691                 rtnl_unlock();
4692                 return 0;
4693         }
4694
4695         netif_device_detach(dev);
4696
4697         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4698
4699         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4700
4701         rtnl_unlock();
4702
4703         return 0;
4704 }
4705
4706 int bnx2x_resume(struct pci_dev *pdev)
4707 {
4708         struct net_device *dev = pci_get_drvdata(pdev);
4709         struct bnx2x *bp;
4710         int rc;
4711
4712         if (!dev) {
4713                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4714                 return -ENODEV;
4715         }
4716         bp = netdev_priv(dev);
4717
4718         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4719                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4720                 return -EAGAIN;
4721         }
4722
4723         rtnl_lock();
4724
4725         pci_restore_state(pdev);
4726
4727         if (!netif_running(dev)) {
4728                 rtnl_unlock();
4729                 return 0;
4730         }
4731
4732         bnx2x_set_power_state(bp, PCI_D0);
4733         netif_device_attach(dev);
4734
4735         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4736
4737         rtnl_unlock();
4738
4739         return rc;
4740 }
4741
4742
4743 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4744                               u32 cid)
4745 {
4746         /* ustorm cxt validation */
4747         cxt->ustorm_ag_context.cdu_usage =
4748                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4749                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4750         /* xcontext validation */
4751         cxt->xstorm_ag_context.cdu_reserved =
4752                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4753                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4754 }
4755
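/* The two helpers below tweak the host-coalescing parameters of a single
 * status block index directly in CSTORM internal memory: the first sets
 * the timeout in HC ticks, the second sets or clears the
 * HC_INDEX_DATA_HC_ENABLED flag.
 */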
4756 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4757                                     u8 fw_sb_id, u8 sb_index,
4758                                     u8 ticks)
4759 {
4760
4761         u32 addr = BAR_CSTRORM_INTMEM +
4762                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4763         REG_WR8(bp, addr, ticks);
4764         DP(NETIF_MSG_IFUP,
4765            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4766            port, fw_sb_id, sb_index, ticks);
4767 }
4768
4769 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4770                                     u16 fw_sb_id, u8 sb_index,
4771                                     u8 disable)
4772 {
4773         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4774         u32 addr = BAR_CSTRORM_INTMEM +
4775                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4776         u8 flags = REG_RD8(bp, addr);
4777         /* clear and set */
4778         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4779         flags |= enable_flag;
4780         REG_WR8(bp, addr, flags);
4781         DP(NETIF_MSG_IFUP,
4782            "port %x fw_sb_id %d sb_index %d disable %d\n",
4783            port, fw_sb_id, sb_index, disable);
4784 }
4785
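/**
 * bnx2x_update_coalesce_sb_index - update HC moderation for one SB index.
 *
 * @bp:         driver handle
 * @fw_sb_id:   FW status block id
 * @sb_index:   index within the status block
 * @disable:    force coalescing off for this index
 * @usec:       interrupt moderation interval in microseconds
 *
 * The interval is converted to HC ticks (usec / BNX2X_BTR).  Passing
 * usec == 0 disables coalescing for the index even when @disable is 0.
 *
 * Illustrative call only (a sketch, not taken from this driver): a caller
 * wanting roughly 50us of moderation on some Rx index could do
 *
 *	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, sb_index, 0, 50);
 */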
4786 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4787                                     u8 sb_index, u8 disable, u16 usec)
4788 {
4789         int port = BP_PORT(bp);
4790         u8 ticks = usec / BNX2X_BTR;
4791
4792         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4793
4794         disable = disable ? 1 : (usec ? 0 : 1);
4795         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4796 }