1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/ip.h>
20 #include <net/ipv6.h>
21 #include <net/ip6_checksum.h>
22 #include <linux/firmware.h>
23 #include "bnx2x_cmn.h"
24
25 #ifdef BCM_VLAN
26 #include <linux/if_vlan.h>
27 #endif
28
29 #include "bnx2x_init.h"
30
31
32 /* free skb in the packet ring at pos idx
33  * return idx of last bd freed
34  */
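/* The BD chain for a packet is laid out as: a start BD (mapped), a parse BD
 * (no mapping), an optional TSO split header BD (no mapping), and then the
 * data BDs for the remaining fragments - which is the order in which the
 * code below walks and unmaps them.
 */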
35 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
36                              u16 idx)
37 {
38         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39         struct eth_tx_start_bd *tx_start_bd;
40         struct eth_tx_bd *tx_data_bd;
41         struct sk_buff *skb = tx_buf->skb;
42         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
43         int nbd;
44
45         /* prefetch the skb end pointer to speed up dev_kfree_skb() */
46         prefetch(&skb->end);
47
48         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
49            idx, tx_buf, skb);
50
51         /* unmap first bd */
52         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
56
57         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58 #ifdef BNX2X_STOP_ON_ERROR
59         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60                 BNX2X_ERR("BAD nbd!\n");
61                 bnx2x_panic();
62         }
63 #endif
64         new_cons = nbd + tx_buf->first_bd;
65
66         /* Get the next bd */
67         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
68
69         /* Skip a parse bd... */
70         --nbd;
71         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
72
73         /* ...and the TSO split header bd since they have no mapping */
74         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
75                 --nbd;
76                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
77         }
78
79         /* now free frags */
80         while (nbd > 0) {
81
82                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
86                 if (--nbd)
87                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
88         }
89
90         /* release skb */
91         WARN_ON(!skb);
92         dev_kfree_skb(skb);
93         tx_buf->first_bd = 0;
94         tx_buf->skb = NULL;
95
96         return new_cons;
97 }
98
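/* Process Tx completions for a fastpath ring: walk the packet consumer from
 * the driver's index up to the hardware index in the status block, free each
 * completed skb, and re-enable the netdev Tx queue if it was stopped and
 * enough BDs have become available again.
 */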
99 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
100 {
101         struct bnx2x *bp = fp->bp;
102         struct netdev_queue *txq;
103         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
104
105 #ifdef BNX2X_STOP_ON_ERROR
106         if (unlikely(bp->panic))
107                 return -1;
108 #endif
109
110         txq = netdev_get_tx_queue(bp->dev, fp->index);
111         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112         sw_cons = fp->tx_pkt_cons;
113
114         while (sw_cons != hw_cons) {
115                 u16 pkt_cons;
116
117                 pkt_cons = TX_BD(sw_cons);
118
119                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
120                                       " pkt_cons %u\n",
121                    fp->index, hw_cons, sw_cons, pkt_cons);
122
123                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
124                 sw_cons++;
125         }
126
127         fp->tx_pkt_cons = sw_cons;
128         fp->tx_bd_cons = bd_cons;
129
130         /* Need to make the tx_bd_cons update visible to start_xmit()
131          * before checking for netif_tx_queue_stopped().  Without the
132          * memory barrier, there is a small possibility that
133          * start_xmit() will miss it and cause the queue to be stopped
134          * forever.
135          */
136         smp_mb();
137
138         if (unlikely(netif_tx_queue_stopped(txq))) {
139                 /* Taking tx_lock() is needed to prevent re-enabling the queue
140                  * while it's empty. This could happen if rx_action() gets
141                  * suspended in bnx2x_tx_int() after the condition before
142                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
143                  *
144                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
145                  * sends some packets consuming the whole queue again->
146                  * stops the queue
147                  */
148
149                 __netif_tx_lock(txq, smp_processor_id());
150
151                 if ((netif_tx_queue_stopped(txq)) &&
152                     (bp->state == BNX2X_STATE_OPEN) &&
153                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
154                         netif_tx_wake_queue(txq);
155
156                 __netif_tx_unlock(txq);
157         }
158         return 0;
159 }
160
161 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
162                                              u16 idx)
163 {
164         u16 last_max = fp->last_max_sge;
165
166         if (SUB_S16(idx, last_max) > 0)
167                 fp->last_max_sge = idx;
168 }
169
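/* Advance the SGE producer after a TPA aggregation completes: clear the mask
 * bits for the SGEs consumed by this CQE, track the highest SGE index seen,
 * and push rx_sge_prod forward over fully consumed mask elements.
 */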
170 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
171                                   struct eth_fast_path_rx_cqe *fp_cqe)
172 {
173         struct bnx2x *bp = fp->bp;
174         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
175                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
176                       SGE_PAGE_SHIFT;
177         u16 last_max, last_elem, first_elem;
178         u16 delta = 0;
179         u16 i;
180
181         if (!sge_len)
182                 return;
183
184         /* First mark all used pages */
185         for (i = 0; i < sge_len; i++)
186                 SGE_MASK_CLEAR_BIT(fp,
187                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
188
189         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
190            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
191
192         /* Here we assume that the last SGE index is the biggest */
193         prefetch((void *)(fp->sge_mask));
194         bnx2x_update_last_max_sge(fp,
195                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
196
197         last_max = RX_SGE(fp->last_max_sge);
198         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
199         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
200
201         /* If ring is not full */
202         if (last_elem + 1 != first_elem)
203                 last_elem++;
204
205         /* Now update the prod */
206         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
207                 if (likely(fp->sge_mask[i]))
208                         break;
209
210                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
211                 delta += RX_SGE_MASK_ELEM_SZ;
212         }
213
214         if (delta > 0) {
215                 fp->rx_sge_prod += delta;
216                 /* clear page-end entries */
217                 bnx2x_clear_sge_mask_next_elems(fp);
218         }
219
220         DP(NETIF_MSG_RX_STATUS,
221            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
222            fp->last_max_sge, fp->rx_sge_prod);
223 }
224
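/* Start a TPA aggregation on the given queue (bin): the empty skb parked in
 * the TPA pool is mapped and placed at the producer index, while the skb at
 * the consumer index is moved into the pool and kept mapped until the
 * aggregation is stopped.
 */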
225 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
226                             struct sk_buff *skb, u16 cons, u16 prod)
227 {
228         struct bnx2x *bp = fp->bp;
229         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
230         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
231         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
232         dma_addr_t mapping;
233
234         /* move empty skb from pool to prod and map it */
235         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
236         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
237                                  bp->rx_buf_size, DMA_FROM_DEVICE);
238         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
239
240         /* move partial skb from cons to pool (don't unmap yet) */
241         fp->tpa_pool[queue] = *cons_rx_buf;
242
243         /* mark bin state as start - print error if current state != stop */
244         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
245                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
246
247         fp->tpa_state[queue] = BNX2X_TPA_START;
248
249         /* point prod_bd to new skb */
250         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
251         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
252
253 #ifdef BNX2X_STOP_ON_ERROR
254         fp->tpa_queue_used |= (1 << queue);
255 #ifdef _ASM_GENERIC_INT_L64_H
256         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
257 #else
258         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
259 #endif
260            fp->tpa_queue_used);
261 #endif
262 }
263
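/* Attach the SGE pages of a completed aggregation to the skb as page
 * fragments, replacing each consumed page in the SGE ring with a freshly
 * allocated one. Returns 0 on success or a negative value if a replacement
 * page cannot be allocated, in which case the caller drops the packet.
 */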
264 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
265                                struct sk_buff *skb,
266                                struct eth_fast_path_rx_cqe *fp_cqe,
267                                u16 cqe_idx)
268 {
269         struct sw_rx_page *rx_pg, old_rx_pg;
270         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
271         u32 i, frag_len, frag_size, pages;
272         int err;
273         int j;
274
275         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
276         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
277
278         /* This is needed in order to enable forwarding support */
279         if (frag_size)
280                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
281                                                max(frag_size, (u32)len_on_bd));
282
283 #ifdef BNX2X_STOP_ON_ERROR
284         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
285                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
286                           pages, cqe_idx);
287                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
288                           fp_cqe->pkt_len, len_on_bd);
289                 bnx2x_panic();
290                 return -EINVAL;
291         }
292 #endif
293
294         /* Run through the SGL and compose the fragmented skb */
295         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
296                 u16 sge_idx =
297                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
298
299                 /* FW gives the indices of the SGE as if the ring is an array
300                    (meaning that "next" element will consume 2 indices) */
301                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
302                 rx_pg = &fp->rx_page_ring[sge_idx];
303                 old_rx_pg = *rx_pg;
304
305                 /* If we fail to allocate a substitute page, we simply stop
306                    where we are and drop the whole packet */
307                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
308                 if (unlikely(err)) {
309                         fp->eth_q_stats.rx_skb_alloc_failed++;
310                         return err;
311                 }
312
313                 /* Unmap the page as we are going to pass it to the stack */
314                 dma_unmap_page(&bp->pdev->dev,
315                                dma_unmap_addr(&old_rx_pg, mapping),
316                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
317
318                 /* Add one frag and update the appropriate fields in the skb */
319                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
320
321                 skb->data_len += frag_len;
322                 skb->truesize += frag_len;
323                 skb->len += frag_len;
324
325                 frag_size -= frag_len;
326         }
327
328         return 0;
329 }
330
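/* Complete a TPA aggregation: unmap the pooled skb, fix up the IP checksum,
 * attach the SGE fragments and hand the skb to the stack, then park a newly
 * allocated skb in the pool and mark the bin as stopped.
 */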
331 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
332                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
333                            u16 cqe_idx)
334 {
335         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
336         struct sk_buff *skb = rx_buf->skb;
337         /* alloc new skb */
338         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
339
340         /* Unmap skb in the pool anyway, as we are going to change
341            pool entry status to BNX2X_TPA_STOP even if new skb allocation
342            fails. */
343         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
344                          bp->rx_buf_size, DMA_FROM_DEVICE);
345
346         if (likely(new_skb)) {
347                 /* fix ip xsum and give it to the stack */
348                 /* (no need to map the new skb) */
349 #ifdef BCM_VLAN
350                 int is_vlan_cqe =
351                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
352                          PARSING_FLAGS_VLAN);
353                 int is_not_hwaccel_vlan_cqe =
354                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
355 #endif
356
357                 prefetch(skb);
358                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
359
360 #ifdef BNX2X_STOP_ON_ERROR
361                 if (pad + len > bp->rx_buf_size) {
362                         BNX2X_ERR("skb_put is about to fail...  "
363                                   "pad %d  len %d  rx_buf_size %d\n",
364                                   pad, len, bp->rx_buf_size);
365                         bnx2x_panic();
366                         return;
367                 }
368 #endif
369
370                 skb_reserve(skb, pad);
371                 skb_put(skb, len);
372
373                 skb->protocol = eth_type_trans(skb, bp->dev);
374                 skb->ip_summed = CHECKSUM_UNNECESSARY;
375
376                 {
377                         struct iphdr *iph;
378
379                         iph = (struct iphdr *)skb->data;
380 #ifdef BCM_VLAN
381                         /* If there is no Rx VLAN offloading -
382                            take the VLAN tag into account */
383                         if (unlikely(is_not_hwaccel_vlan_cqe))
384                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
385 #endif
386                         iph->check = 0;
387                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
388                 }
389
390                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
391                                          &cqe->fast_path_cqe, cqe_idx)) {
392 #ifdef BCM_VLAN
393                         if ((bp->vlgrp != NULL) &&
394                                 (le16_to_cpu(cqe->fast_path_cqe.
395                                 pars_flags.flags) & PARSING_FLAGS_VLAN))
396                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
397                                                  le16_to_cpu(cqe->fast_path_cqe.
398                                                              vlan_tag), skb);
399                         else
400 #endif
401                                 napi_gro_receive(&fp->napi, skb);
402                 } else {
403                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
404                            " - dropping packet!\n");
405                         dev_kfree_skb(skb);
406                 }
407
408
409                 /* put new skb in bin */
410                 fp->tpa_pool[queue].skb = new_skb;
411
412         } else {
413                 /* else drop the packet and keep the buffer in the bin */
414                 DP(NETIF_MSG_RX_STATUS,
415                    "Failed to allocate new skb - dropping packet!\n");
416                 fp->eth_q_stats.rx_skb_alloc_failed++;
417         }
418
419         fp->tpa_state[queue] = BNX2X_TPA_STOP;
420 }
421
422 /* Set Toeplitz hash value in the skb using the value from the
423  * CQE (calculated by HW).
424  */
425 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
426                                         struct sk_buff *skb)
427 {
428         /* Set Toeplitz hash from CQE */
429         if ((bp->dev->features & NETIF_F_RXHASH) &&
430             (cqe->fast_path_cqe.status_flags &
431              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
432                 skb->rxhash =
433                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
434 }
435
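/* NAPI Rx handler for a fastpath ring: walk the completion queue up to the
 * hardware index (or until the NAPI budget is exhausted), dispatching
 * slowpath CQEs, TPA start/stop events and regular packets, then update the
 * BD, CQ and SGE producers for the firmware.
 */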
436 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
437 {
438         struct bnx2x *bp = fp->bp;
439         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
440         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
441         int rx_pkt = 0;
442
443 #ifdef BNX2X_STOP_ON_ERROR
444         if (unlikely(bp->panic))
445                 return 0;
446 #endif
447
448         /* The CQ "next element" is the same size as a regular element,
449            that's why it's ok here */
450         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
451         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
452                 hw_comp_cons++;
453
454         bd_cons = fp->rx_bd_cons;
455         bd_prod = fp->rx_bd_prod;
456         bd_prod_fw = bd_prod;
457         sw_comp_cons = fp->rx_comp_cons;
458         sw_comp_prod = fp->rx_comp_prod;
459
460         /* Memory barrier necessary as speculative reads of the rx
461          * buffer can be ahead of the index in the status block
462          */
463         rmb();
464
465         DP(NETIF_MSG_RX_STATUS,
466            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
467            fp->index, hw_comp_cons, sw_comp_cons);
468
469         while (sw_comp_cons != hw_comp_cons) {
470                 struct sw_rx_bd *rx_buf = NULL;
471                 struct sk_buff *skb;
472                 union eth_rx_cqe *cqe;
473                 u8 cqe_fp_flags;
474                 u16 len, pad;
475
476                 comp_ring_cons = RCQ_BD(sw_comp_cons);
477                 bd_prod = RX_BD(bd_prod);
478                 bd_cons = RX_BD(bd_cons);
479
480                 /* Prefetch the page containing the BD descriptor
481                    at the producer's index. It will be needed when a new skb is
482                    allocated */
483                 prefetch((void *)(PAGE_ALIGN((unsigned long)
484                                              (&fp->rx_desc_ring[bd_prod])) -
485                                   PAGE_SIZE + 1));
486
487                 cqe = &fp->rx_comp_ring[comp_ring_cons];
488                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
489
490                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
491                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
492                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
493                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
494                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
495                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
496
497                 /* is this a slowpath msg? */
498                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
499                         bnx2x_sp_event(fp, cqe);
500                         goto next_cqe;
501
502                 /* this is an rx packet */
503                 } else {
504                         rx_buf = &fp->rx_buf_ring[bd_cons];
505                         skb = rx_buf->skb;
506                         prefetch(skb);
507                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
508                         pad = cqe->fast_path_cqe.placement_offset;
509
510                         /* - If the CQE is marked both TPA_START and TPA_END
511                          *   it is a non-TPA CQE.
512                          * - An FP CQE will always have the TPA_START and/or
513                          *   TPA_STOP flag set.
514                          */
515                         if ((!fp->disable_tpa) &&
516                             (TPA_TYPE(cqe_fp_flags) !=
517                                         (TPA_TYPE_START | TPA_TYPE_END))) {
518                                 u16 queue = cqe->fast_path_cqe.queue_index;
519
520                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521                                         DP(NETIF_MSG_RX_STATUS,
522                                            "calling tpa_start on queue %d\n",
523                                            queue);
524
525                                         bnx2x_tpa_start(fp, queue, skb,
526                                                         bd_cons, bd_prod);
527
528                                         /* Set Toeplitz hash for an LRO skb */
529                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
530
531                                         goto next_rx;
532                                 } else { /* TPA_STOP */
533                                         DP(NETIF_MSG_RX_STATUS,
534                                            "calling tpa_stop on queue %d\n",
535                                            queue);
536
537                                         if (!BNX2X_RX_SUM_FIX(cqe))
538                                                 BNX2X_ERR("STOP on non-TCP "
539                                                           "data\n");
540
541                                         /* This is the size of the linear data
542                                            on this skb */
543                                         len = le16_to_cpu(cqe->fast_path_cqe.
544                                                                 len_on_bd);
545                                         bnx2x_tpa_stop(bp, fp, queue, pad,
546                                                     len, cqe, comp_ring_cons);
547 #ifdef BNX2X_STOP_ON_ERROR
548                                         if (bp->panic)
549                                                 return 0;
550 #endif
551
552                                         bnx2x_update_sge_prod(fp,
553                                                         &cqe->fast_path_cqe);
554                                         goto next_cqe;
555                                 }
556                         }
557
558                         dma_sync_single_for_device(&bp->pdev->dev,
559                                         dma_unmap_addr(rx_buf, mapping),
560                                                    pad + RX_COPY_THRESH,
561                                                    DMA_FROM_DEVICE);
562                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
563
564                         /* is this an error packet? */
565                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
566                                 DP(NETIF_MSG_RX_ERR,
567                                    "ERROR  flags %x  rx packet %u\n",
568                                    cqe_fp_flags, sw_comp_cons);
569                                 fp->eth_q_stats.rx_err_discard_pkt++;
570                                 goto reuse_rx;
571                         }
572
573                         /* Since we don't have a jumbo ring,
574                          * copy small packets if mtu > 1500
575                          */
576                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577                             (len <= RX_COPY_THRESH)) {
578                                 struct sk_buff *new_skb;
579
580                                 new_skb = netdev_alloc_skb(bp->dev,
581                                                            len + pad);
582                                 if (new_skb == NULL) {
583                                         DP(NETIF_MSG_RX_ERR,
584                                            "ERROR  packet dropped "
585                                            "because of alloc failure\n");
586                                         fp->eth_q_stats.rx_skb_alloc_failed++;
587                                         goto reuse_rx;
588                                 }
589
590                                 /* aligned copy */
591                                 skb_copy_from_linear_data_offset(skb, pad,
592                                                     new_skb->data + pad, len);
593                                 skb_reserve(new_skb, pad);
594                                 skb_put(new_skb, len);
595
596                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
597
598                                 skb = new_skb;
599
600                         } else
601                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602                                 dma_unmap_single(&bp->pdev->dev,
603                                         dma_unmap_addr(rx_buf, mapping),
604                                                  bp->rx_buf_size,
605                                                  DMA_FROM_DEVICE);
606                                 skb_reserve(skb, pad);
607                                 skb_put(skb, len);
608
609                         } else {
610                                 DP(NETIF_MSG_RX_ERR,
611                                    "ERROR  packet dropped because "
612                                    "of alloc failure\n");
613                                 fp->eth_q_stats.rx_skb_alloc_failed++;
614 reuse_rx:
615                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
616                                 goto next_rx;
617                         }
618
619                         skb->protocol = eth_type_trans(skb, bp->dev);
620
621                         /* Set Toeplitz hash for a non-LRO skb */
622                         bnx2x_set_skb_rxhash(bp, cqe, skb);
623
624                         skb_checksum_none_assert(skb);
625
626                         if (bp->rx_csum) {
627                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
629                                 else
630                                         fp->eth_q_stats.hw_csum_err++;
631                         }
632                 }
633
634                 skb_record_rx_queue(skb, fp->index);
635
636 #ifdef BCM_VLAN
637                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639                      PARSING_FLAGS_VLAN))
640                         vlan_gro_receive(&fp->napi, bp->vlgrp,
641                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642                 else
643 #endif
644                         napi_gro_receive(&fp->napi, skb);
645
646
647 next_rx:
648                 rx_buf->skb = NULL;
649
650                 bd_cons = NEXT_RX_IDX(bd_cons);
651                 bd_prod = NEXT_RX_IDX(bd_prod);
652                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
653                 rx_pkt++;
654 next_cqe:
655                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657
658                 if (rx_pkt == budget)
659                         break;
660         } /* while */
661
662         fp->rx_bd_cons = bd_cons;
663         fp->rx_bd_prod = bd_prod_fw;
664         fp->rx_comp_cons = sw_comp_cons;
665         fp->rx_comp_prod = sw_comp_prod;
666
667         /* Update producers */
668         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
669                              fp->rx_sge_prod);
670
671         fp->rx_pkt += rx_pkt;
672         fp->rx_calls++;
673
674         return rx_pkt;
675 }
676
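/* MSI-X interrupt handler for a single fastpath ring: acknowledge the status
 * block with the IGU interrupt disabled and schedule the queue's NAPI
 * context, which performs the actual Rx/Tx work.
 */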
677 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678 {
679         struct bnx2x_fastpath *fp = fp_cookie;
680         struct bnx2x *bp = fp->bp;
681
682         /* Return here if interrupt is disabled */
683         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
684                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
685                 return IRQ_HANDLED;
686         }
687
688         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
689                          "[fp %d fw_sd %d igusb %d]\n",
690            fp->index, fp->fw_sb_id, fp->igu_sb_id);
691         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692
693 #ifdef BNX2X_STOP_ON_ERROR
694         if (unlikely(bp->panic))
695                 return IRQ_HANDLED;
696 #endif
697
698         /* Handle Rx and Tx according to MSI-X vector */
699         prefetch(fp->rx_cons_sb);
700         prefetch(fp->tx_cons_sb);
701         prefetch(&fp->sb_running_index[SM_RX_ID]);
702         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
703
704         return IRQ_HANDLED;
705 }
706
707 /* HW Lock for shared dual port PHYs */
708 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
709 {
710         mutex_lock(&bp->port.phy_mutex);
711
712         if (bp->port.need_hw_lock)
713                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
714 }
715
716 void bnx2x_release_phy_lock(struct bnx2x *bp)
717 {
718         if (bp->port.need_hw_lock)
719                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
720
721         mutex_unlock(&bp->port.phy_mutex);
722 }
723
724 void bnx2x_link_report(struct bnx2x *bp)
725 {
726         if (bp->flags & MF_FUNC_DIS) {
727                 netif_carrier_off(bp->dev);
728                 netdev_err(bp->dev, "NIC Link is Down\n");
729                 return;
730         }
731
732         if (bp->link_vars.link_up) {
733                 u16 line_speed;
734
735                 if (bp->state == BNX2X_STATE_OPEN)
736                         netif_carrier_on(bp->dev);
737                 netdev_info(bp->dev, "NIC Link is Up, ");
738
739                 line_speed = bp->link_vars.line_speed;
740                 if (IS_MF(bp)) {
741                         u16 vn_max_rate;
742
743                         vn_max_rate =
744                                 ((bp->mf_config[BP_VN(bp)] &
745                                   FUNC_MF_CFG_MAX_BW_MASK) >>
746                                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
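                        /* MAX_BW is in units of 100 Mbps; e.g. an illustrative
                         * field value of 25 gives vn_max_rate = 25 * 100 =
                         * 2500 Mbps, capping the reported speed of a
                         * 10000 Mbps link at 2500 Mbps below.
                         */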
747                         if (vn_max_rate < line_speed)
748                                 line_speed = vn_max_rate;
749                 }
750                 pr_cont("%d Mbps ", line_speed);
751
752                 if (bp->link_vars.duplex == DUPLEX_FULL)
753                         pr_cont("full duplex");
754                 else
755                         pr_cont("half duplex");
756
757                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
758                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
759                                 pr_cont(", receive ");
760                                 if (bp->link_vars.flow_ctrl &
761                                     BNX2X_FLOW_CTRL_TX)
762                                         pr_cont("& transmit ");
763                         } else {
764                                 pr_cont(", transmit ");
765                         }
766                         pr_cont("flow control ON");
767                 }
768                 pr_cont("\n");
769
770         } else { /* link_down */
771                 netif_carrier_off(bp->dev);
772                 netdev_err(bp->dev, "NIC Link is Down\n");
773         }
774 }
775
776 /* Returns the number of actually allocated BDs */
777 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
778                                       int rx_ring_size)
779 {
780         struct bnx2x *bp = fp->bp;
781         u16 ring_prod, cqe_ring_prod;
782         int i;
783
784         fp->rx_comp_cons = 0;
785         cqe_ring_prod = ring_prod = 0;
786         for (i = 0; i < rx_ring_size; i++) {
787                 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
788                         BNX2X_ERR("was only able to allocate "
789                                   "%d rx skbs on queue[%d]\n", i, fp->index);
790                         fp->eth_q_stats.rx_skb_alloc_failed++;
791                         break;
792                 }
793                 ring_prod = NEXT_RX_IDX(ring_prod);
794                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
795                 WARN_ON(ring_prod <= i);
796         }
797
798         fp->rx_bd_prod = ring_prod;
799         /* Limit the CQE producer by the CQE ring size */
800         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
801                                cqe_ring_prod);
802         fp->rx_pkt = fp->rx_calls = 0;
803
804         return i;
805 }
806
807 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
808 {
809         struct bnx2x *bp = fp->bp;
810         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
811                                               MAX_RX_AVAIL/bp->num_queues;
812
813         rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
814
815         bnx2x_alloc_rx_bds(fp, rx_ring_size);
816
817         /* Warning!
818          * This will generate an interrupt (to the TSTORM);
819          * it must only be done after the chip is initialized.
820          */
821         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
822                              fp->rx_sge_prod);
823 }
824
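/* Initialize the Rx rings for all queues: size the Rx buffers from the MTU,
 * pre-allocate the per-queue TPA pool skbs and SGE pages (disabling TPA on a
 * queue if allocation fails), then fill the BD and CQ rings and publish the
 * initial producers to the firmware.
 */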
825 void bnx2x_init_rx_rings(struct bnx2x *bp)
826 {
827         int func = BP_FUNC(bp);
828         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
829                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
830         u16 ring_prod;
831         int i, j;
832
833         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
834                 BNX2X_FW_IP_HDR_ALIGN_PAD;
835
836         DP(NETIF_MSG_IFUP,
837            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
838
839         for_each_queue(bp, j) {
840                 struct bnx2x_fastpath *fp = &bp->fp[j];
841
842                 if (!fp->disable_tpa) {
843                         for (i = 0; i < max_agg_queues; i++) {
844                                 fp->tpa_pool[i].skb =
845                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
846                                 if (!fp->tpa_pool[i].skb) {
847                                         BNX2X_ERR("Failed to allocate TPA "
848                                                   "skb pool for queue[%d] - "
849                                                   "disabling TPA on this "
850                                                   "queue!\n", j);
851                                         bnx2x_free_tpa_pool(bp, fp, i);
852                                         fp->disable_tpa = 1;
853                                         break;
854                                 }
855                                 dma_unmap_addr_set((struct sw_rx_bd *)
856                                                         &bp->fp->tpa_pool[i],
857                                                    mapping, 0);
858                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
859                         }
860
861                         /* "next page" elements initialization */
862                         bnx2x_set_next_page_sgl(fp);
863
864                         /* set SGEs bit mask */
865                         bnx2x_init_sge_ring_bit_mask(fp);
866
867                         /* Allocate SGEs and initialize the ring elements */
868                         for (i = 0, ring_prod = 0;
869                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
870
871                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
872                                         BNX2X_ERR("was only able to allocate "
873                                                   "%d rx sges\n", i);
874                                         BNX2X_ERR("disabling TPA for"
875                                                   " queue[%d]\n", j);
876                                         /* Cleanup already allocated elements */
877                                         bnx2x_free_rx_sge_range(bp,
878                                                                 fp, ring_prod);
879                                         bnx2x_free_tpa_pool(bp,
880                                                             fp, max_agg_queues);
881                                         fp->disable_tpa = 1;
882                                         ring_prod = 0;
883                                         break;
884                                 }
885                                 ring_prod = NEXT_SGE_IDX(ring_prod);
886                         }
887
888                         fp->rx_sge_prod = ring_prod;
889                 }
890         }
891
892         for_each_queue(bp, j) {
893                 struct bnx2x_fastpath *fp = &bp->fp[j];
894
895                 fp->rx_bd_cons = 0;
896
897                 bnx2x_set_next_page_rx_bd(fp);
898
899                 /* CQ ring */
900                 bnx2x_set_next_page_rx_cq(fp);
901
902                 /* Allocate BDs and initialize BD ring */
903                 bnx2x_alloc_rx_bd_ring(fp);
904
905                 if (j != 0)
906                         continue;
907
908                 if (!CHIP_IS_E2(bp)) {
909                         REG_WR(bp, BAR_USTRORM_INTMEM +
910                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
911                                U64_LO(fp->rx_comp_mapping));
912                         REG_WR(bp, BAR_USTRORM_INTMEM +
913                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
914                                U64_HI(fp->rx_comp_mapping));
915                 }
916         }
917 }
918
919 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920 {
921         int i;
922
923         for_each_queue(bp, i) {
924                 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926                 u16 bd_cons = fp->tx_bd_cons;
927                 u16 sw_prod = fp->tx_pkt_prod;
928                 u16 sw_cons = fp->tx_pkt_cons;
929
930                 while (sw_cons != sw_prod) {
931                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932                         sw_cons++;
933                 }
934         }
935 }
936
937 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938 {
939         int i, j;
940
941         for_each_queue(bp, j) {
942                 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944                 for (i = 0; i < NUM_RX_BD; i++) {
945                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946                         struct sk_buff *skb = rx_buf->skb;
947
948                         if (skb == NULL)
949                                 continue;
950
951                         dma_unmap_single(&bp->pdev->dev,
952                                          dma_unmap_addr(rx_buf, mapping),
953                                          bp->rx_buf_size, DMA_FROM_DEVICE);
954
955                         rx_buf->skb = NULL;
956                         dev_kfree_skb(skb);
957                 }
958                 if (!fp->disable_tpa)
959                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
961                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
962         }
963 }
964
965 void bnx2x_free_skbs(struct bnx2x *bp)
966 {
967         bnx2x_free_tx_skbs(bp);
968         bnx2x_free_rx_skbs(bp);
969 }
970
971 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972 {
973         int i, offset = 1;
974
975         free_irq(bp->msix_table[0].vector, bp->dev);
976         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977            bp->msix_table[0].vector);
978
979 #ifdef BCM_CNIC
980         offset++;
981 #endif
982         for_each_queue(bp, i) {
983                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
984                    "state %x\n", i, bp->msix_table[i + offset].vector,
985                    bnx2x_fp(bp, i, state));
986
987                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988         }
989 }
990
991 void bnx2x_free_irq(struct bnx2x *bp)
992 {
993         if (bp->flags & USING_MSIX_FLAG)
994                 bnx2x_free_msix_irqs(bp);
995         else if (bp->flags & USING_MSI_FLAG)
996                 free_irq(bp->pdev->irq, bp->dev);
997         else
998                 free_irq(bp->pdev->irq, bp->dev);
999 }
1000
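/* Request MSI-X vectors: entry 0 is the slowpath vector, followed by an
 * optional CNIC entry and one entry per fastpath queue. If the PCI layer
 * grants fewer vectors than requested (but at least BNX2X_MIN_MSIX_VEC_CNT),
 * the number of queues is reduced to match and the request is retried;
 * otherwise an error is returned so the caller can fall back to MSI/INTx.
 */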
1001 int bnx2x_enable_msix(struct bnx2x *bp)
1002 {
1003         int msix_vec = 0, i, rc, req_cnt;
1004
1005         bp->msix_table[msix_vec].entry = msix_vec;
1006         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1007            bp->msix_table[0].entry);
1008         msix_vec++;
1009
1010 #ifdef BCM_CNIC
1011         bp->msix_table[msix_vec].entry = msix_vec;
1012         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1013            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1014         msix_vec++;
1015 #endif
1016         for_each_queue(bp, i) {
1017                 bp->msix_table[msix_vec].entry = msix_vec;
1018                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1019                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1020                 msix_vec++;
1021         }
1022
1023         req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1024
1025         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1026
1027         /*
1028          * reconfigure number of tx/rx queues according to available
1029          * MSI-X vectors
1030          */
1031         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1032                 /* how many fewer vectors will we have? */
1033                 int diff = req_cnt - rc;
1034
1035                 DP(NETIF_MSG_IFUP,
1036                    "Trying to use less MSI-X vectors: %d\n", rc);
1037
1038                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1039
1040                 if (rc) {
1041                         DP(NETIF_MSG_IFUP,
1042                            "MSI-X is not attainable  rc %d\n", rc);
1043                         return rc;
1044                 }
1045                 /*
1046                  * decrease number of queues by number of unallocated entries
1047                  */
1048                 bp->num_queues -= diff;
1049
1050                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1051                                   bp->num_queues);
1052         } else if (rc) {
1053                 /* fall back to INTx if there is not enough memory */
1054                 if (rc == -ENOMEM)
1055                         bp->flags |= DISABLE_MSI_FLAG;
1056                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1057                 return rc;
1058         }
1059
1060         bp->flags |= USING_MSIX_FLAG;
1061
1062         return 0;
1063 }
1064
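/* Register the MSI-X interrupt handlers: the slowpath handler on vector 0 and
 * one fastpath handler per queue, skipping the CNIC slot when it is compiled
 * in. If a fastpath request fails, all previously requested MSI-X IRQs are
 * freed.
 */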
1065 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1066 {
1067         int i, rc, offset = 1;
1068
1069         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1070                          bp->dev->name, bp->dev);
1071         if (rc) {
1072                 BNX2X_ERR("request sp irq failed\n");
1073                 return -EBUSY;
1074         }
1075
1076 #ifdef BCM_CNIC
1077         offset++;
1078 #endif
1079         for_each_queue(bp, i) {
1080                 struct bnx2x_fastpath *fp = &bp->fp[i];
1081                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1082                          bp->dev->name, i);
1083
1084                 rc = request_irq(bp->msix_table[offset].vector,
1085                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1086                 if (rc) {
1087                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
1088                         bnx2x_free_msix_irqs(bp);
1089                         return -EBUSY;
1090                 }
1091
1092                 offset++;
1093                 fp->state = BNX2X_FP_STATE_IRQ;
1094         }
1095
1096         i = BNX2X_NUM_QUEUES(bp);
1097         offset = 1 + CNIC_CONTEXT_USE;
1098         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1099                " ... fp[%d] %d\n",
1100                bp->msix_table[0].vector,
1101                0, bp->msix_table[offset].vector,
1102                i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104         return 0;
1105 }
1106
1107 int bnx2x_enable_msi(struct bnx2x *bp)
1108 {
1109         int rc;
1110
1111         rc = pci_enable_msi(bp->pdev);
1112         if (rc) {
1113                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114                 return -1;
1115         }
1116         bp->flags |= USING_MSI_FLAG;
1117
1118         return 0;
1119 }
1120
1121 static int bnx2x_req_irq(struct bnx2x *bp)
1122 {
1123         unsigned long flags;
1124         int rc;
1125
1126         if (bp->flags & USING_MSI_FLAG)
1127                 flags = 0;
1128         else
1129                 flags = IRQF_SHARED;
1130
1131         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132                          bp->dev->name, bp->dev);
1133         if (!rc)
1134                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136         return rc;
1137 }
1138
1139 static void bnx2x_napi_enable(struct bnx2x *bp)
1140 {
1141         int i;
1142
1143         for_each_queue(bp, i)
1144                 napi_enable(&bnx2x_fp(bp, i, napi));
1145 }
1146
1147 static void bnx2x_napi_disable(struct bnx2x *bp)
1148 {
1149         int i;
1150
1151         for_each_queue(bp, i)
1152                 napi_disable(&bnx2x_fp(bp, i, napi));
1153 }
1154
1155 void bnx2x_netif_start(struct bnx2x *bp)
1156 {
1157         int intr_sem;
1158
1159         intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1161
1162         if (intr_sem) {
1163                 if (netif_running(bp->dev)) {
1164                         bnx2x_napi_enable(bp);
1165                         bnx2x_int_enable(bp);
1166                         if (bp->state == BNX2X_STATE_OPEN)
1167                                 netif_tx_wake_all_queues(bp->dev);
1168                 }
1169         }
1170 }
1171
1172 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173 {
1174         bnx2x_int_disable_sync(bp, disable_hw);
1175         bnx2x_napi_disable(bp);
1176         netif_tx_disable(bp->dev);
1177 }
1178
1179 void bnx2x_set_num_queues(struct bnx2x *bp)
1180 {
1181         switch (bp->multi_mode) {
1182         case ETH_RSS_MODE_DISABLED:
1183                 bp->num_queues = 1;
1184                 break;
1185         case ETH_RSS_MODE_REGULAR:
1186                 bp->num_queues = bnx2x_calc_num_queues(bp);
1187                 break;
1188
1189         default:
1190                 bp->num_queues = 1;
1191                 break;
1192         }
1193 }
1194
1195 static void bnx2x_release_firmware(struct bnx2x *bp)
1196 {
1197         kfree(bp->init_ops_offsets);
1198         kfree(bp->init_ops);
1199         kfree(bp->init_data);
1200         release_firmware(bp->firmware);
1201 }
1202
1203 /* must be called with rtnl_lock */
1204 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1205 {
1206         u32 load_code;
1207         int i, rc;
1208
1209         /* Set init arrays */
1210         rc = bnx2x_init_firmware(bp);
1211         if (rc) {
1212                 BNX2X_ERR("Error loading firmware\n");
1213                 return rc;
1214         }
1215
1216 #ifdef BNX2X_STOP_ON_ERROR
1217         if (unlikely(bp->panic))
1218                 return -EPERM;
1219 #endif
1220
1221         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1222
1223         /* must be called before memory allocation and HW init */
1224         bnx2x_ilt_set_info(bp);
1225
1226         if (bnx2x_alloc_mem(bp))
1227                 return -ENOMEM;
1228
1229         netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1230         rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1231         if (rc) {
1232                 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1233                 goto load_error0;
1234         }
1235
1236         for_each_queue(bp, i)
1237                 bnx2x_fp(bp, i, disable_tpa) =
1238                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1239
1240         bnx2x_napi_enable(bp);
1241
1242         /* Send the LOAD_REQUEST command to the MCP.
1243            It returns the type of LOAD command: if this is the first port
1244            to be initialized, the common blocks should be initialized,
1245            otherwise not.
1246         */
1247         if (!BP_NOMCP(bp)) {
1248                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1249                 if (!load_code) {
1250                         BNX2X_ERR("MCP response failure, aborting\n");
1251                         rc = -EBUSY;
1252                         goto load_error1;
1253                 }
1254                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1255                         rc = -EBUSY; /* other port in diagnostic mode */
1256                         goto load_error1;
1257                 }
1258
1259         } else {
1260                 int path = BP_PATH(bp);
1261                 int port = BP_PORT(bp);
1262
1263                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1264                    path, load_count[path][0], load_count[path][1],
1265                    load_count[path][2]);
1266                 load_count[path][0]++;
1267                 load_count[path][1 + port]++;
1268                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1269                    path, load_count[path][0], load_count[path][1],
1270                    load_count[path][2]);
1271                 if (load_count[path][0] == 1)
1272                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1273                 else if (load_count[path][1 + port] == 1)
1274                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1275                 else
1276                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1277         }
1278
1279         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1280             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1281             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1282                 bp->port.pmf = 1;
1283         else
1284                 bp->port.pmf = 0;
1285         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1286
1287         /* Initialize HW */
1288         rc = bnx2x_init_hw(bp, load_code);
1289         if (rc) {
1290                 BNX2X_ERR("HW init failed, aborting\n");
1291                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1292                 goto load_error2;
1293         }
1294
1295         /* Connect to IRQs */
1296         rc = bnx2x_setup_irqs(bp);
1297         if (rc) {
1298                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1299                 goto load_error2;
1300         }
1301
1302         /* Setup NIC internals and enable interrupts */
1303         bnx2x_nic_init(bp, load_code);
1304
1305         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1306             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1307             (bp->common.shmem2_base))
1308                 SHMEM2_WR(bp, dcc_support,
1309                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1310                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1311
1312         /* Send LOAD_DONE command to MCP */
1313         if (!BP_NOMCP(bp)) {
1314                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1315                 if (!load_code) {
1316                         BNX2X_ERR("MCP response failure, aborting\n");
1317                         rc = -EBUSY;
1318                         goto load_error3;
1319                 }
1320         }
1321
1322         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1323
1324         rc = bnx2x_func_start(bp);
1325         if (rc) {
1326                 BNX2X_ERR("Function start failed!\n");
1327 #ifndef BNX2X_STOP_ON_ERROR
1328                 goto load_error3;
1329 #else
1330                 bp->panic = 1;
1331                 return -EBUSY;
1332 #endif
1333         }
1334
1335         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1336         if (rc) {
1337                 BNX2X_ERR("Setup leading failed!\n");
1338 #ifndef BNX2X_STOP_ON_ERROR
1339                 goto load_error3;
1340 #else
1341                 bp->panic = 1;
1342                 return -EBUSY;
1343 #endif
1344         }
1345
1346         if (!CHIP_IS_E1(bp) &&
1347             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1348                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1349                 bp->flags |= MF_FUNC_DIS;
1350         }
1351
1352 #ifdef BCM_CNIC
1353         /* Enable Timer scan */
1354         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1355 #endif
1356
1357         for_each_nondefault_queue(bp, i) {
1358                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1359                 if (rc)
1360 #ifdef BCM_CNIC
1361                         goto load_error4;
1362 #else
1363                         goto load_error3;
1364 #endif
1365         }
1366
1367         /* Now that the Clients are configured we are ready to work */
1368         bp->state = BNX2X_STATE_OPEN;
1369
1370         bnx2x_set_eth_mac(bp, 1);
1371
1372         if (bp->port.pmf)
1373                 bnx2x_initial_phy_init(bp, load_mode);
1374
1375         /* Start fast path */
1376         switch (load_mode) {
1377         case LOAD_NORMAL:
1378                 /* Tx queues should only be re-enabled */
1379                 netif_tx_wake_all_queues(bp->dev);
1380                 /* Initialize the receive filter. */
1381                 bnx2x_set_rx_mode(bp->dev);
1382                 break;
1383
1384         case LOAD_OPEN:
1385                 netif_tx_start_all_queues(bp->dev);
1386                 smp_mb__after_clear_bit();
1387                 /* Initialize the receive filter. */
1388                 bnx2x_set_rx_mode(bp->dev);
1389                 break;
1390
1391         case LOAD_DIAG:
1392                 /* Initialize the receive filter. */
1393                 bnx2x_set_rx_mode(bp->dev);
1394                 bp->state = BNX2X_STATE_DIAG;
1395                 break;
1396
1397         default:
1398                 break;
1399         }
1400
1401         if (!bp->port.pmf)
1402                 bnx2x__link_status_update(bp);
1403
1404         /* start the timer */
1405         mod_timer(&bp->timer, jiffies + bp->current_interval);
1406
1407 #ifdef BCM_CNIC
1408         bnx2x_setup_cnic_irq_info(bp);
1409         if (bp->state == BNX2X_STATE_OPEN)
1410                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1411 #endif
1412         bnx2x_inc_load_cnt(bp);
1413
1414         bnx2x_release_firmware(bp);
1415
1416         return 0;
1417
1418 #ifdef BCM_CNIC
1419 load_error4:
1420         /* Disable Timer scan */
1421         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1422 #endif
1423 load_error3:
1424         bnx2x_int_disable_sync(bp, 1);
1425
1426         /* Free SKBs, SGEs, TPA pool and driver internals */
1427         bnx2x_free_skbs(bp);
1428         for_each_queue(bp, i)
1429                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1430
1431         /* Release IRQs */
1432         bnx2x_free_irq(bp);
1433 load_error2:
1434         if (!BP_NOMCP(bp)) {
1435                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1436                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1437         }
1438
1439         bp->port.pmf = 0;
1440 load_error1:
1441         bnx2x_napi_disable(bp);
1442 load_error0:
1443         bnx2x_free_mem(bp);
1444
1445         bnx2x_release_firmware(bp);
1446
1447         return rc;
1448 }
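
/*
 * Editor's sketch (not part of the driver): the load_error4..load_error0
 * labels above form the usual "unwind in reverse order" ladder - each label
 * undoes one more initialization step than the one below it, so a failure
 * at any stage jumps to exactly the right cleanup depth.  Schematically,
 * with hypothetical step_a()/step_b()/undo_a() helpers:
 */
#if 0   /* illustrative only, not compiled */
static int example_load(void)
{
        int rc;

        rc = step_a();
        if (rc)
                goto err0;              /* nothing to undo yet */
        rc = step_b();
        if (rc)
                goto err1;              /* undo step_a() only */
        return 0;

err1:
        undo_a();
err0:
        return rc;
}
#endif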
1449
1450 /* must be called with rtnl_lock */
1451 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1452 {
1453         int i;
1454
1455         if (bp->state == BNX2X_STATE_CLOSED) {
1456                 /* Interface has been removed - nothing to recover */
1457                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1458                 bp->is_leader = 0;
1459                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1460                 smp_wmb();
1461
1462                 return -EINVAL;
1463         }
1464
1465 #ifdef BCM_CNIC
1466         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1467 #endif
1468         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1469
1470         /* Set "drop all" */
1471         bp->rx_mode = BNX2X_RX_MODE_NONE;
1472         bnx2x_set_storm_rx_mode(bp);
1473
1474         /* Stop Tx */
1475         bnx2x_tx_disable(bp);
1476
1477         del_timer_sync(&bp->timer);
1478
1479         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1480                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1481
1482         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1483
1484         /* Clean up the chip if needed */
1485         if (unload_mode != UNLOAD_RECOVERY)
1486                 bnx2x_chip_cleanup(bp, unload_mode);
1487         else {
1488                 /* Disable HW interrupts, NAPI and Tx */
1489                 bnx2x_netif_stop(bp, 1);
1490
1491                 /* Release IRQs */
1492                 bnx2x_free_irq(bp);
1493         }
1494
1495         bp->port.pmf = 0;
1496
1497         /* Free SKBs, SGEs, TPA pool and driver internals */
1498         bnx2x_free_skbs(bp);
1499         for_each_queue(bp, i)
1500                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1501
1502         bnx2x_free_mem(bp);
1503
1504         bp->state = BNX2X_STATE_CLOSED;
1505
1506         /* The last driver must disable the "close the gate" functionality
1507          * if there is no parity attention or "process kill" pending.
1508          */
1509         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1510             bnx2x_reset_is_done(bp))
1511                 bnx2x_disable_close_the_gate(bp);
1512
1513         /* Reset the MCP mailbox sequence if a recovery is in progress */
1514         if (unload_mode == UNLOAD_RECOVERY)
1515                 bp->fw_seq = 0;
1516
1517         return 0;
1518 }
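
/*
 * Editor's sketch (assumption about intent, not driver code): the unload
 * path relies on a shared load counter (bnx2x_dec_load_cnt) so that only
 * the last function to go down performs the global "close the gate"
 * handling.  A minimal model of that pattern, with hypothetical names:
 */
#if 0   /* illustrative only, not compiled */
static atomic_t example_load_cnt = ATOMIC_INIT(0);

static void example_unload(void)
{
        /* only the instance that drops the counter to zero cleans up */
        if (atomic_dec_and_test(&example_load_cnt))
                pr_info("last instance down - global cleanup\n");
}
#endif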
1519
1520 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1521 {
1522         u16 pmcsr;
1523
1524         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1525
1526         switch (state) {
1527         case PCI_D0:
1528                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1529                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1530                                        PCI_PM_CTRL_PME_STATUS));
1531
1532                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1533                         /* delay required during transition out of D3hot */
1534                         msleep(20);
1535                 break;
1536
1537         case PCI_D3hot:
1538                 /* If there are other clients above us, don't
1539                    shut down the power */
1540                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1541                         return 0;
1542                 /* Don't shut down the power for emulation and FPGA */
1543                 if (CHIP_REV_IS_SLOW(bp))
1544                         return 0;
1545
1546                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1547                 pmcsr |= 3;
1548
1549                 if (bp->wol)
1550                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1551
1552                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1553                                       pmcsr);
1554
1555                 /* No more memory access after this point until
1556                  * device is brought back to D0.
1557                  */
1558                 break;
1559
1560         default:
1561                 return -EINVAL;
1562         }
1563         return 0;
1564 }
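
/*
 * Editor's sketch (not part of the driver): the PCI_PM_CTRL handling above
 * is a read-modify-write of the two power-state bits (0 = D0, 3 = D3hot).
 * A stripped-down illustration that only forces D0, assuming a pci_dev and
 * its PM capability offset are available:
 */
#if 0   /* illustrative only, not compiled */
static void example_force_d0(struct pci_dev *pdev, int pm_cap)
{
        u16 pmcsr;

        pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcsr);
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;       /* 00b selects D0 */
        pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmcsr);
        msleep(20);                             /* settle time out of D3hot */
}
#endif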
1565
1566 /*
1567  * net_device service functions
1568  */
1569 int bnx2x_poll(struct napi_struct *napi, int budget)
1570 {
1571         int work_done = 0;
1572         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1573                                                  napi);
1574         struct bnx2x *bp = fp->bp;
1575
1576         while (1) {
1577 #ifdef BNX2X_STOP_ON_ERROR
1578                 if (unlikely(bp->panic)) {
1579                         napi_complete(napi);
1580                         return 0;
1581                 }
1582 #endif
1583
1584                 if (bnx2x_has_tx_work(fp))
1585                         bnx2x_tx_int(fp);
1586
1587                 if (bnx2x_has_rx_work(fp)) {
1588                         work_done += bnx2x_rx_int(fp, budget - work_done);
1589
1590                         /* must not complete if we consumed full budget */
1591                         if (work_done >= budget)
1592                                 break;
1593                 }
1594
1595                 /* Fall out from the NAPI loop if needed */
1596                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1597                         bnx2x_update_fpsb_idx(fp);
1598                         /* bnx2x_has_rx_work() reads the status block,
1599                          * so the status block indices must actually have
1600                          * been read (bnx2x_update_fpsb_idx) before this
1601                          * check (bnx2x_has_rx_work); otherwise we might
1602                          * write the "newer" value of the status block to
1603                          * the IGU.  If a DMA arrived right after
1604                          * bnx2x_has_rx_work and there were no rmb, the
1605                          * memory read (bnx2x_update_fpsb_idx) could be
1606                          * postponed until just before bnx2x_ack_sb.  In
1607                          * that case there would never be another interrupt
1608                          * until the status block is updated again, even
1609                          * though there is still unhandled work.
1610                          */
1611                         rmb();
1612
1613                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1614                                 napi_complete(napi);
1615                                 /* Re-enable interrupts */
1616                                 DP(NETIF_MSG_HW,
1617                                    "Update index to %d\n", fp->fp_hc_idx);
1618                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1619                                              le16_to_cpu(fp->fp_hc_idx),
1620                                              IGU_INT_ENABLE, 1);
1621                                 break;
1622                         }
1623                 }
1624         }
1625
1626         return work_done;
1627 }
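
/*
 * Editor's sketch (not part of the driver): bnx2x_poll() follows the
 * standard NAPI contract - consume at most 'budget' RX packets and call
 * napi_complete() plus re-enable the device interrupt only once no work
 * remains.  A generic skeleton with hypothetical example_rx() and
 * example_irq_enable() helpers:
 */
#if 0   /* illustrative only, not compiled */
static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_rx(napi, budget);       /* device specific */

        if (work_done < budget) {
                napi_complete(napi);
                example_irq_enable(napi);               /* device specific */
        }
        return work_done;
}
#endif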
1628
1629 /* We split the first BD into a headers BD and a data BD
1630  * to ease the pain of our fellow microcode engineers;
1631  * one DMA mapping is shared by both BDs.
1632  * So far this has only been observed to happen
1633  * in Other Operating Systems(TM).
1634  */
1635 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1636                                    struct bnx2x_fastpath *fp,
1637                                    struct sw_tx_bd *tx_buf,
1638                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1639                                    u16 bd_prod, int nbd)
1640 {
1641         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1642         struct eth_tx_bd *d_tx_bd;
1643         dma_addr_t mapping;
1644         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1645
1646         /* first fix first BD */
1647         h_tx_bd->nbd = cpu_to_le16(nbd);
1648         h_tx_bd->nbytes = cpu_to_le16(hlen);
1649
1650         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1651            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1652            h_tx_bd->addr_lo, h_tx_bd->nbd);
1653
1654         /* now get a new data BD
1655          * (after the pbd) and fill it */
1656         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1657         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1658
1659         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1660                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1661
1662         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1663         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1664         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1665
1666         /* this marks the BD as one that has no individual mapping */
1667         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1668
1669         DP(NETIF_MSG_TX_QUEUED,
1670            "TSO split data size is %d (%x:%x)\n",
1671            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1672
1673         /* update tx_bd */
1674         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1675
1676         return bd_prod;
1677 }
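
/*
 * Editor's sketch (not part of the driver): the split above keeps a single
 * DMA mapping and carves it into a header BD of 'hlen' bytes plus a data BD
 * that starts at mapping + hlen.  In plain arithmetic:
 */
#if 0   /* illustrative only, not compiled */
static void example_split(dma_addr_t mapping, u16 len, u16 hlen,
                          dma_addr_t *hdr_addr, u16 *hdr_len,
                          dma_addr_t *data_addr, u16 *data_len)
{
        *hdr_addr = mapping;            /* header BD reuses the mapping */
        *hdr_len = hlen;
        *data_addr = mapping + hlen;    /* data BD starts right after it */
        *data_len = len - hlen;
}
#endif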
1678
1679 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1680 {
1681         if (fix > 0)
1682                 csum = (u16) ~csum_fold(csum_sub(csum,
1683                                 csum_partial(t_header - fix, fix, 0)));
1684
1685         else if (fix < 0)
1686                 csum = (u16) ~csum_fold(csum_add(csum,
1687                                 csum_partial(t_header, -fix, 0)));
1688
1689         return swab16(csum);
1690 }
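
/*
 * Editor's sketch (not part of the driver): bnx2x_csum_fix() is used when
 * the checksum supplied by the stack starts 'fix' bytes away from the
 * transport header, so the partial sum over those bytes is subtracted
 * (fix > 0) or added back (fix < 0) before folding.  A hypothetical call
 * site mirroring the one in bnx2x_set_pbd_csum():
 */
#if 0   /* illustrative only, not compiled */
{
        s8 fix = SKB_CS_OFF(skb);       /* signed offset, may be negative */
        u16 csum = bnx2x_csum_fix(skb_transport_header(skb),
                                  SKB_CS(skb), fix);
}
#endif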
1691
1692 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1693 {
1694         u32 rc;
1695
1696         if (skb->ip_summed != CHECKSUM_PARTIAL)
1697                 rc = XMIT_PLAIN;
1698
1699         else {
1700                 if (skb->protocol == htons(ETH_P_IPV6)) {
1701                         rc = XMIT_CSUM_V6;
1702                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1703                                 rc |= XMIT_CSUM_TCP;
1704
1705                 } else {
1706                         rc = XMIT_CSUM_V4;
1707                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1708                                 rc |= XMIT_CSUM_TCP;
1709                 }
1710         }
1711
1712         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1713                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1714
1715         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1716                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1717
1718         return rc;
1719 }
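
/*
 * Editor's sketch (not part of the driver): xmit_type is a plain bitmask,
 * so later code can test flag combinations cheaply, e.g.:
 */
#if 0   /* illustrative only, not compiled */
{
        u32 xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4;

        if (xmit_type & XMIT_GSO)       /* matches any GSO flavour */
                /* take the TSO path */;
        if (xmit_type & XMIT_CSUM_V6)   /* not set in this example */
                /* IPv6 checksum offload */;
}
#endif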
1720
1721 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1722 /* check if the packet requires linearization (packet is too fragmented);
1723    no need to check fragmentation if page size > 8K (there will be no
1724    violation of FW restrictions) */
1725 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1726                              u32 xmit_type)
1727 {
1728         int to_copy = 0;
1729         int hlen = 0;
1730         int first_bd_sz = 0;
1731
1732         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1733         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1734
1735                 if (xmit_type & XMIT_GSO) {
1736                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1737                         /* Check if LSO packet needs to be copied:
1738                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1739                         int wnd_size = MAX_FETCH_BD - 3;
1740                         /* Number of windows to check */
1741                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1742                         int wnd_idx = 0;
1743                         int frag_idx = 0;
1744                         u32 wnd_sum = 0;
1745
1746                         /* Headers length */
1747                         hlen = (int)(skb_transport_header(skb) - skb->data) +
1748                                 tcp_hdrlen(skb);
1749
1750                         /* Amount of data (w/o headers) in the linear part of the SKB */
1751                         first_bd_sz = skb_headlen(skb) - hlen;
1752
1753                         wnd_sum  = first_bd_sz;
1754
1755                         /* Calculate the first sum - it's special */
1756                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1757                                 wnd_sum +=
1758                                         skb_shinfo(skb)->frags[frag_idx].size;
1759
1760                         /* If there was data in the linear part of the skb - check it */
1761                         if (first_bd_sz > 0) {
1762                                 if (unlikely(wnd_sum < lso_mss)) {
1763                                         to_copy = 1;
1764                                         goto exit_lbl;
1765                                 }
1766
1767                                 wnd_sum -= first_bd_sz;
1768                         }
1769
1770                         /* Others are easier: run through the frag list and
1771                            check all windows */
1772                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1773                                 wnd_sum +=
1774                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1775
1776                                 if (unlikely(wnd_sum < lso_mss)) {
1777                                         to_copy = 1;
1778                                         break;
1779                                 }
1780                                 wnd_sum -=
1781                                         skb_shinfo(skb)->frags[wnd_idx].size;
1782                         }
1783                 } else {
1784                         /* a non-LSO packet that is too fragmented must
1785                            always be linearized */
1786                         to_copy = 1;
1787                 }
1788         }
1789
1790 exit_lbl:
1791         if (unlikely(to_copy))
1792                 DP(NETIF_MSG_TX_QUEUED,
1793                    "Linearization IS REQUIRED for %s packet. "
1794                    "num_frags %d  hlen %d  first_bd_sz %d\n",
1795                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1796                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1797
1798         return to_copy;
1799 }
1800 #endif
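
/*
 * Editor's sketch (not part of the driver): the check above slides a window
 * of (MAX_FETCH_BD - 3) consecutive fragments over the packet and requires
 * every window to carry at least one MSS worth of data, since the FW cannot
 * fetch more BDs than that per segment.  A stand-alone model of the test:
 */
#if 0   /* illustrative only, not compiled */
static bool example_needs_linearize(const u32 *frag_sz, int nfrags,
                                    int wnd_size, u32 mss)
{
        u32 wnd_sum = 0;
        int i;

        for (i = 0; i < nfrags; i++) {
                wnd_sum += frag_sz[i];
                if (i >= wnd_size)              /* drop the oldest fragment */
                        wnd_sum -= frag_sz[i - wnd_size];
                if (i >= wnd_size - 1 && wnd_sum < mss)
                        return true;            /* some window is < one MSS */
        }
        return false;
}
#endif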
1801
1802 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1803                                      struct eth_tx_parse_bd_e2 *pbd,
1804                                      u32 xmit_type)
1805 {
1806         pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1807                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1808         if ((xmit_type & XMIT_GSO_V6) &&
1809             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1810                 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1811 }
1812
1813 /**
1814  * Update the parsing BD in the GSO case: MSS, TCP sequence number,
1815  * TCP flags and the pseudo-header checksum (computed with a zero length).
1816  *
1817  * @param skb
1818  * @param pbd
1819  * @param xmit_type
1820  */
1821 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1822                                      struct eth_tx_parse_bd_e1x *pbd,
1823                                      u32 xmit_type)
1824 {
1825         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1826         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1827         pbd->tcp_flags = pbd_tcp_flags(skb);
1828
1829         if (xmit_type & XMIT_GSO_V4) {
1830                 pbd->ip_id = swab16(ip_hdr(skb)->id);
1831                 pbd->tcp_pseudo_csum =
1832                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1833                                                   ip_hdr(skb)->daddr,
1834                                                   0, IPPROTO_TCP, 0));
1835
1836         } else
1837                 pbd->tcp_pseudo_csum =
1838                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1839                                                 &ipv6_hdr(skb)->daddr,
1840                                                 0, IPPROTO_TCP, 0));
1841
1842         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1843 }
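
/*
 * Editor's sketch (not part of the driver): for TSO the checksum is
 * recomputed per segment, so the driver seeds the parsing BD with a
 * pseudo-header checksum whose length field is zero; the FW adds the real
 * segment length later.  For IPv4 that seed boils down to:
 */
#if 0   /* illustrative only, not compiled */
{
        struct iphdr *iph = ip_hdr(skb);

        u16 seed = swab16(~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                             0 /* len */, IPPROTO_TCP, 0));
}
#endif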
1844
1845 /**
1846  * Set the checksum-offload fields of the E2 parsing BD.
1847  * @param bp
1848  * @param skb
1849  * @param pbd
1850  * @param xmit_type
1851  *
1852  * @return header length in bytes
1853  */
1854 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1855         struct eth_tx_parse_bd_e2 *pbd,
1856         u32 xmit_type)
1857 {
1858         pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1859                 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1860
1861         pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1862                                           skb->data) / 2) <<
1863                 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1864
1865         return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1866 }
1867
1868 /**
1869  * Set the checksum-offload fields of the E1x parsing BD.
1870  * @param bp
1871  * @param skb
1872  * @param pbd
1873  * @param xmit_type
1874  *
1875  * @return header length in bytes
1876  */
1877 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1878         struct eth_tx_parse_bd_e1x *pbd,
1879         u32 xmit_type)
1880 {
1881         u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1882
1883         /* for now NS flag is not used in Linux */
1884         pbd->global_data =
1885                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1886                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1887
1888         pbd->ip_hlen_w = (skb_transport_header(skb) -
1889                         skb_network_header(skb)) / 2;
1890
1891         hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1892
1893         pbd->total_hlen_w = cpu_to_le16(hlen);
1894         hlen = hlen*2;
1895
1896         if (xmit_type & XMIT_CSUM_TCP) {
1897                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1898
1899         } else {
1900                 s8 fix = SKB_CS_OFF(skb); /* signed! */
1901
1902                 DP(NETIF_MSG_TX_QUEUED,
1903                    "hlen %d  fix %d  csum before fix %x\n",
1904                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1905
1906                 /* HW bug: fixup the CSUM */
1907                 pbd->tcp_pseudo_csum =
1908                         bnx2x_csum_fix(skb_transport_header(skb),
1909                                        SKB_CS(skb), fix);
1910
1911                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1912                    pbd->tcp_pseudo_csum);
1913         }
1914
1915         return hlen;
1916 }
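
/*
 * Editor's note (worked example, not driver code): the parsing BD keeps all
 * header lengths in 16-bit words while the rest of the driver uses bytes,
 * hence the "hlen = hlen*2" conversion above.  For a plain TCP/IPv4 frame:
 *
 *      14 B (ETH) + 20 B (IP) + 20 B (TCP) = 54 bytes = 27 words
 *
 * so total_hlen_w would be 27 and the returned hlen 54.
 */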
1917
1918 /* called with netif_tx_lock
1919  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1920  * netif_wake_queue()
1921  */
1922 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1923 {
1924         struct bnx2x *bp = netdev_priv(dev);
1925         struct bnx2x_fastpath *fp;
1926         struct netdev_queue *txq;
1927         struct sw_tx_bd *tx_buf;
1928         struct eth_tx_start_bd *tx_start_bd;
1929         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1930         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1931         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1932         u16 pkt_prod, bd_prod;
1933         int nbd, fp_index;
1934         dma_addr_t mapping;
1935         u32 xmit_type = bnx2x_xmit_type(bp, skb);
1936         int i;
1937         u8 hlen = 0;
1938         __le16 pkt_size = 0;
1939         struct ethhdr *eth;
1940         u8 mac_type = UNICAST_ADDRESS;
1941
1942 #ifdef BNX2X_STOP_ON_ERROR
1943         if (unlikely(bp->panic))
1944                 return NETDEV_TX_BUSY;
1945 #endif
1946
1947         fp_index = skb_get_queue_mapping(skb);
1948         txq = netdev_get_tx_queue(dev, fp_index);
1949
1950         fp = &bp->fp[fp_index];
1951
1952         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1953                 fp->eth_q_stats.driver_xoff++;
1954                 netif_tx_stop_queue(txq);
1955                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1956                 return NETDEV_TX_BUSY;
1957         }
1958
1959         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
1960                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
1961            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1962            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1963
1964         eth = (struct ethhdr *)skb->data;
1965
1966         /* set flag according to packet type (UNICAST_ADDRESS is default) */
1967         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1968                 if (is_broadcast_ether_addr(eth->h_dest))
1969                         mac_type = BROADCAST_ADDRESS;
1970                 else
1971                         mac_type = MULTICAST_ADDRESS;
1972         }
1973
1974 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1975         /* First, check if we need to linearize the skb (due to FW
1976            restrictions). No need to check fragmentation if page size > 8K
1977            (there will be no violation of FW restrictions) */
1978         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1979                 /* Statistics of linearization */
1980                 bp->lin_cnt++;
1981                 if (skb_linearize(skb) != 0) {
1982                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1983                            "silently dropping this SKB\n");
1984                         dev_kfree_skb_any(skb);
1985                         return NETDEV_TX_OK;
1986                 }
1987         }
1988 #endif
1989
1990         /*
1991         Please read carefully. First we use one BD which we mark as start,
1992         then we have a parsing info BD (used for TSO or xsum),
1993         and only then we have the rest of the TSO BDs.
1994         (don't forget to mark the last one as last,
1995         and to unmap only AFTER you write to the BD ...)
1996         And above all, all pbd sizes are in words - NOT DWORDS!
1997         */
1998
1999         pkt_prod = fp->tx_pkt_prod++;
2000         bd_prod = TX_BD(fp->tx_bd_prod);
2001
2002         /* get a tx_buf and first BD */
2003         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2004         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2005
2006         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2007         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2008                  mac_type);
2009
2010         /* header nbd */
2011         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2012
2013         /* remember the first BD of the packet */
2014         tx_buf->first_bd = fp->tx_bd_prod;
2015         tx_buf->skb = skb;
2016         tx_buf->flags = 0;
2017
2018         DP(NETIF_MSG_TX_QUEUED,
2019            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2020            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2021
2022 #ifdef BCM_VLAN
2023         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2024             (bp->flags & HW_VLAN_TX_FLAG)) {
2025                 tx_start_bd->vlan_or_ethertype =
2026                     cpu_to_le16(vlan_tx_tag_get(skb));
2027                 tx_start_bd->bd_flags.as_bitfield |=
2028                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2029         } else
2030 #endif
2031                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2032
2033         /* turn on parsing and get a BD */
2034         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2035
2036         if (xmit_type & XMIT_CSUM) {
2037                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2038
2039                 if (xmit_type & XMIT_CSUM_V4)
2040                         tx_start_bd->bd_flags.as_bitfield |=
2041                                                 ETH_TX_BD_FLAGS_IP_CSUM;
2042                 else
2043                         tx_start_bd->bd_flags.as_bitfield |=
2044                                                 ETH_TX_BD_FLAGS_IPV6;
2045
2046                 if (!(xmit_type & XMIT_CSUM_TCP))
2047                         tx_start_bd->bd_flags.as_bitfield |=
2048                                                 ETH_TX_BD_FLAGS_IS_UDP;
2049         }
2050
2051         if (CHIP_IS_E2(bp)) {
2052                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2053                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2054                 /* Set PBD in checksum offload case */
2055                 if (xmit_type & XMIT_CSUM)
2056                         hlen = bnx2x_set_pbd_csum_e2(bp,
2057                                                      skb, pbd_e2, xmit_type);
2058         } else {
2059                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2060                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2061                 /* Set PBD in checksum offload case */
2062                 if (xmit_type & XMIT_CSUM)
2063                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2064
2065         }
2066
2067         /* Map skb linear data for DMA */
2068         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2069                                  skb_headlen(skb), DMA_TO_DEVICE);
2070
2071         /* Setup the data pointer of the first BD of the packet */
2072         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2073         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2074         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2075         tx_start_bd->nbd = cpu_to_le16(nbd);
2076         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2077         pkt_size = tx_start_bd->nbytes;
2078
2079         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2080            "  nbytes %d  flags %x  vlan %x\n",
2081            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2082            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2083            tx_start_bd->bd_flags.as_bitfield,
2084            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2085
2086         if (xmit_type & XMIT_GSO) {
2087
2088                 DP(NETIF_MSG_TX_QUEUED,
2089                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2090                    skb->len, hlen, skb_headlen(skb),
2091                    skb_shinfo(skb)->gso_size);
2092
2093                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2094
2095                 if (unlikely(skb_headlen(skb) > hlen))
2096                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2097                                                  hlen, bd_prod, ++nbd);
2098                 if (CHIP_IS_E2(bp))
2099                         bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2100                 else
2101                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2102         }
2103         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2104
2105         /* Handle fragmented skb */
2106         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2107                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2108
2109                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2110                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2111                 if (total_pkt_bd == NULL)
2112                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2113
2114                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2115                                        frag->page_offset,
2116                                        frag->size, DMA_TO_DEVICE);
2117
2118                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2119                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2120                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2121                 le16_add_cpu(&pkt_size, frag->size);
2122
2123                 DP(NETIF_MSG_TX_QUEUED,
2124                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2125                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2126                    le16_to_cpu(tx_data_bd->nbytes));
2127         }
2128
2129         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2130
2131         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2132
2133         /* now send a tx doorbell, counting the "next page" BD
2134          * if the packet spans or ends on a BD-page boundary
2135          */
2136         if (TX_BD_POFF(bd_prod) < nbd)
2137                 nbd++;
2138
2139         if (total_pkt_bd != NULL)
2140                 total_pkt_bd->total_pkt_bytes = pkt_size;
2141
2142         if (pbd_e1x)
2143                 DP(NETIF_MSG_TX_QUEUED,
2144                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2145                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2146                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2147                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2148                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2149                     le16_to_cpu(pbd_e1x->total_hlen_w));
2150         if (pbd_e2)
2151                 DP(NETIF_MSG_TX_QUEUED,
2152                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2153                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2154                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2155                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2156                    pbd_e2->parsing_data);
2157         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2158
2159         /*
2160          * Make sure that the BD data is updated before updating the producer
2161          * since FW might read the BD right after the producer is updated.
2162          * This is only applicable for weak-ordered memory model archs such
2163          * as IA-64. The following barrier is also mandatory since the FW
2164          * assumes packets must have BDs.
2165          */
2166         wmb();
2167
2168         fp->tx_db.data.prod += nbd;
2169         barrier();
2170
2171         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2172
2173         mmiowb();
2174
2175         fp->tx_bd_prod += nbd;
2176
2177         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2178                 netif_tx_stop_queue(txq);
2179
2180                 /* the paired memory barrier is in bnx2x_tx_int(); we must keep
2181                  * the ordering of set_bit() in netif_tx_stop_queue() and the
2182                  * read of fp->tx_bd_cons */
2183                 smp_mb();
2184
2185                 fp->eth_q_stats.driver_xoff++;
2186                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2187                         netif_tx_wake_queue(txq);
2188         }
2189         fp->tx_pkt++;
2190
2191         return NETDEV_TX_OK;
2192 }
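
/*
 * Editor's sketch (not part of the driver): the BD budget used in
 * bnx2x_start_xmit() above is one start BD, one parsing BD and one BD per
 * fragment, plus one more when the TSO header is split off; the queue is
 * stopped while fewer than (MAX_SKB_FRAGS + 3) BDs remain so that the worst
 * case always fits.  In short:
 */
#if 0   /* illustrative only, not compiled */
{
        int nbd = skb_shinfo(skb)->nr_frags + 2;  /* start BD + parsing BD */

        if (tso_header_split)                     /* hypothetical flag */
                nbd++;                            /* extra BD from the split */
}
#endif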
2193
2194 /* called with rtnl_lock */
2195 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2196 {
2197         struct sockaddr *addr = p;
2198         struct bnx2x *bp = netdev_priv(dev);
2199
2200         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2201                 return -EINVAL;
2202
2203         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2204         if (netif_running(dev))
2205                 bnx2x_set_eth_mac(bp, 1);
2206
2207         return 0;
2208 }
2209
2210
2211 int bnx2x_setup_irqs(struct bnx2x *bp)
2212 {
2213         int rc = 0;
2214         if (bp->flags & USING_MSIX_FLAG) {
2215                 rc = bnx2x_req_msix_irqs(bp);
2216                 if (rc)
2217                         return rc;
2218         } else {
2219                 bnx2x_ack_int(bp);
2220                 rc = bnx2x_req_irq(bp);
2221                 if (rc) {
2222                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2223                         return rc;
2224                 }
2225                 if (bp->flags & USING_MSI_FLAG) {
2226                         bp->dev->irq = bp->pdev->irq;
2227                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2228                                bp->pdev->irq);
2229                 }
2230         }
2231
2232         return 0;
2233 }
2234
2235 void bnx2x_free_mem_bp(struct bnx2x *bp)
2236 {
2237         kfree(bp->fp);
2238         kfree(bp->msix_table);
2239         kfree(bp->ilt);
2240 }
2241
2242 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2243 {
2244         struct bnx2x_fastpath *fp;
2245         struct msix_entry *tbl;
2246         struct bnx2x_ilt *ilt;
2247
2248         /* fp array */
2249         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2250         if (!fp)
2251                 goto alloc_err;
2252         bp->fp = fp;
2253
2254         /* msix table */
2255         tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2256                                   GFP_KERNEL);
2257         if (!tbl)
2258                 goto alloc_err;
2259         bp->msix_table = tbl;
2260
2261         /* ilt */
2262         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2263         if (!ilt)
2264                 goto alloc_err;
2265         bp->ilt = ilt;
2266
2267         return 0;
2268 alloc_err:
2269         bnx2x_free_mem_bp(bp);
2270         return -ENOMEM;
2271
2272 }
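
/*
 * Editor's sketch (not part of the driver): bnx2x_alloc_mem_bp() uses the
 * common kernel pattern of bailing out to one error label and letting the
 * free routine handle whichever pointers were never set, since kfree(NULL)
 * is a no-op.  The same pattern in miniature, with a hypothetical struct:
 */
#if 0   /* illustrative only, not compiled */
static int example_alloc(struct example *e)
{
        e->a = kzalloc(sizeof(*e->a), GFP_KERNEL);
        if (!e->a)
                goto alloc_err;
        e->b = kzalloc(sizeof(*e->b), GFP_KERNEL);
        if (!e->b)
                goto alloc_err;
        return 0;

alloc_err:
        kfree(e->a);            /* kfree(NULL) is safe */
        kfree(e->b);
        return -ENOMEM;
}
#endif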
2273
2274 /* called with rtnl_lock */
2275 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2276 {
2277         struct bnx2x *bp = netdev_priv(dev);
2278         int rc = 0;
2279
2280         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2281                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2282                 return -EAGAIN;
2283         }
2284
2285         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2286             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2287                 return -EINVAL;
2288
2289         /* This does not race with packet allocation
2290          * because the actual alloc size is
2291          * only updated as part of load
2292          */
2293         dev->mtu = new_mtu;
2294
2295         if (netif_running(dev)) {
2296                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2297                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2298         }
2299
2300         return rc;
2301 }
2302
2303 void bnx2x_tx_timeout(struct net_device *dev)
2304 {
2305         struct bnx2x *bp = netdev_priv(dev);
2306
2307 #ifdef BNX2X_STOP_ON_ERROR
2308         if (!bp->panic)
2309                 bnx2x_panic();
2310 #endif
2311         /* This allows the netif to be shut down gracefully before resetting */
2312         schedule_delayed_work(&bp->reset_task, 0);
2313 }
2314
2315 #ifdef BCM_VLAN
2316 /* called with rtnl_lock */
2317 void bnx2x_vlan_rx_register(struct net_device *dev,
2318                                    struct vlan_group *vlgrp)
2319 {
2320         struct bnx2x *bp = netdev_priv(dev);
2321
2322         bp->vlgrp = vlgrp;
2323 }
2324
2325 #endif
2326
2327 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2328 {
2329         struct net_device *dev = pci_get_drvdata(pdev);
2330         struct bnx2x *bp;
2331
2332         if (!dev) {
2333                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2334                 return -ENODEV;
2335         }
2336         bp = netdev_priv(dev);
2337
2338         rtnl_lock();
2339
2340         pci_save_state(pdev);
2341
2342         if (!netif_running(dev)) {
2343                 rtnl_unlock();
2344                 return 0;
2345         }
2346
2347         netif_device_detach(dev);
2348
2349         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2350
2351         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2352
2353         rtnl_unlock();
2354
2355         return 0;
2356 }
2357
2358 int bnx2x_resume(struct pci_dev *pdev)
2359 {
2360         struct net_device *dev = pci_get_drvdata(pdev);
2361         struct bnx2x *bp;
2362         int rc;
2363
2364         if (!dev) {
2365                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2366                 return -ENODEV;
2367         }
2368         bp = netdev_priv(dev);
2369
2370         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2371                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2372                 return -EAGAIN;
2373         }
2374
2375         rtnl_lock();
2376
2377         pci_restore_state(pdev);
2378
2379         if (!netif_running(dev)) {
2380                 rtnl_unlock();
2381                 return 0;
2382         }
2383
2384         bnx2x_set_power_state(bp, PCI_D0);
2385         netif_device_attach(dev);
2386
2387         /* Since the chip was reset, clear the FW sequence number */
2388         bp->fw_seq = 0;
2389         rc = bnx2x_nic_load(bp, LOAD_OPEN);
2390
2391         rtnl_unlock();
2392
2393         return rc;
2394 }