/* Source: drivers/net/ethernet/broadcom/bnx2.c (~andy/linux)
 * Commit: "bnx2: Refactor WoL setup into a separate function."
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.2.3"
62 #define DRV_MODULE_RELDATE      "June 27, 2012"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board index.  These values are used as the driver_data field in
 * bnx2_pci_tbl and as the index into board_info[], so the order here
 * must be kept in sync with both tables.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
105
/* Human-readable board names, indexed by board_t above — keep the
 * entries in exactly the same order as the enum.
 */
static struct {
        char *name;
} board_info[] = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
122
/* PCI IDs claimed by this driver.  The driver_data field is the
 * board_t index.  Entries with a specific HP subsystem ID must appear
 * before the PCI_ANY_ID wildcard entries for the same device ID so
 * the more specific match wins.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 5716/5716S device IDs used as raw values; no
         * PCI_DEVICE_ID_NX2_* constant existed for them.
         */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
148
/* Supported NVRAM (EEPROM/flash) parts.  The leading hex words of
 * each entry are raw NVRAM-controller configuration values; the first
 * encodes the hardware strapping used to identify the part at probe
 * time.  NOTE(review): exact bit layout of these words is defined by
 * the NVRAM controller — confirm against bnx2.h / chip documentation.
 * "Expansion entry" rows are placeholders for strap values with no
 * known part.  Do not reorder or alter the hex values.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
237
/* NVRAM spec for the 5709 family.  NOTE(review): presumably used
 * instead of the strap-based flash_table lookup on 5709 chips —
 * confirm against the NVRAM init code.
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257         barrier();
258
259         /* The ring uses 256 indices for 255 entries, one of them
260          * needs to be skipped.
261          */
262         diff = txr->tx_prod - txr->tx_cons;
263         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
264                 diff &= 0xffff;
265                 if (diff == BNX2_TX_DESC_CNT)
266                         diff = BNX2_MAX_TX_DESC_CNT;
267         }
268         return bp->tx_ring_size - diff;
269 }
270
/* Read a device register indirectly via the PCI config-space window.
 * The window-address write and the data read must not be interleaved
 * with other indirect accesses, hence indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
282
/* Write a device register indirectly via the PCI config-space window.
 * Paired address/data writes are serialized by indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
291
/* Write a 32-bit word into the device shared-memory area; @offset is
 * relative to shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
297
/* Read a 32-bit word from the device shared-memory area; @offset is
 * relative to shmem_base.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
        return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
303
/* Write one 32-bit word of on-chip context memory.  @cid_addr selects
 * the context, @offset the word within it.  On the 5709 the write goes
 * through a data/control register pair and completion is polled (up to
 * 5 * 5us); older chips use a simple address/data window.  Serialized
 * by indirect_lock like the other indirect accessors.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                int i;

                BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
                BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* val is reused here as a readback scratch; the loop
                 * exits early once the WRITE_REQ bit self-clears.
                 */
                for (i = 0; i < 5; i++) {
                        val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
                BNX2_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
327
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332         struct bnx2 *bp = netdev_priv(dev);
333         struct drv_ctl_io *io = &info->data.io;
334
335         switch (info->cmd) {
336         case DRV_CTL_IO_WR_CMD:
337                 bnx2_reg_wr_ind(bp, io->offset, io->data);
338                 break;
339         case DRV_CTL_IO_RD_CMD:
340                 io->data = bnx2_reg_rd_ind(bp, io->offset);
341                 break;
342         case DRV_CTL_CTX_WR_CMD:
343                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344                 break;
345         default:
346                 return -EINVAL;
347         }
348         return 0;
349 }
350
351 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 {
353         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355         int sb_id;
356
357         if (bp->flags & BNX2_FLAG_USING_MSIX) {
358                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359                 bnapi->cnic_present = 0;
360                 sb_id = bp->irq_nvecs;
361                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362         } else {
363                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364                 bnapi->cnic_tag = bnapi->last_status_idx;
365                 bnapi->cnic_present = 1;
366                 sb_id = 0;
367                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368         }
369
370         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371         cp->irq_arr[0].status_blk = (void *)
372                 ((unsigned long) bnapi->status_blk.msi +
373                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374         cp->irq_arr[0].status_blk_num = sb_id;
375         cp->num_irq = 1;
376 }
377
378 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
379                               void *data)
380 {
381         struct bnx2 *bp = netdev_priv(dev);
382         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
383
384         if (ops == NULL)
385                 return -EINVAL;
386
387         if (cp->drv_state & CNIC_DRV_STATE_REGD)
388                 return -EBUSY;
389
390         if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
391                 return -ENODEV;
392
393         bp->cnic_data = data;
394         rcu_assign_pointer(bp->cnic_ops, ops);
395
396         cp->num_irq = 0;
397         cp->drv_state = CNIC_DRV_STATE_REGD;
398
399         bnx2_setup_cnic_irq_info(bp);
400
401         return 0;
402 }
403
/* Detach the CNIC driver.  State is cleared under cnic_lock and the
 * ops pointer NULLed; synchronize_rcu() then waits for any readers
 * still dereferencing the old cnic_ops to finish before returning, so
 * the caller may safely free its ops structure afterwards.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
418
419 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421         struct bnx2 *bp = netdev_priv(dev);
422         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423
424         if (!cp->max_iscsi_conn)
425                 return NULL;
426
427         cp->drv_owner = THIS_MODULE;
428         cp->chip_id = bp->chip_id;
429         cp->pdev = bp->pdev;
430         cp->io_base = bp->regview;
431         cp->drv_ctl = bnx2_drv_ctl;
432         cp->drv_register_cnic = bnx2_register_cnic;
433         cp->drv_unregister_cnic = bnx2_unregister_cnic;
434
435         return cp;
436 }
437
/* Notify an attached CNIC driver that the device is stopping.  The
 * ops pointer is read under cnic_lock (lockdep-annotated), so this is
 * a no-op when no CNIC driver is registered.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                info.cmd = CNIC_CTL_STOP_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
453
/* Notify an attached CNIC driver that the device has (re)started.
 * In shared-interrupt (non-MSI-X) mode, cnic_tag is resynchronized to
 * the current status index first so the CNIC driver does not process
 * stale events.  No-op when no CNIC driver is registered.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_lock));
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
474
475 #else
476
/* CNIC support not compiled in: the stop/start notifications become
 * no-ops so callers need no #ifdefs.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
486
487 #endif
488
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, polling is paused around the
 * manual access and restored afterwards; the 40us settle delays are
 * inherited from the original driver (NOTE(review): confirm against
 * chip documentation).
 *
 * Returns 0 with the 16-bit value in *val on success, or -EBUSY with
 * *val zeroed if the interface never goes idle.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Command word: PHY address in bits 25:21, register number in
         * bits 20:16; START_BUSY kicks off the transaction.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion, up to 50 * 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read and keep only the data field. */
                        val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if it was paused above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
545
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy(): auto-polling is paused around the manual
 * access, the command packs the PHY address (bits 25:21) and register
 * (bits 20:16), and completion is polled for up to 50 * 10us.
 * Returns 0 on success or -EBUSY if the interface never goes idle.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if it was paused above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
594
595 static void
596 bnx2_disable_int(struct bnx2 *bp)
597 {
598         int i;
599         struct bnx2_napi *bnapi;
600
601         for (i = 0; i < bp->irq_nvecs; i++) {
602                 bnapi = &bp->bnx2_napi[i];
603                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605         }
606         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 }
608
/* Unmask interrupts on all vectors.  Each vector is written twice:
 * first acking up to last_status_idx with the mask bit still set,
 * then again without the mask bit to actually enable the interrupt.
 * The final COAL_NOW command forces an immediate host-coalescing pass
 * so any event that arrived while masked is not lost.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                        bnapi->last_status_idx);

                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                        bnapi->last_status_idx);
        }
        BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
629
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is incremented first so the matching bnx2_netif_start()
 * will not re-enable until this disable is balanced; if the device is
 * not running there is nothing to mask or synchronize.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
643
644 static void
645 bnx2_napi_disable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_disable(&bp->bnx2_napi[i].napi);
651 }
652
653 static void
654 bnx2_napi_enable(struct bnx2 *bp)
655 {
656         int i;
657
658         for (i = 0; i < bp->irq_nvecs; i++)
659                 napi_enable(&bp->bnx2_napi[i].napi);
660 }
661
/* Quiesce the interface: optionally notify the CNIC driver first,
 * then disable NAPI and the TX queues, mask and synchronize all
 * interrupts, and finally drop the carrier so the stack does not
 * fire a TX timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
        if (stop_cnic)
                bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
        }
        bnx2_disable_int_sync(bp);
        netif_carrier_off(bp->dev);     /* prevent tx timeout */
}
674
/* Counterpart of bnx2_netif_stop().  intr_sem counts outstanding
 * disables (see bnx2_disable_int_sync()); only when the last one is
 * released does this restore the carrier, TX queues, NAPI and
 * interrupts, and optionally restart the CNIC driver.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        /* link_up is protected by phy_lock. */
                        spin_lock_bh(&bp->phy_lock);
                        if (bp->link_up)
                                netif_carrier_on(bp->dev);
                        spin_unlock_bh(&bp->phy_lock);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        if (start_cnic)
                                bnx2_cnic_start(bp);
                }
        }
}
692
693 static void
694 bnx2_free_tx_mem(struct bnx2 *bp)
695 {
696         int i;
697
698         for (i = 0; i < bp->num_tx_rings; i++) {
699                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702                 if (txr->tx_desc_ring) {
703                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704                                           txr->tx_desc_ring,
705                                           txr->tx_desc_mapping);
706                         txr->tx_desc_ring = NULL;
707                 }
708                 kfree(txr->tx_buf_ring);
709                 txr->tx_buf_ring = NULL;
710         }
711 }
712
713 static void
714 bnx2_free_rx_mem(struct bnx2 *bp)
715 {
716         int i;
717
718         for (i = 0; i < bp->num_rx_rings; i++) {
719                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721                 int j;
722
723                 for (j = 0; j < bp->rx_max_ring; j++) {
724                         if (rxr->rx_desc_ring[j])
725                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726                                                   rxr->rx_desc_ring[j],
727                                                   rxr->rx_desc_mapping[j]);
728                         rxr->rx_desc_ring[j] = NULL;
729                 }
730                 vfree(rxr->rx_buf_ring);
731                 rxr->rx_buf_ring = NULL;
732
733                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
734                         if (rxr->rx_pg_desc_ring[j])
735                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736                                                   rxr->rx_pg_desc_ring[j],
737                                                   rxr->rx_pg_desc_mapping[j]);
738                         rxr->rx_pg_desc_ring[j] = NULL;
739                 }
740                 vfree(rxr->rx_pg_ring);
741                 rxr->rx_pg_ring = NULL;
742         }
743 }
744
745 static int
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
747 {
748         int i;
749
750         for (i = 0; i < bp->num_tx_rings; i++) {
751                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755                 if (txr->tx_buf_ring == NULL)
756                         return -ENOMEM;
757
758                 txr->tx_desc_ring =
759                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760                                            &txr->tx_desc_mapping, GFP_KERNEL);
761                 if (txr->tx_desc_ring == NULL)
762                         return -ENOMEM;
763         }
764         return 0;
765 }
766
767 static int
768 bnx2_alloc_rx_mem(struct bnx2 *bp)
769 {
770         int i;
771
772         for (i = 0; i < bp->num_rx_rings; i++) {
773                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775                 int j;
776
777                 rxr->rx_buf_ring =
778                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779                 if (rxr->rx_buf_ring == NULL)
780                         return -ENOMEM;
781
782                 for (j = 0; j < bp->rx_max_ring; j++) {
783                         rxr->rx_desc_ring[j] =
784                                 dma_alloc_coherent(&bp->pdev->dev,
785                                                    RXBD_RING_SIZE,
786                                                    &rxr->rx_desc_mapping[j],
787                                                    GFP_KERNEL);
788                         if (rxr->rx_desc_ring[j] == NULL)
789                                 return -ENOMEM;
790
791                 }
792
793                 if (bp->rx_pg_ring_size) {
794                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
795                                                   bp->rx_max_pg_ring);
796                         if (rxr->rx_pg_ring == NULL)
797                                 return -ENOMEM;
798
799                 }
800
801                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
802                         rxr->rx_pg_desc_ring[j] =
803                                 dma_alloc_coherent(&bp->pdev->dev,
804                                                    RXBD_RING_SIZE,
805                                                    &rxr->rx_pg_desc_mapping[j],
806                                                    GFP_KERNEL);
807                         if (rxr->rx_pg_desc_ring[j] == NULL)
808                                 return -ENOMEM;
809
810                 }
811         }
812         return 0;
813 }
814
/* Free everything bnx2_alloc_mem() allocated: the TX/RX rings, the
 * 5709 context-block pages, and the combined status + statistics
 * block.  stats_blk points into the status-block allocation, so it is
 * only NULLed here, not freed separately.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
                                          bp->ctx_blk[i],
                                          bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
                                  bnapi->status_blk.msi,
                                  bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
840
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* With MSI-X, reserve one aligned status sub-block per
		 * possible hardware vector.
		 */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping,
					GFP_KERNEL | __GFP_ZERO);
	if (status_blk == NULL)
		goto alloc_mem_err;

	/* Vector 0 always uses the base (MSI-layout) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional vector points into its own
			 * aligned slice of the same allocation.
			 */
			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block sits immediately after the status area. */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 keeps 0x2000 bytes of context memory in host
		 * DMA pages.
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() handles a partially-completed allocation. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
916
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	/* Report the current link speed/duplex/autoneg state to the
	 * bootcode via the BNX2_LINK_STATUS shared-memory word.  When a
	 * remote (firmware-managed) PHY is in use the firmware already
	 * owns the link state, so there is nothing to report.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice: the standard MII status bits
			 * are latched, so the first read returns stale
			 * state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
975
976 static char *
977 bnx2_xceiver_str(struct bnx2 *bp)
978 {
979         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
980                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981                  "Copper");
982 }
983
static void
bnx2_report_link(struct bnx2 *bp)
{
	/* Log the link state, mirror it into the carrier flag, and
	 * forward it to the firmware via bnx2_report_fw_link().
	 */
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* No trailing newline: the flow-control details below are
		 * appended to the same line with pr_cont().
		 */
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1014
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	/* Derive bp->flow_ctrl from the negotiated (or forced) pause
	 * configuration.
	 */
	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Flow control is forced, not negotiated; honor the
		 * requested setting on full duplex only.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		/* 5708 SerDes reports the resolved pause result directly
		 * in the 1000X status register.
		 */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Translate 1000BASE-X pause bits into the copper
		 * ADVERTISE_PAUSE_* encoding so one resolution table
		 * below serves both media.
		 */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1090
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	/* Fill in bp->line_speed/duplex for a 5709 SerDes link-up event
	 * by reading the GP_STATUS block of the PHY.
	 */
	bp->link_up = 1;

	/* GP_TOP_AN_STATUS1 lives in the GP_STATUS register block;
	 * select it, read, then restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* Forced speed: report what was requested. */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1129
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	/* Fill in bp->line_speed/duplex for a 5708 SerDes link-up event
	 * from the 1000X status register, which reports the resolved
	 * speed and duplex directly.
	 */
	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1158
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	/* Fill in bp->line_speed/duplex for a 5706 SerDes link-up event.
	 * 5706 SerDes only runs at 1 Gbps; only duplex needs resolving.
	 */
	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* With autoneg off, the forced BMCR duplex above is final. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Resolve duplex from the intersection of both sides'
	 * 1000BASE-X advertisements; full duplex wins when both
	 * halves are common.
	 */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1195
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	/* Fill in bp->line_speed/duplex for a copper link-up event,
	 * either from the autoneg result or from the forced BMCR bits.
	 */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit first: MII_STAT1000 link-partner bits sit two
		 * positions above the matching MII_CTRL1000 advertisement
		 * bits, hence the >> 2 before intersecting.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: resolve 10/100 from the
			 * standard advertisement registers, best
			 * capability first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: speed/duplex come straight from the
		 * forced BMCR control bits.
		 */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1261
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	/* Program the L2 context type word for one RX ring (identified
	 * by its context ID), enabling hardware flow control when TX
	 * pause is active.
	 */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 looks like a context size/field code
	 * defined by the chip spec — confirm against the 5706/5709
	 * programming guide.
	 */
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1276
1277 static void
1278 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1279 {
1280         int i;
1281         u32 cid;
1282
1283         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1284                 if (i == 1)
1285                         cid = RX_RSS_CID;
1286                 bnx2_init_rx_context(bp, cid);
1287         }
1288 }
1289
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the EMAC to match the resolved link parameters
	 * (speed, duplex, flow control) currently held in bp.
	 */

	/* Default inter-packet gap/slot-time value; 1000HD needs a
	 * different setting (0x26ff).
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode and
				 * falls through to plain MII.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G_MODE bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* RX contexts embed the TX-pause setting; refresh them so they
	 * match the new flow-control state.
	 */
	bnx2_init_all_rx_contexts(bp);
}
1356
1357 static void
1358 bnx2_enable_bmsr1(struct bnx2 *bp)
1359 {
1360         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1361             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1362                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1363                                MII_BNX2_BLK_ADDR_GP_STATUS);
1364 }
1365
1366 static void
1367 bnx2_disable_bmsr1(struct bnx2 *bp)
1368 {
1369         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1370             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1371                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1372                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1373 }
1374
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	/* Ensure the 2.5G capability bit (UP1_2G5) is set in the PHY.
	 * Returns 1 if it was already set, 0 if this call had to set it
	 * (i.e. the caller may need to restart autoneg).
	 */
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register is in the OVER1G block; select it,
	 * do the read-modify-write, then restore COMBO_IEEEB0.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1403
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	/* Ensure the 2.5G capability bit (UP1_2G5) is cleared in the
	 * PHY.  Returns 1 if this call had to clear it, 0 if it was
	 * already clear (note: inverse convention of
	 * bnx2_test_and_enable_2g5()).
	 */
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register is in the OVER1G block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1429
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	/* Force the SerDes link to 2.5 Gbps.  The mechanism is
	 * chip-specific: 5709 uses the SERDES_DIG MISC1 force bits,
	 * 5708 uses a vendor bit in BMCR; other chips are unsupported.
	 */
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	/* Forcing a speed is incompatible with autoneg; drop ANENABLE
	 * and carry over the requested duplex.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1473
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G
	 * force bits and, if autoneg was requested, restart it.
	 */
	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1512
1513 static void
1514 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1515 {
1516         u32 val;
1517
1518         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1519         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1520         if (start)
1521                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1522         else
1523                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1524 }
1525
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Re-evaluate link state: read link status from the PHY,
	 * resolve speed/duplex/flow-control, report any change, and
	 * reprogram the MAC.  Always returns 0.
	 */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		/* In loopback the link is up by definition. */
		bp->link_up = 1;
		return 0;
	}

	/* Remote (firmware-managed) PHY: firmware drives link state. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* bmsr1 may live in another register block on 5709 SerDes;
	 * read twice because the status bits are latched.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			/* Release a previously forced-down link. */
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is a latched shadow register: select it, then
		 * read twice for the current NOSYNC state.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override BMSR: on 5706 SerDes trust the EMAC link bit
		 * plus SerDes sync instead of the PHY status register.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			/* Link lost while autonegotiating: stop forcing
			 * 2.5G so autoneg can run normally.
			 */
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			/* Leave parallel-detect mode by re-enabling
			 * autoneg.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log / notify firmware on an actual state change. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1609
1610 static int
1611 bnx2_reset_phy(struct bnx2 *bp)
1612 {
1613         int i;
1614         u32 reg;
1615
1616         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1617
1618 #define PHY_RESET_MAX_WAIT 100
1619         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1620                 udelay(10);
1621
1622                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1623                 if (!(reg & BMCR_RESET)) {
1624                         udelay(20);
1625                         break;
1626                 }
1627         }
1628         if (i == PHY_RESET_MAX_WAIT) {
1629                 return -EBUSY;
1630         }
1631         return 0;
1632 }
1633
1634 static u32
1635 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1636 {
1637         u32 adv = 0;
1638
1639         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1640                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1641
1642                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1643                         adv = ADVERTISE_1000XPAUSE;
1644                 }
1645                 else {
1646                         adv = ADVERTISE_PAUSE_CAP;
1647                 }
1648         }
1649         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1650                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651                         adv = ADVERTISE_1000XPSE_ASYM;
1652                 }
1653                 else {
1654                         adv = ADVERTISE_PAUSE_ASYM;
1655                 }
1656         }
1657         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1658                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1660                 }
1661                 else {
1662                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1663                 }
1664         }
1665         return adv;
1666 }
1667
1668 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1669
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	/* Translate the driver's requested link settings into the
	 * BNX2_NETLINK_SET_LINK encoding and hand them to the firmware,
	 * which owns the remote PHY.  Drops and reacquires phy_lock
	 * around the firmware handshake.  Always returns 0.
	 */
	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: encode every advertised speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed: encode the single requested setting. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep/poll, so it must run without
	 * phy_lock held.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1728
/* Configure the SerDes (fibre) PHY for the requested link settings.
 * Delegates to bnx2_setup_remote_phy() when the firmware owns the PHY.
 * Otherwise it programs the MII advertisement and BMCR registers
 * directly, either forcing a speed/duplex or (re)starting autoneg.
 *
 * Called with bp->phy_lock held; the lock is dropped briefly around
 * the msleep() in the autoneg path, per the sparse annotations.
 *
 * Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability may itself require a link
		 * bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* 2.5G forcing is chip-specific: 5709 uses helper
		 * functions, 5708 uses a vendor BMCR bit.
		 */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is an undocumented
				 * magic BMCR bit cleared here — presumably
				 * part of the forced-2.5G encoding; confirm
				 * against chip documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				/* NOTE(review): this rebuilds new_bmcr from
				 * the raw bmcr, discarding the ANENABLE/
				 * SPEED1000 edits above — matches upstream,
				 * but looks deliberate-or-fragile; verify.
				 */
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Momentarily advertise nothing and restart
				 * autoneg so the partner drops the link,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* No register change needed: just refresh flow
			 * control and the MAC's notion of the link.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			/* Loopback drops the link; sleep outside the
			 * spinlock to let the partner notice.
			 */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1845
/* Advertisement masks used by the link-setup code below.
 *
 * The ETHTOOL_ALL_* macros expand to ethtool ADVERTISED_* link-mode
 * flags.  Note that ETHTOOL_ALL_FIBRE_SPEED reads bp->phy_flags, so it
 * may only be expanded where a variable named "bp" is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* The PHY_ALL_* masks are MII ADVERTISE_* register bits (written to
 * the PHY's advertisement registers, not ethtool flags).
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1860
1861 static void
1862 bnx2_set_default_remote_link(struct bnx2 *bp)
1863 {
1864         u32 link;
1865
1866         if (bp->phy_port == PORT_TP)
1867                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1868         else
1869                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1870
1871         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1872                 bp->req_line_speed = 0;
1873                 bp->autoneg |= AUTONEG_SPEED;
1874                 bp->advertising = ADVERTISED_Autoneg;
1875                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1876                         bp->advertising |= ADVERTISED_10baseT_Half;
1877                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1878                         bp->advertising |= ADVERTISED_10baseT_Full;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1880                         bp->advertising |= ADVERTISED_100baseT_Half;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1882                         bp->advertising |= ADVERTISED_100baseT_Full;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1884                         bp->advertising |= ADVERTISED_1000baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1886                         bp->advertising |= ADVERTISED_2500baseX_Full;
1887         } else {
1888                 bp->autoneg = 0;
1889                 bp->advertising = 0;
1890                 bp->req_duplex = DUPLEX_FULL;
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1892                         bp->req_line_speed = SPEED_10;
1893                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1894                                 bp->req_duplex = DUPLEX_HALF;
1895                 }
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1897                         bp->req_line_speed = SPEED_100;
1898                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1899                                 bp->req_duplex = DUPLEX_HALF;
1900                 }
1901                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1902                         bp->req_line_speed = SPEED_1000;
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1904                         bp->req_line_speed = SPEED_2500;
1905         }
1906 }
1907
1908 static void
1909 bnx2_set_default_link(struct bnx2 *bp)
1910 {
1911         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1912                 bnx2_set_default_remote_link(bp);
1913                 return;
1914         }
1915
1916         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1917         bp->req_line_speed = 0;
1918         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1919                 u32 reg;
1920
1921                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1922
1923                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1924                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1925                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1926                         bp->autoneg = 0;
1927                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1928                         bp->req_duplex = DUPLEX_FULL;
1929                 }
1930         } else
1931                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1932 }
1933
/* Send the periodic driver-pulse (heartbeat) to the bootcode so it
 * knows the driver is alive.  The sequence number is bumped and written
 * to the DRV_PULSE mailbox via the PCI register window; indirect_lock
 * serializes the two-step window-address/window-data access, which must
 * not be interleaved with other indirect register users.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	/* Set the window address first, then write the data through it. */
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1947
/* Handle a link event reported by the remote-PHY firmware.  Decodes
 * the BNX2_LINK_STATUS shared-memory word into bp->link_up, line_speed,
 * duplex, flow_ctrl and phy_port, reports carrier changes, and
 * reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember old state for change report */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case deliberately falls through to
		 * the full-duplex case of the same speed: the HALF case
		 * only overrides bp->duplex, the speed is shared.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control not fully autonegotiated: use the
			 * user's forced setting (full duplex only).
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Take the negotiated result from the firmware. */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware can switch the active media; reload the
		 * defaults if the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2028
2029 static int
2030 bnx2_set_remote_link(struct bnx2 *bp)
2031 {
2032         u32 evt_code;
2033
2034         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2035         switch (evt_code) {
2036                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2037                         bnx2_remote_phy_event(bp);
2038                         break;
2039                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2040                 default:
2041                         bnx2_send_heart_beat(bp);
2042                         break;
2043         }
2044         return 0;
2045 }
2046
/* Configure the copper PHY for the requested link settings: either
 * (re)start autonegotiation with the requested advertisement, or force
 * speed/duplex through the BMCR.
 *
 * Called with bp->phy_lock held; the lock is dropped around the
 * msleep() used to bounce the link in the forced path, per the sparse
 * annotations.
 *
 * Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg path: rewrite the advertisement registers only
		 * if they (or ANENABLE) actually need to change.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv = 0;
		u32 new_adv1000 = 0;

		/* Mask current advertisements down to the bits we manage
		 * so the comparison below is apples-to-apples.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path: build the desired BMCR (only 10/100 and
	 * duplex are forced here).
	 */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down: read twice to get current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2136
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2141 {
2142         if (bp->loopback == MAC_LOOPBACK)
2143                 return 0;
2144
2145         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146                 return bnx2_setup_serdes_phy(bp, port);
2147         }
2148         else {
2149                 return bnx2_setup_copper_phy(bp);
2150         }
2151 }
2152
/* One-time init of the 5709 SerDes PHY.  The 5709 exposes the IEEE
 * registers at offset +0x10 within paged register blocks selected via
 * MII_BNX2_BLK_ADDR, so the bp->mii_* offsets are redirected first and
 * every access below is preceded by selecting the right block.
 *
 * @reset_phy: non-zero to soft-reset the PHY before programming it.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Remap the generic MII register offsets to the 5709's layout. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route subsequent accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode, disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or not) 2.5G in the over-1G next-page register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2202
/* One-time init of the 5708 SerDes PHY: select fibre mode, enable PLL
 * detect, optionally advertise 2.5G, and apply board/revision-specific
 * TX amplitude tweaks from the hardware config in shared memory.
 *
 * @reset_phy: non-zero to soft-reset the PHY first.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 silicon needs a stronger TX signal. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 field in the hw config means a board-specific
	 * TX control value must be applied on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2260
/* One-time init of the 5706 SerDes PHY.  Adjusts packet-length and
 * elasticity settings (via undocumented shadow registers 0x18/0x1c)
 * depending on whether jumbo frames (MTU > 1500) are configured.
 *
 * @reset_phy: non-zero to soft-reset the PHY first.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): magic GP_HW_CTL0 value applied on 5706 only —
	 * meaning of 0x300 not derivable from this file.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended-length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2298
/* One-time init of the copper PHY: apply workaround register sequences
 * (CRC fix, early-DAC disable) when flagged, set/clear the extended
 * packet-length bits according to the MTU, and enable
 * ethernet@wirespeed (link at a lower speed when the partner can't do
 * the advertised one).
 *
 * @reset_phy: non-zero to soft-reset the PHY first.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor-prescribed shadow/expansion register sequence for the
	 * CRC workaround; the exact write order is required.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended-length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2350
2351
/* Initialize whichever PHY the board has and then apply the current
 * link settings.  Sets the default mii_* register offsets (the 5709
 * serdes init overrides them), enables link attention, reads the PHY
 * ID, and dispatches to the chip-specific init routine.  Remote-PHY
 * boards skip local init entirely — the firmware owns the PHY.
 *
 * Called with bp->phy_lock held; bnx2_setup_phy() may drop and
 * re-acquire it, hence the sparse annotations.
 *
 * @reset_phy: passed through to the chip-specific init.
 * Returns 0 on success or the error from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Standard MII register layout by default. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2397
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 {
2401         u32 mac_mode;
2402
2403         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2404         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407         bp->link_up = 1;
2408         return 0;
2409 }
2410
2411 static int bnx2_test_link(struct bnx2 *);
2412
/* Put the PHY into loopback at forced 1000/full, wait (up to ~1s) for
 * the internal link to come up, then configure the MAC for GMII with
 * the loopback/force bits cleared.
 *
 * Returns 0 on success or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY register access is serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; give it at most 10 x 100ms.  A timeout is
	 * not treated as fatal — the MAC is configured regardless.
	 */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2442
/* Dump the management CPU (MCP) state and key shared-memory words to
 * the kernel log for post-mortem debugging (e.g. after a firmware
 * sync timeout).  Read-only diagnostics; no device state is changed.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The per-port MCP state registers moved on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is sampled twice — presumably so the log
	 * shows whether the MCP is actually advancing (two equal values
	 * suggest a hung CPU); confirm against driver history.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	/* pr_cont() continues the preceding netdev_err() line. */
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2485
/* Post a message to the firmware's driver mailbox and optionally wait
 * for the firmware to acknowledge it.
 *
 * @msg_data: message code/data (BNX2_DRV_MSG_*); a per-device sequence
 *            number from bp->fw_wr_seq is OR'ed in before posting.
 * @ack:      if zero, post the message and return immediately.
 * @silent:   if set, suppress the error print and MCP dump on timeout.
 *
 * Returns 0 on success (or when no ack is required), -EBUSY if the
 * firmware did not acknowledge within BNX2_FW_ACK_TIME_OUT_MS, and
 * -EIO if the firmware acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware echoes the sequence number in its mailbox
		 * when it has processed the message.
		 */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not checked for timeout or status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2531
/* Initialize the 5709 on-chip context memory: kick the hardware context
 * memory init, then program the host page table with the DMA address of
 * each pre-allocated context page (bp->ctx_blk[]/ctx_blk_mapping[]).
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size (log2 - 8) in bits 16+. */
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address split across the two
		 * page-table data registers, then trigger the write.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the WRITE_REQ bit to self-clear. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2579
2580 static void
2581 bnx2_init_context(struct bnx2 *bp)
2582 {
2583         u32 vcid;
2584
2585         vcid = 96;
2586         while (vcid) {
2587                 u32 vcid_addr, pcid_addr, offset;
2588                 int i;
2589
2590                 vcid--;
2591
2592                 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2593                         u32 new_vcid;
2594
2595                         vcid_addr = GET_PCID_ADDR(vcid);
2596                         if (vcid & 0x8) {
2597                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2598                         }
2599                         else {
2600                                 new_vcid = vcid;
2601                         }
2602                         pcid_addr = GET_PCID_ADDR(new_vcid);
2603                 }
2604                 else {
2605                         vcid_addr = GET_CID_ADDR(vcid);
2606                         pcid_addr = vcid_addr;
2607                 }
2608
2609                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2610                         vcid_addr += (i << PHY_CTX_SHIFT);
2611                         pcid_addr += (i << PHY_CTX_SHIFT);
2612
2613                         BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2614                         BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2615
2616                         /* Zero out the context. */
2617                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2618                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2619                 }
2620         }
2621 }
2622
2623 static int
2624 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2625 {
2626         u16 *good_mbuf;
2627         u32 good_mbuf_cnt;
2628         u32 val;
2629
2630         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2631         if (good_mbuf == NULL)
2632                 return -ENOMEM;
2633
2634         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2635                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2636
2637         good_mbuf_cnt = 0;
2638
2639         /* Allocate a bunch of mbufs and save the good ones in an array. */
2640         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2642                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2643                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2644
2645                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2646
2647                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2648
2649                 /* The addresses with Bit 9 set are bad memory blocks. */
2650                 if (!(val & (1 << 9))) {
2651                         good_mbuf[good_mbuf_cnt] = (u16) val;
2652                         good_mbuf_cnt++;
2653                 }
2654
2655                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2656         }
2657
2658         /* Free the good ones back to the mbuf pool thus discarding
2659          * all the bad ones. */
2660         while (good_mbuf_cnt) {
2661                 good_mbuf_cnt--;
2662
2663                 val = good_mbuf[good_mbuf_cnt];
2664                 val = (val << 9) | val | 1;
2665
2666                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2667         }
2668         kfree(good_mbuf);
2669         return 0;
2670 }
2671
2672 static void
2673 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2674 {
2675         u32 val;
2676
2677         val = (mac_addr[0] << 8) | mac_addr[1];
2678
2679         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2680
2681         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2682                 (mac_addr[4] << 8) | mac_addr[5];
2683
2684         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2685 }
2686
2687 static inline int
2688 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2689 {
2690         dma_addr_t mapping;
2691         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2692         struct bnx2_rx_bd *rxbd =
2693                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2694         struct page *page = alloc_page(gfp);
2695
2696         if (!page)
2697                 return -ENOMEM;
2698         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2699                                PCI_DMA_FROMDEVICE);
2700         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2701                 __free_page(page);
2702                 return -EIO;
2703         }
2704
2705         rx_pg->page = page;
2706         dma_unmap_addr_set(rx_pg, mapping, mapping);
2707         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2708         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2709         return 0;
2710 }
2711
2712 static void
2713 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2714 {
2715         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2716         struct page *page = rx_pg->page;
2717
2718         if (!page)
2719                 return;
2720
2721         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2722                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2723
2724         __free_page(page);
2725         rx_pg->page = NULL;
2726 }
2727
2728 static inline int
2729 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2730 {
2731         u8 *data;
2732         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2733         dma_addr_t mapping;
2734         struct bnx2_rx_bd *rxbd =
2735                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2736
2737         data = kmalloc(bp->rx_buf_size, gfp);
2738         if (!data)
2739                 return -ENOMEM;
2740
2741         mapping = dma_map_single(&bp->pdev->dev,
2742                                  get_l2_fhdr(data),
2743                                  bp->rx_buf_use_size,
2744                                  PCI_DMA_FROMDEVICE);
2745         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2746                 kfree(data);
2747                 return -EIO;
2748         }
2749
2750         rx_buf->data = data;
2751         dma_unmap_addr_set(rx_buf, mapping, mapping);
2752
2753         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2754         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2755
2756         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2757
2758         return 0;
2759 }
2760
2761 static int
2762 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2763 {
2764         struct status_block *sblk = bnapi->status_blk.msi;
2765         u32 new_link_state, old_link_state;
2766         int is_set = 1;
2767
2768         new_link_state = sblk->status_attn_bits & event;
2769         old_link_state = sblk->status_attn_bits_ack & event;
2770         if (new_link_state != old_link_state) {
2771                 if (new_link_state)
2772                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2773                 else
2774                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2775         } else
2776                 is_set = 0;
2777
2778         return is_set;
2779 }
2780
/* Service PHY-related attention events from NAPI context: re-evaluate
 * the local link state and/or the remote-PHY link state under the
 * phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2794
/* Read the hardware TX consumer index from the status block.  The
 * index skips the last entry of each ring page (it holds a next-page
 * pointer, not a BD), hence the increment at the page boundary.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2808
/* Reclaim completed TX buffer descriptors for this NAPI instance's TX
 * ring: unmap the head and fragment DMA mappings, free the skbs, and
 * wake the queue if it was stopped and enough descriptors are free.
 *
 * @budget: maximum number of packets to reclaim in this call.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the last BD of this GSO packet; the
			 * ring-page boundary entry is skipped, hence
			 * the extra increment when it is crossed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit comparison handles index
			 * wrap-around: stop if the last BD has not
			 * completed yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each page fragment's BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	/* Report completed work for byte-queue-limit accounting. */
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the queue lock to close the race with
		 * the transmit path stopping the queue.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2903
/* Recycle @count consumed RX page-ring entries back to the producer
 * side without allocating new pages, moving each page and its DMA
 * mapping from the consumer slot to the producer slot.
 *
 * @skb: if non-NULL, its last page fragment could not be replaced;
 *       that page is returned to the ring and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page from the skb and put it
		 * back into the current consumer slot.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* When cons == prod the entry is already in place;
		 * otherwise move the page, mapping, and BD address.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2961
/* Recycle an RX data buffer from consumer slot @cons to producer slot
 * @prod (used when the packet was copied out or a replacement buffer
 * could not be allocated), moving the DMA mapping and BD address along
 * with it.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the (CPU-synced) header area back to the device. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: mapping and BD address already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2991
/* Build an skb from a received buffer.  For small/normal packets the
 * whole frame is in @data; for split/jumbo frames the first @hdr_len
 * bytes are in @data and the rest is spread across RX page-ring pages
 * which are attached as skb fragments.
 *
 * @len:      packet length excluding the trailing 4-byte CRC.
 * @hdr_len:  0 for non-split frames, else bytes present in @data.
 * @dma_addr: DMA mapping of @data, unmapped here on success.
 * @ring_idx: producer index in the low 16 bits, consumer index in the
 *            high 16 bits.
 *
 * Returns the skb, or NULL on allocation failure (buffers recycled).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Replace the consumed buffer first; on failure, recycle the
	 * old one and drop the frame.
	 */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		if (hdr_len) {
			/* Also recycle the page-ring entries that
			 * belong to this split frame (+4 for the CRC).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip the l2_fhdr and padding so skb->data points at the
	 * Ethernet header.
	 */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Remaining bytes live in the page ring (+4 for CRC,
		 * trimmed again from the last fragment below).
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* Last page holds only (part of) the CRC: trim
			 * the tail bytes off what we already attached
			 * and recycle the remaining pages.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3096
/* Read the hardware RX consumer index from the status block.  Like the
 * TX variant, the index skips the last entry of each ring page, hence
 * the increment at the page boundary.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3110
/* NAPI RX poll loop: process up to @budget received frames on this
 * instance's RX ring -- validate the frame header, either copy small
 * packets into a fresh skb or build an skb around the ring buffer,
 * apply VLAN/checksum/hash offload results, and pass the skb up via
 * GRO.  Finally update the ring producer registers.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		/* The device writes an l2_fhdr in front of the packet. */
		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; the rest is
		 * synced when the buffer is unmapped or recycled.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Prefetch the next entry's header to hide latency. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine how much of the frame is in this buffer vs.
		 * the page ring (split or jumbo frames).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling their buffers/pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a new skb and recycle
			 * the ring buffer in place.
			 */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they carry a VLAN tag
		 * (0x8100) that accounts for the extra bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Propagate hardware checksum result if offload is on
		 * and the hardware saw no checksum errors.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the hardware about the new producer positions. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Ensure the MMIO writes are ordered before the next doorbell. */
	mmiowb();

	return rx_pkt;

}
3272
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        /* Warm the cache line holding the status block before it is used. */
        prefetch(bnapi->status_blk.msi);
        /* Ack the interrupt and mask further ones; NAPI completion
         * re-arms via INT_ACK_CMD without MASK_INT (see bnx2_poll()).
         */
        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3295
/* One-shot MSI ISR.  Unlike bnx2_msi(), there is no INT_ACK_CMD
 * mask write here; NOTE(review): presumably the hardware auto-masks
 * in one-shot mode until NAPI re-arms it - confirm against the
 * one-shot MSI setup code.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        /* Warm the cache line holding the status block before it is used. */
        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3312
/* INTx ISR; also used when the interrupt line may be shared. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;        /* not our interrupt (shared line) */

        /* Ack and mask further interrupts until NAPI polling completes. */
        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (napi_schedule_prep(&bnapi->napi)) {
                /* Record the index we are acking so the poll loop can
                 * detect status updates posted after this point.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3351
3352 static inline int
3353 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3354 {
3355         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3356         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3357
3358         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3359             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3360                 return 1;
3361         return 0;
3362 }
3363
3364 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3365                                  STATUS_ATTN_BITS_TIMER_ABORT)
3366
3367 static inline int
3368 bnx2_has_work(struct bnx2_napi *bnapi)
3369 {
3370         struct status_block *sblk = bnapi->status_blk.msi;
3371
3372         if (bnx2_has_fast_work(bnapi))
3373                 return 1;
3374
3375 #ifdef BCM_CNIC
3376         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3377                 return 1;
3378 #endif
3379
3380         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3381             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3382                 return 1;
3383
3384         return 0;
3385 }
3386
/* Detect and recover from a missed MSI: if work is pending but the
 * status index has not advanced since the previous idle check, assume
 * the MSI was lost, bounce the MSI enable bit and run the handler by
 * hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                /* Nothing to recover if MSI is not in use. */
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                /* No progress since the last check: toggle MSI off and
                 * back on, then service the pending work directly.
                 */
                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                                ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember where we were for the next check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3408
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver, if any.
 * bp->cnic_ops is RCU-protected; the returned tag is compared against
 * status_idx in bnx2_has_work() to detect new CNIC events.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3425
/* Service link-state / timer-abort attention events.  An event is
 * pending when an attention bit differs from its ack copy.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                BNX2_WR(bp, BNX2_HC_COMMAND,
                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back to flush the posted write. */
                BNX2_RD(bp, BNX2_HC_COMMAND);
        }
}
3445
3446 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3447                           int work_done, int budget)
3448 {
3449         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3450         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3451
3452         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3453                 bnx2_tx_int(bp, bnapi, 0);
3454
3455         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3456                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3457
3458         return work_done;
3459 }
3460
/* NAPI poll handler for MSI-X vectors: fast-path (RX/TX) work only;
 * link and CNIC events are handled by bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;  /* budget exhausted; stay scheduled */

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        /* Re-arm this vector's interrupt, reporting how
                         * far we have processed the status block.
                         */
                        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3487
/* NAPI poll handler for INTx/MSI: services link events, fast-path
 * work and (if built in) CNIC events on the single vector.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        /* MSI/MSI-X: a single write re-arms the interrupt. */
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                        bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first ack with interrupts still masked,
                         * then unmask with a second write.
                         */
                        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                                bnapi->last_status_idx);

                        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3536
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        /* Serialize with other users of bp->phy_lock. */
        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode minus the bits recomputed below. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
        if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
             (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every multicast hash bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address to one bit of the filter: the low
                 * CRC byte selects the register (top 3 bits) and the
                 * bit within it (low 5 bits).
                 */
                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many unicast addresses for the perfect-match filters:
         * fall back to promiscuous mode.
         */
        if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into to the match filter list */
                i = 0;
                netdev_for_each_uc_addr(ha, dev) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Program the sort mode: clear, load, then enable. */
        BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3625
3626 static int
3627 check_fw_section(const struct firmware *fw,
3628                  const struct bnx2_fw_file_section *section,
3629                  u32 alignment, bool non_empty)
3630 {
3631         u32 offset = be32_to_cpu(section->offset);
3632         u32 len = be32_to_cpu(section->len);
3633
3634         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3635                 return -EINVAL;
3636         if ((non_empty && len == 0) || len > fw->size - offset ||
3637             len & (alignment - 1))
3638                 return -EINVAL;
3639         return 0;
3640 }
3641
3642 static int
3643 check_mips_fw_entry(const struct firmware *fw,
3644                     const struct bnx2_mips_fw_file_entry *entry)
3645 {
3646         if (check_fw_section(fw, &entry->text, 4, true) ||
3647             check_fw_section(fw, &entry->data, 4, false) ||
3648             check_fw_section(fw, &entry->rodata, 4, false))
3649                 return -EINVAL;
3650         return 0;
3651 }
3652
3653 static void bnx2_release_firmware(struct bnx2 *bp)
3654 {
3655         if (bp->rv2p_firmware) {
3656                 release_firmware(bp->mips_firmware);
3657                 release_firmware(bp->rv2p_firmware);
3658                 bp->rv2p_firmware = NULL;
3659         }
3660 }
3661
/* Fetch and validate the MIPS and RV2P firmware images for this chip.
 * On success both bp->mips_firmware and bp->rv2p_firmware are held;
 * on any failure neither reference is kept.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        /* 5709 A0/A1 need a different RV2P image than later 5709
         * revisions; every other chip uses the 06 firmware set.
         */
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
                    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
                        rv2p_fw_file = FW_RV2P_FILE_09_Ax;
                else
                        rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
                goto out;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
                goto err_release_mips_firmware;
        }
        /* Sanity-check every section header before anything is used. */
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
                rc = -EINVAL;
                goto err_release_firmware;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
                rc = -EINVAL;
                goto err_release_firmware;
        }
out:
        return rc;

err_release_firmware:
        release_firmware(bp->rv2p_firmware);
        bp->rv2p_firmware = NULL;
err_release_mips_firmware:
        release_firmware(bp->mips_firmware);
        goto out;
}
3721
3722 static int bnx2_request_firmware(struct bnx2 *bp)
3723 {
3724         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3725 }
3726
3727 static u32
3728 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3729 {
3730         switch (idx) {
3731         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3732                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3733                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3734                 break;
3735         }
3736         return rv2p_code;
3737 }
3738
/* Download firmware into one of the two RV2P processors.  Instructions
 * are streamed as 64-bit words through the INSTR_HIGH / INSTR_LOW
 * registers and committed via the processor's ADDR_CMD register; the
 * firmware's fixup table is then applied and the processor is put
 * into reset (un-stalled later).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Each processor has its own command register and RDWR bit. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Write the code image, one 64-bit instruction at a time. */
        for (i = 0; i < rv2p_code_len; i += 8) {
                BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                /* Commit: instruction index plus the write command bit. */
                val = (i / 8) | cmd;
                BNX2_WR(bp, addr, val);
        }

        /* Apply fixups: each non-zero table entry names a 32-bit word
         * to patch (via rv2p_fw_fixup), rewriting the 64-bit
         * instruction that contains it.
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        BNX2_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3798
3799 static int
3800 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3801             const struct bnx2_mips_fw_file_entry *fw_entry)
3802 {
3803         u32 addr, len, file_offset;
3804         __be32 *data;
3805         u32 offset;
3806         u32 val;
3807
3808         /* Halt the CPU. */
3809         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3810         val |= cpu_reg->mode_value_halt;
3811         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3812         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3813
3814         /* Load the Text area. */
3815         addr = be32_to_cpu(fw_entry->text.addr);
3816         len = be32_to_cpu(fw_entry->text.len);
3817         file_offset = be32_to_cpu(fw_entry->text.offset);
3818         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3819
3820         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3821         if (len) {
3822                 int j;
3823
3824                 for (j = 0; j < (len / 4); j++, offset += 4)
3825                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826         }
3827
3828         /* Load the Data area. */
3829         addr = be32_to_cpu(fw_entry->data.addr);
3830         len = be32_to_cpu(fw_entry->data.len);
3831         file_offset = be32_to_cpu(fw_entry->data.offset);
3832         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3833
3834         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3835         if (len) {
3836                 int j;
3837
3838                 for (j = 0; j < (len / 4); j++, offset += 4)
3839                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3840         }
3841
3842         /* Load the Read-Only area. */
3843         addr = be32_to_cpu(fw_entry->rodata.addr);
3844         len = be32_to_cpu(fw_entry->rodata.len);
3845         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3846         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3847
3848         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3849         if (len) {
3850                 int j;
3851
3852                 for (j = 0; j < (len / 4); j++, offset += 4)
3853                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3854         }
3855
3856         /* Clear the pre-fetch instruction. */
3857         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3858
3859         val = be32_to_cpu(fw_entry->start_addr);
3860         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3861
3862         /* Start the CPU. */
3863         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3864         val &= ~cpu_reg->mode_value_halt;
3865         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3866         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3867
3868         return 0;
3869 }
3870
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 * Returns 0 on success or the first load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        const struct bnx2_mips_fw_file *mips_fw =
                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        const struct bnx2_rv2p_fw_file *rv2p_fw =
                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        int rc;

        /* Initialize the RV2P processor. */
        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

        /* Initialize the RX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
        return rc;
}
3910
/* Configure the MAC for Wake-on-LAN (magic/ACPI packet reception)
 * and notify the firmware whether this port should wake the system.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
        int i;
        u32 val, wol_msg;

        if (bp->wol) {
                u32 advertising;
                u8 autoneg;

                autoneg = bp->autoneg;
                advertising = bp->advertising;

                /* On copper, advertise only 10/100 for the suspended
                 * link.  NOTE(review): presumably a low-power-state
                 * limit - confirm against the chip documentation.
                 */
                if (bp->phy_port == PORT_TP) {
                        bp->autoneg = AUTONEG_SPEED;
                        bp->advertising = ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_Autoneg;
                }

                /* Re-run PHY setup with the temporary settings, then
                 * restore the user-configured values.
                 */
                spin_lock_bh(&bp->phy_lock);
                bnx2_setup_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);

                bp->autoneg = autoneg;
                bp->advertising = advertising;

                bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                val = BNX2_RD(bp, BNX2_EMAC_MODE);

                /* Enable port mode. */
                val &= ~BNX2_EMAC_MODE_PORT;
                val |= BNX2_EMAC_MODE_MPKT_RCVD |
                       BNX2_EMAC_MODE_ACPI_RCVD |
                       BNX2_EMAC_MODE_MPKT;
                if (bp->phy_port == PORT_TP) {
                        val |= BNX2_EMAC_MODE_PORT_MII;
                } else {
                        val |= BNX2_EMAC_MODE_PORT_GMII;
                        if (bp->line_speed == SPEED_2500)
                                val |= BNX2_EMAC_MODE_25G_MODE;
                }

                BNX2_WR(bp, BNX2_EMAC_MODE, val);

                /* receive all multicast */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                0xffffffff);
                }
                BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

                /* Sort on broadcast and multicast only while asleep. */
                val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
                BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
                BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

                /* Need to enable EMAC and RPM for WOL. */
                BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                        BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                        BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                        BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                val = BNX2_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                BNX2_WR(bp, BNX2_RPM_CONFIG, val);

                wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        } else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        }

        /* Tell the firmware, unless WOL signalling is unsupported. */
        if (!(bp->flags & BNX2_FLAG_NO_WOL))
                bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);

}
3990
/* Move the device between power states.  Only PCI_D0 (resume) and
 * PCI_D3hot (suspend, with WoL armed per bp->wol) are supported;
 * any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        switch (state) {
        case PCI_D0: {
                u32 val;

                pci_enable_wake(bp->pdev, PCI_D0, false);
                pci_set_power_state(bp->pdev, PCI_D0);

                /* Undo the WoL MAC setup: keep the received-packet
                 * status bits but leave magic-packet mode.
                 */
                val = BNX2_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                BNX2_WR(bp, BNX2_EMAC_MODE, val);

                val = BNX2_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                BNX2_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                bnx2_setup_wol(bp);
                pci_wake_from_d3(bp->pdev, bp->wol);
                /* 5706 A0/A1 are only moved to D3hot when WoL is on.
                 * NOTE(review): presumably a chip erratum - confirm.
                 */
                if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
                    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pci_set_power_state(bp->pdev, PCI_D3hot);
                } else {
                        pci_set_power_state(bp->pdev, PCI_D3hot);
                }

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
4033
4034 static int
4035 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4036 {
4037         u32 val;
4038         int j;
4039
4040         /* Request access to the flash interface. */
4041         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4042         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4043                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4044                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4045                         break;
4046
4047                 udelay(5);
4048         }
4049
4050         if (j >= NVRAM_TIMEOUT_COUNT)
4051                 return -EBUSY;
4052
4053         return 0;
4054 }
4055
4056 static int
4057 bnx2_release_nvram_lock(struct bnx2 *bp)
4058 {
4059         int j;
4060         u32 val;
4061
4062         /* Relinquish nvram interface. */
4063         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4064
4065         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4066                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4067                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4068                         break;
4069
4070                 udelay(5);
4071         }
4072
4073         if (j >= NVRAM_TIMEOUT_COUNT)
4074                 return -EBUSY;
4075
4076         return 0;
4077 }
4078
4079
4080 static int
4081 bnx2_enable_nvram_write(struct bnx2 *bp)
4082 {
4083         u32 val;
4084
4085         val = BNX2_RD(bp, BNX2_MISC_CFG);
4086         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4087
4088         if (bp->flash_info->flags & BNX2_NV_WREN) {
4089                 int j;
4090
4091                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4092                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4093                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4094
4095                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4096                         udelay(5);
4097
4098                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4099                         if (val & BNX2_NVM_COMMAND_DONE)
4100                                 break;
4101                 }
4102
4103                 if (j >= NVRAM_TIMEOUT_COUNT)
4104                         return -EBUSY;
4105         }
4106         return 0;
4107 }
4108
4109 static void
4110 bnx2_disable_nvram_write(struct bnx2 *bp)
4111 {
4112         u32 val;
4113
4114         val = BNX2_RD(bp, BNX2_MISC_CFG);
4115         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4116 }
4117
4118
4119 static void
4120 bnx2_enable_nvram_access(struct bnx2 *bp)
4121 {
4122         u32 val;
4123
4124         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4125         /* Enable both bits, even on read. */
4126         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4127                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4128 }
4129
4130 static void
4131 bnx2_disable_nvram_access(struct bnx2 *bp)
4132 {
4133         u32 val;
4134
4135         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4136         /* Disable both bits, even after read. */
4137         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4138                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4139                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4140 }
4141
4142 static int
4143 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4144 {
4145         u32 cmd;
4146         int j;
4147
4148         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4149                 /* Buffered flash, no erase needed */
4150                 return 0;
4151
4152         /* Build an erase command */
4153         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4154               BNX2_NVM_COMMAND_DOIT;
4155
4156         /* Need to clear DONE bit separately. */
4157         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4158
4159         /* Address of the NVRAM to read from. */
4160         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4161
4162         /* Issue an erase command. */
4163         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4164
4165         /* Wait for completion. */
4166         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4167                 u32 val;
4168
4169                 udelay(5);
4170
4171                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4172                 if (val & BNX2_NVM_COMMAND_DONE)
4173                         break;
4174         }
4175
4176         if (j >= NVRAM_TIMEOUT_COUNT)
4177                 return -EBUSY;
4178
4179         return 0;
4180 }
4181
4182 static int
4183 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4184 {
4185         u32 cmd;
4186         int j;
4187
4188         /* Build the command word. */
4189         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4190
4191         /* Calculate an offset of a buffered flash, not needed for 5709. */
4192         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4193                 offset = ((offset / bp->flash_info->page_size) <<
4194                            bp->flash_info->page_bits) +
4195                           (offset % bp->flash_info->page_size);
4196         }
4197
4198         /* Need to clear DONE bit separately. */
4199         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4200
4201         /* Address of the NVRAM to read from. */
4202         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4203
4204         /* Issue a read command. */
4205         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4206
4207         /* Wait for completion. */
4208         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4209                 u32 val;
4210
4211                 udelay(5);
4212
4213                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4214                 if (val & BNX2_NVM_COMMAND_DONE) {
4215                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4216                         memcpy(ret_val, &v, 4);
4217                         break;
4218                 }
4219         }
4220         if (j >= NVRAM_TIMEOUT_COUNT)
4221                 return -EBUSY;
4222
4223         return 0;
4224 }
4225
4226
4227 static int
4228 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4229 {
4230         u32 cmd;
4231         __be32 val32;
4232         int j;
4233
4234         /* Build the command word. */
4235         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4236
4237         /* Calculate an offset of a buffered flash, not needed for 5709. */
4238         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4239                 offset = ((offset / bp->flash_info->page_size) <<
4240                           bp->flash_info->page_bits) +
4241                          (offset % bp->flash_info->page_size);
4242         }
4243
4244         /* Need to clear DONE bit separately. */
4245         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4246
4247         memcpy(&val32, val, 4);
4248
4249         /* Write the data. */
4250         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4251
4252         /* Address of the NVRAM to write to. */
4253         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4254
4255         /* Issue the write command. */
4256         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4257
4258         /* Wait for completion. */
4259         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4260                 udelay(5);
4261
4262                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4263                         break;
4264         }
4265         if (j >= NVRAM_TIMEOUT_COUNT)
4266                 return -EBUSY;
4267
4268         return 0;
4269 }
4270
/* Identify the attached flash/EEPROM part and record it in bp->flash_info.
 *
 * The 5709 supports a single part, so it is selected directly.  Older
 * chips match the strapping bits in NVM_CFG1 against flash_table[]; if
 * the interface has not been reconfigured yet, the matched entry's
 * config registers are programmed under the NVRAM hardware lock.  The
 * usable flash size is taken from shared memory when the firmware
 * reports a non-zero value, otherwise from the table entry.
 *
 * Returns 0 on success, -ENODEV for an unknown part, or the error from
 * bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the backup strap bits stored in
			 * each entry's config1 value. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops fall through with j == entry_count when nothing
	 * matched the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size the firmware reports in shared memory;
	 * fall back to the table's total_size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4353
4354 static int
4355 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4356                 int buf_size)
4357 {
4358         int rc = 0;
4359         u32 cmd_flags, offset32, len32, extra;
4360
4361         if (buf_size == 0)
4362                 return 0;
4363
4364         /* Request access to the flash interface. */
4365         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4366                 return rc;
4367
4368         /* Enable access to flash interface */
4369         bnx2_enable_nvram_access(bp);
4370
4371         len32 = buf_size;
4372         offset32 = offset;
4373         extra = 0;
4374
4375         cmd_flags = 0;
4376
4377         if (offset32 & 3) {
4378                 u8 buf[4];
4379                 u32 pre_len;
4380
4381                 offset32 &= ~3;
4382                 pre_len = 4 - (offset & 3);
4383
4384                 if (pre_len >= len32) {
4385                         pre_len = len32;
4386                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4387                                     BNX2_NVM_COMMAND_LAST;
4388                 }
4389                 else {
4390                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4391                 }
4392
4393                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4394
4395                 if (rc)
4396                         return rc;
4397
4398                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4399
4400                 offset32 += 4;
4401                 ret_buf += pre_len;
4402                 len32 -= pre_len;
4403         }
4404         if (len32 & 3) {
4405                 extra = 4 - (len32 & 3);
4406                 len32 = (len32 + 4) & ~3;
4407         }
4408
4409         if (len32 == 4) {
4410                 u8 buf[4];
4411
4412                 if (cmd_flags)
4413                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4414                 else
4415                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4416                                     BNX2_NVM_COMMAND_LAST;
4417
4418                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4419
4420                 memcpy(ret_buf, buf, 4 - extra);
4421         }
4422         else if (len32 > 0) {
4423                 u8 buf[4];
4424
4425                 /* Read the first word. */
4426                 if (cmd_flags)
4427                         cmd_flags = 0;
4428                 else
4429                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4430
4431                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4432
4433                 /* Advance to the next dword. */
4434                 offset32 += 4;
4435                 ret_buf += 4;
4436                 len32 -= 4;
4437
4438                 while (len32 > 4 && rc == 0) {
4439                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4440
4441                         /* Advance to the next dword. */
4442                         offset32 += 4;
4443                         ret_buf += 4;
4444                         len32 -= 4;
4445                 }
4446
4447                 if (rc)
4448                         return rc;
4449
4450                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4451                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4452
4453                 memcpy(ret_buf, buf, 4 - extra);
4454         }
4455
4456         /* Disable access to flash interface */
4457         bnx2_disable_nvram_access(bp);
4458
4459         bnx2_release_nvram_lock(bp);
4460
4461         return rc;
4462 }
4463
/* bnx2_nvram_write - write an arbitrary byte range to NVRAM.
 * @bp:       device state
 * @offset:   byte offset into the flash to start writing
 * @data_buf: source data
 * @buf_size: number of bytes to write
 *
 * Unaligned head/tail bytes are merged with the existing flash contents
 * via read-modify-write into a kmalloc'd align_buf.  For non-buffered
 * parts, each affected page is first read into flash_buffer, erased,
 * and rewritten with the new data spliced in; the NVRAM hardware lock
 * is taken and dropped around each page.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error paths that "goto nvram_write_end" after
 * bnx2_acquire_nvram_lock() has succeeded inside the loop skip
 * bnx2_disable_nvram_access()/bnx2_release_nvram_lock() and so exit
 * with the NVRAM lock still held — confirm and fix separately.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the existing dword so its leading bytes
	 * can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the existing final dword likewise. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a dword-aligned image: preserved head, caller data,
	 * preserved tail. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered parts need a page-sized staging buffer for the
	 * read-erase-rewrite cycle (264 bytes covers the page size). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4643
4644 static void
4645 bnx2_init_fw_cap(struct bnx2 *bp)
4646 {
4647         u32 val, sig = 0;
4648
4649         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4650         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4651
4652         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4653                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4654
4655         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4656         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4657                 return;
4658
4659         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4660                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4661                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4662         }
4663
4664         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4665             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4666                 u32 link;
4667
4668                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4669
4670                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4671                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4672                         bp->phy_port = PORT_FIBRE;
4673                 else
4674                         bp->phy_port = PORT_TP;
4675
4676                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4677                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4678         }
4679
4680         if (netif_running(bp->dev) && sig)
4681                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4682 }
4683
/* Select separate-window GRC addressing and point windows 2 and 3 at
 * the MSI-X vector table and the MSI-X pending-bit array respectively.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4692
/* bnx2_reset_chip - soft-reset the chip core and re-sync with firmware.
 * @bp:         device state
 * @reset_code: BNX2_DRV_MSG_DATA_* reason code passed to the bootcode
 *              via bnx2_fw_sync() before and after the reset
 *
 * Quiesces DMA, deposits a soft-reset signature for the firmware,
 * issues the reset (MISC_COMMAND on 5709, PCICFG on 5706/5708), then
 * waits for the bootcode to finish re-initializing.  Re-reads firmware
 * capabilities and reapplies chip-specific workarounds afterwards.
 *
 * Returns 0 on success, -EBUSY if the reset does not complete, -ENODEV
 * on an endianness check failure, or the error from the firmware
 * handshake / rbuf repair.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		/* Read back to flush the posted write before the delay. */
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* Disable DMA, then poll PCICFG_DEVICE_CONTROL for up to
		 * 100 ms before proceeding. */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register-window access after the reset. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reapply the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4817
4818 static int
4819 bnx2_init_chip(struct bnx2 *bp)
4820 {
4821         u32 val, mtu;
4822         int rc, i;
4823
4824         /* Make sure the interrupt is not active. */
4825         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4826
4827         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4828               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4829 #ifdef __BIG_ENDIAN
4830               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4831 #endif
4832               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4833               DMA_READ_CHANS << 12 |
4834               DMA_WRITE_CHANS << 16;
4835
4836         val |= (0x2 << 20) | (1 << 11);
4837
4838         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4839                 val |= (1 << 23);
4840
4841         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4842             (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4843             !(bp->flags & BNX2_FLAG_PCIX))
4844                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4845
4846         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4847
4848         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4849                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4850                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4851                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4852         }
4853
4854         if (bp->flags & BNX2_FLAG_PCIX) {
4855                 u16 val16;
4856
4857                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4858                                      &val16);
4859                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4860                                       val16 & ~PCI_X_CMD_ERO);
4861         }
4862
4863         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4864                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4865                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4866                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4867
4868         /* Initialize context mapping and zero out the quick contexts.  The
4869          * context block must have already been enabled. */
4870         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4871                 rc = bnx2_init_5709_context(bp);
4872                 if (rc)
4873                         return rc;
4874         } else
4875                 bnx2_init_context(bp);
4876
4877         if ((rc = bnx2_init_cpus(bp)) != 0)
4878                 return rc;
4879
4880         bnx2_init_nvram(bp);
4881
4882         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4883
4884         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4885         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4886         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4887         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4888                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4889                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4890                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4891         }
4892
4893         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4894
4895         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4896         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4897         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4898
4899         val = (BNX2_PAGE_BITS - 8) << 24;
4900         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4901
4902         /* Configure page size. */
4903         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4904         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4905         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4906         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4907
4908         val = bp->mac_addr[0] +
4909               (bp->mac_addr[1] << 8) +
4910               (bp->mac_addr[2] << 16) +
4911               bp->mac_addr[3] +
4912               (bp->mac_addr[4] << 8) +
4913               (bp->mac_addr[5] << 16);
4914         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4915
4916         /* Program the MTU.  Also include 4 bytes for CRC32. */
4917         mtu = bp->dev->mtu;
4918         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4919         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4920                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4921         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4922
4923         if (mtu < 1500)
4924                 mtu = 1500;
4925
4926         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4927         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4928         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4929
4930         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4931         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4932                 bp->bnx2_napi[i].last_status_idx = 0;
4933
4934         bp->idle_chk_status_idx = 0xffff;
4935
4936         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4937
4938         /* Set up how to generate a link change interrupt. */
4939         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4940
4941         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4942                 (u64) bp->status_blk_mapping & 0xffffffff);
4943         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4944
4945         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4946                 (u64) bp->stats_blk_mapping & 0xffffffff);
4947         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4948                 (u64) bp->stats_blk_mapping >> 32);
4949
4950         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4951                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4952
4953         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4954                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4955
4956         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4957                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4958
4959         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4960
4961         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4962
4963         BNX2_WR(bp, BNX2_HC_COM_TICKS,
4964                 (bp->com_ticks_int << 16) | bp->com_ticks);
4965
4966         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4967                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4968
4969         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4970                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4971         else
4972                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4973         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4974
4975         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4976                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4977         else {
4978                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4979                       BNX2_HC_CONFIG_COLLECT_STATS;
4980         }
4981
4982         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4983                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4984                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
4985
4986                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4987         }
4988
4989         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4990                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4991
4992         BNX2_WR(bp, BNX2_HC_CONFIG, val);
4993
4994         if (bp->rx_ticks < 25)
4995                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4996         else
4997                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4998
4999         for (i = 1; i < bp->irq_nvecs; i++) {
5000                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5001                            BNX2_HC_SB_CONFIG_1;
5002
5003                 BNX2_WR(bp, base,
5004                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5005                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5006                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5007
5008                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5009                         (bp->tx_quick_cons_trip_int << 16) |
5010                          bp->tx_quick_cons_trip);
5011
5012                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5013                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5014
5015                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5016                         (bp->rx_quick_cons_trip_int << 16) |
5017                         bp->rx_quick_cons_trip);
5018
5019                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5020                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5021         }
5022
5023         /* Clear internal stats counters. */
5024         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5025
5026         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5027
5028         /* Initialize the receive filter. */
5029         bnx2_set_rx_mode(bp->dev);
5030
5031         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5032                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5033                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5034                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5035         }
5036         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5037                           1, 0);
5038
5039         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5040         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5041
5042         udelay(20);
5043
5044         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5045
5046         return rc;
5047 }
5048
5049 static void
5050 bnx2_clear_ring_states(struct bnx2 *bp)
5051 {
5052         struct bnx2_napi *bnapi;
5053         struct bnx2_tx_ring_info *txr;
5054         struct bnx2_rx_ring_info *rxr;
5055         int i;
5056
5057         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5058                 bnapi = &bp->bnx2_napi[i];
5059                 txr = &bnapi->tx_ring;
5060                 rxr = &bnapi->rx_ring;
5061
5062                 txr->tx_cons = 0;
5063                 txr->hw_tx_cons = 0;
5064                 rxr->rx_prod_bseq = 0;
5065                 rxr->rx_prod = 0;
5066                 rxr->rx_cons = 0;
5067                 rxr->rx_pg_prod = 0;
5068                 rxr->rx_pg_cons = 0;
5069         }
5070 }
5071
5072 static void
5073 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5074 {
5075         u32 val, offset0, offset1, offset2, offset3;
5076         u32 cid_addr = GET_CID_ADDR(cid);
5077
5078         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5079                 offset0 = BNX2_L2CTX_TYPE_XI;
5080                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5081                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5082                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5083         } else {
5084                 offset0 = BNX2_L2CTX_TYPE;
5085                 offset1 = BNX2_L2CTX_CMD_TYPE;
5086                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5087                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5088         }
5089         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5090         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5091
5092         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5093         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5094
5095         val = (u64) txr->tx_desc_mapping >> 32;
5096         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5097
5098         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5099         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5100 }
5101
5102 static void
5103 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5104 {
5105         struct bnx2_tx_bd *txbd;
5106         u32 cid = TX_CID;
5107         struct bnx2_napi *bnapi;
5108         struct bnx2_tx_ring_info *txr;
5109
5110         bnapi = &bp->bnx2_napi[ring_num];
5111         txr = &bnapi->tx_ring;
5112
5113         if (ring_num == 0)
5114                 cid = TX_CID;
5115         else
5116                 cid = TX_TSS_CID + ring_num - 1;
5117
5118         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5119
5120         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5121
5122         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5123         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5124
5125         txr->tx_prod = 0;
5126         txr->tx_prod_bseq = 0;
5127
5128         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5129         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5130
5131         bnx2_init_tx_context(bp, cid, txr);
5132 }
5133
5134 static void
5135 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5136                      u32 buf_size, int num_rings)
5137 {
5138         int i;
5139         struct bnx2_rx_bd *rxbd;
5140
5141         for (i = 0; i < num_rings; i++) {
5142                 int j;
5143
5144                 rxbd = &rx_ring[i][0];
5145                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5146                         rxbd->rx_bd_len = buf_size;
5147                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5148                 }
5149                 if (i == (num_rings - 1))
5150                         j = 0;
5151                 else
5152                         j = i + 1;
5153                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5154                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5155         }
5156 }
5157
/* Set up one rx ring: program its hardware context, chain the BD pages,
 * pre-fill the ring (and the page ring, if used) with buffers, and write
 * the initial producer indices to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default rx CID; RSS rings use consecutive
	 * CIDs starting at RX_RSS_CID. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffer size 0 disables the page ring; it is re-enabled
	 * below when jumbo page buffers are in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Bus address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Bus address of the first normal-ring BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is logged but not
	 * fatal - the ring runs with fewer pages. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with data buffers, same policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5243
5244 static void
5245 bnx2_init_all_rings(struct bnx2 *bp)
5246 {
5247         int i;
5248         u32 val;
5249
5250         bnx2_clear_ring_states(bp);
5251
5252         BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5253         for (i = 0; i < bp->num_tx_rings; i++)
5254                 bnx2_init_tx_ring(bp, i);
5255
5256         if (bp->num_tx_rings > 1)
5257                 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5258                         (TX_TSS_CID << 7));
5259
5260         BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5261         bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5262
5263         for (i = 0; i < bp->num_rx_rings; i++)
5264                 bnx2_init_rx_ring(bp, i);
5265
5266         if (bp->num_rx_rings > 1) {
5267                 u32 tbl_32 = 0;
5268
5269                 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5270                         int shift = (i % 8) << 2;
5271
5272                         tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5273                         if ((i % 8) == 7) {
5274                                 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5275                                 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5276                                         BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5277                                         BNX2_RLUP_RSS_COMMAND_WRITE |
5278                                         BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5279                                 tbl_32 = 0;
5280                         }
5281                 }
5282
5283                 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5284                       BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5285
5286                 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5287
5288         }
5289 }
5290
5291 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5292 {
5293         u32 max, num_rings = 1;
5294
5295         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5296                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5297                 num_rings++;
5298         }
5299         /* round to next power of 2 */
5300         max = max_size;
5301         while ((max & num_rings) == 0)
5302                 max >>= 1;
5303
5304         if (num_rings != max)
5305                 max <<= 1;
5306
5307         return max;
5308 }
5309
5310 static void
5311 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5312 {
5313         u32 rx_size, rx_space, jumbo_size;
5314
5315         /* 8 for CRC and VLAN */
5316         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5317
5318         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5319                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5320
5321         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5322         bp->rx_pg_ring_size = 0;
5323         bp->rx_max_pg_ring = 0;
5324         bp->rx_max_pg_ring_idx = 0;
5325         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5326                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5327
5328                 jumbo_size = size * pages;
5329                 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5330                         jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5331
5332                 bp->rx_pg_ring_size = jumbo_size;
5333                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5334                                                         BNX2_MAX_RX_PG_RINGS);
5335                 bp->rx_max_pg_ring_idx =
5336                         (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5337                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5338                 bp->rx_copy_thresh = 0;
5339         }
5340
5341         bp->rx_buf_use_size = rx_size;
5342         /* hw alignment + build_skb() overhead*/
5343         bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5344                 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5345         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5346         bp->rx_ring_size = size;
5347         bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5348         bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5349 }
5350
5351 static void
5352 bnx2_free_tx_skbs(struct bnx2 *bp)
5353 {
5354         int i;
5355
5356         for (i = 0; i < bp->num_tx_rings; i++) {
5357                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5358                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5359                 int j;
5360
5361                 if (txr->tx_buf_ring == NULL)
5362                         continue;
5363
5364                 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5365                         struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5366                         struct sk_buff *skb = tx_buf->skb;
5367                         int k, last;
5368
5369                         if (skb == NULL) {
5370                                 j = BNX2_NEXT_TX_BD(j);
5371                                 continue;
5372                         }
5373
5374                         dma_unmap_single(&bp->pdev->dev,
5375                                          dma_unmap_addr(tx_buf, mapping),
5376                                          skb_headlen(skb),
5377                                          PCI_DMA_TODEVICE);
5378
5379                         tx_buf->skb = NULL;
5380
5381                         last = tx_buf->nr_frags;
5382                         j = BNX2_NEXT_TX_BD(j);
5383                         for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5384                                 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5385                                 dma_unmap_page(&bp->pdev->dev,
5386                                         dma_unmap_addr(tx_buf, mapping),
5387                                         skb_frag_size(&skb_shinfo(skb)->frags[k]),
5388                                         PCI_DMA_TODEVICE);
5389                         }
5390                         dev_kfree_skb(skb);
5391                 }
5392                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5393         }
5394 }
5395
5396 static void
5397 bnx2_free_rx_skbs(struct bnx2 *bp)
5398 {
5399         int i;
5400
5401         for (i = 0; i < bp->num_rx_rings; i++) {
5402                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5403                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5404                 int j;
5405
5406                 if (rxr->rx_buf_ring == NULL)
5407                         return;
5408
5409                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5410                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5411                         u8 *data = rx_buf->data;
5412
5413                         if (data == NULL)
5414                                 continue;
5415
5416                         dma_unmap_single(&bp->pdev->dev,
5417                                          dma_unmap_addr(rx_buf, mapping),
5418                                          bp->rx_buf_use_size,
5419                                          PCI_DMA_FROMDEVICE);
5420
5421                         rx_buf->data = NULL;
5422
5423                         kfree(data);
5424                 }
5425                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5426                         bnx2_free_rx_page(bp, rxr, j);
5427         }
5428 }
5429
/* Release all tx and rx buffers still held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5436
5437 static int
5438 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5439 {
5440         int rc;
5441
5442         rc = bnx2_reset_chip(bp, reset_code);
5443         bnx2_free_skbs(bp);
5444         if (rc)
5445                 return rc;
5446
5447         if ((rc = bnx2_init_chip(bp)) != 0)
5448                 return rc;
5449
5450         bnx2_init_all_rings(bp);
5451         return 0;
5452 }
5453
5454 static int
5455 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5456 {
5457         int rc;
5458
5459         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5460                 return rc;
5461
5462         spin_lock_bh(&bp->phy_lock);
5463         bnx2_init_phy(bp, reset_phy);
5464         bnx2_set_link(bp);
5465         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5466                 bnx2_remote_phy_event(bp);
5467         spin_unlock_bh(&bp->phy_lock);
5468         return 0;
5469 }
5470
5471 static int
5472 bnx2_shutdown_chip(struct bnx2 *bp)
5473 {
5474         u32 reset_code;
5475
5476         if (bp->flags & BNX2_FLAG_NO_WOL)
5477                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5478         else if (bp->wol)
5479                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5480         else
5481                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5482
5483         return bnx2_reset_chip(bp, reset_code);
5484 }
5485
/* Ethtool self-test: probe a table of chip registers.  For each entry,
 * write all-zeros and all-ones and verify that the read/write bits
 * (rw_mask) take the written value while the read-only bits (ro_mask)
 * keep their original value.  The original register value is restored
 * in all cases.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip entries that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write zeros: rw bits must read back 0, ro bits must
		 * keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write ones: rw bits must read back 1, ro bits must
		 * still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5656
5657 static int
5658 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5659 {
5660         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5661                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5662         int i;
5663
5664         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5665                 u32 offset;
5666
5667                 for (offset = 0; offset < size; offset += 4) {
5668
5669                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5670
5671                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5672                                 test_pattern[i]) {
5673                                 return -ENODEV;
5674                         }
5675                 }
5676         }
5677         return 0;
5678 }
5679
5680 static int
5681 bnx2_test_memory(struct bnx2 *bp)
5682 {
5683         int ret = 0;
5684         int i;
5685         static struct mem_entry {
5686                 u32   offset;
5687                 u32   len;
5688         } mem_tbl_5706[] = {
5689                 { 0x60000,  0x4000 },
5690                 { 0xa0000,  0x3000 },
5691                 { 0xe0000,  0x4000 },
5692                 { 0x120000, 0x4000 },
5693                 { 0x1a0000, 0x4000 },
5694                 { 0x160000, 0x4000 },
5695                 { 0xffffffff, 0    },
5696         },
5697         mem_tbl_5709[] = {
5698                 { 0x60000,  0x4000 },
5699                 { 0xa0000,  0x3000 },
5700                 { 0xe0000,  0x4000 },
5701                 { 0x120000, 0x4000 },
5702                 { 0x1a0000, 0x4000 },
5703                 { 0xffffffff, 0    },
5704         };
5705         struct mem_entry *mem_tbl;
5706
5707         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5708                 mem_tbl = mem_tbl_5709;
5709         else
5710                 mem_tbl = mem_tbl_5706;
5711
5712         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5713                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5714                         mem_tbl[i].len)) != 0) {
5715                         return ret;
5716                 }
5717         }
5718
5719         return ret;
5720 }
5721
#define BNX2_MAC_LOOPBACK       0
#define BNX2_PHY_LOOPBACK       1

/* Self-test helper: transmit one self-addressed frame in MAC or PHY
 * loopback mode and verify it comes back intact on RX ring 0.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * allocation/mapping failure, and -ENODEV when the frame is lost or
 * corrupted.  Runs from the ethtool self-test path with traffic
 * quiesced.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): txr/rxr are re-assigned to the same values as
	 * their initializers above — presumably a leftover from a
	 * multi-ring refactor; harmless but redundant.
	 */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Remote PHY is owned by management firmware; cannot
		 * loop it, so report success and skip.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits one RX buffer (minus CRC). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address; next 8 bytes zeroed. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	/* Payload: deterministic byte pattern, verified on receive. */
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce event (no interrupt) so the status block is
	 * current before sampling the RX consumer index.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single start+end TX descriptor for the test frame. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: producer index, then byte sequence. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);	/* give the chip time to loop the frame back */

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have consumed exactly our one descriptor. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly one new frame must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	/* The L2 frame header precedes the packet in the RX buffer. */
	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as damaged. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5857
5858 #define BNX2_MAC_LOOPBACK_FAILED        1
5859 #define BNX2_PHY_LOOPBACK_FAILED        2
5860 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5861                                          BNX2_PHY_LOOPBACK_FAILED)
5862
5863 static int
5864 bnx2_test_loopback(struct bnx2 *bp)
5865 {
5866         int rc = 0;
5867
5868         if (!netif_running(bp->dev))
5869                 return BNX2_LOOPBACK_FAILED;
5870
5871         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5872         spin_lock_bh(&bp->phy_lock);
5873         bnx2_init_phy(bp, 1);
5874         spin_unlock_bh(&bp->phy_lock);
5875         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5876                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5877         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5878                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5879         return rc;
5880 }
5881
5882 #define NVRAM_SIZE 0x200
5883 #define CRC32_RESIDUAL 0xdebb20e3
5884
5885 static int
5886 bnx2_test_nvram(struct bnx2 *bp)
5887 {
5888         __be32 buf[NVRAM_SIZE / 4];
5889         u8 *data = (u8 *) buf;
5890         int rc = 0;
5891         u32 magic, csum;
5892
5893         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5894                 goto test_nvram_done;
5895
5896         magic = be32_to_cpu(buf[0]);
5897         if (magic != 0x669955aa) {
5898                 rc = -ENODEV;
5899                 goto test_nvram_done;
5900         }
5901
5902         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5903                 goto test_nvram_done;
5904
5905         csum = ether_crc_le(0x100, data);
5906         if (csum != CRC32_RESIDUAL) {
5907                 rc = -ENODEV;
5908                 goto test_nvram_done;
5909         }
5910
5911         csum = ether_crc_le(0x100, data + 0x100);
5912         if (csum != CRC32_RESIDUAL) {
5913                 rc = -ENODEV;
5914         }
5915
5916 test_nvram_done:
5917         return rc;
5918 }
5919
5920 static int
5921 bnx2_test_link(struct bnx2 *bp)
5922 {
5923         u32 bmsr;
5924
5925         if (!netif_running(bp->dev))
5926                 return -ENODEV;
5927
5928         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5929                 if (bp->link_up)
5930                         return 0;
5931                 return -ENODEV;
5932         }
5933         spin_lock_bh(&bp->phy_lock);
5934         bnx2_enable_bmsr1(bp);
5935         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5936         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5937         bnx2_disable_bmsr1(bp);
5938         spin_unlock_bh(&bp->phy_lock);
5939
5940         if (bmsr & BMSR_LSTATUS) {
5941                 return 0;
5942         }
5943         return -ENODEV;
5944 }
5945
5946 static int
5947 bnx2_test_intr(struct bnx2 *bp)
5948 {
5949         int i;
5950         u16 status_idx;
5951
5952         if (!netif_running(bp->dev))
5953                 return -ENODEV;
5954
5955         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5956
5957         /* This register is not touched during run-time. */
5958         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5959         BNX2_RD(bp, BNX2_HC_COMMAND);
5960
5961         for (i = 0; i < 10; i++) {
5962                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5963                         status_idx) {
5964
5965                         break;
5966                 }
5967
5968                 msleep_interruptible(10);
5969         }
5970         if (i < 10)
5971                 return 0;
5972
5973         return -ENODEV;
5974 }
5975
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Board config disables parallel detection entirely. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected on the wire -> no link possible. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	/* Read twice: first read presumably returns latched (stale)
	 * status per the usual MII latching convention — keep both.
	 */
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* No RX symbol sync, or invalid RUDI -> treat as no link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	/* Double read again to clear latched bits. */
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	/* Signal present, synced, and partner is not autonegotiating:
	 * parallel detection may declare link.
	 */
	return 1;
}
6007
/* Timer-context link maintenance for the 5706 SerDes PHY.  When
 * autoneg fails but the wire looks alive, force 1G full duplex
 * (parallel detection); if the partner later starts autonegotiating,
 * switch back to autoneg.  Also sanity-checks an "up" link against
 * the RX sync status.  Runs under bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently (re)started; give it more time. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg hasn't produced a link; if the wire
			 * is alive (signal + sync, partner silent),
			 * force 1000/full — parallel detection.
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect; probe whether the
		 * partner has started autonegotiating.
		 * NOTE(review): registers 0x17/0x15 and bit 0x20 come
		 * from the vendor shadow-register map (not visible
		 * here) — verify against the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner autonegotiates now: re-enable AN. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG — first read presumably
		 * returns latched status; keep both reads.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link claims up but RX lost sync: force the
			 * link down once, then re-evaluate via
			 * bnx2_set_link() on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6069
/* Timer-context link maintenance for the 5708 SerDes PHY: while the
 * link is down, alternate between autonegotiation and forced 2.5G
 * mode until one of them brings the link up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remote-PHY firmware owns link management; nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Without 2.5G capability there is no mode to alternate to. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* An autoneg attempt is still in flight; wait. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg didn't link: try forced 2.5G for one
			 * (shorter) forced-mode interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G didn't link either: back to
			 * autoneg, giving it two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6102
/* Per-device watchdog, rearmed every bp->current_interval jiffies:
 * sends the firmware heartbeat, refreshes a firmware-kept counter,
 * applies stats/MSI chip workarounds, and drives the SerDes link
 * state machines.  Runs in timer (softirq) context.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;		/* device closed; do not rearm */

	/* Interrupts are blocked (reset in progress): skip the work
	 * but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (not one-shot) can be missed on some chipsets;
	 * detect and recover.
	 */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-maintained RX drop count lives in chip memory. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6138
6139 static int
6140 bnx2_request_irq(struct bnx2 *bp)
6141 {
6142         unsigned long flags;
6143         struct bnx2_irq *irq;
6144         int rc = 0, i;
6145
6146         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6147                 flags = 0;
6148         else
6149                 flags = IRQF_SHARED;
6150
6151         for (i = 0; i < bp->irq_nvecs; i++) {
6152                 irq = &bp->irq_tbl[i];
6153                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6154                                  &bp->bnx2_napi[i]);
6155                 if (rc)
6156                         break;
6157                 irq->requested = 1;
6158         }
6159         return rc;
6160 }
6161
6162 static void
6163 __bnx2_free_irq(struct bnx2 *bp)
6164 {
6165         struct bnx2_irq *irq;
6166         int i;
6167
6168         for (i = 0; i < bp->irq_nvecs; i++) {
6169                 irq = &bp->irq_tbl[i];
6170                 if (irq->requested)
6171                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6172                 irq->requested = 0;
6173         }
6174 }
6175
6176 static void
6177 bnx2_free_irq(struct bnx2 *bp)
6178 {
6179
6180         __bnx2_free_irq(bp);
6181         if (bp->flags & BNX2_FLAG_USING_MSI)
6182                 pci_disable_msi(bp->pdev);
6183         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6184                 pci_disable_msix(bp->pdev);
6185
6186         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6187 }
6188
6189 static void
6190 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6191 {
6192         int i, total_vecs, rc;
6193         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6194         struct net_device *dev = bp->dev;
6195         const int len = sizeof(bp->irq_tbl[0].name);
6196
6197         bnx2_setup_msix_tbl(bp);
6198         BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6199         BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6200         BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6201
6202         /*  Need to flush the previous three writes to ensure MSI-X
6203          *  is setup properly */
6204         BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6205
6206         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6207                 msix_ent[i].entry = i;
6208                 msix_ent[i].vector = 0;
6209         }
6210
6211         total_vecs = msix_vecs;
6212 #ifdef BCM_CNIC
6213         total_vecs++;
6214 #endif
6215         rc = -ENOSPC;
6216         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6217                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6218                 if (rc <= 0)
6219                         break;
6220                 if (rc > 0)
6221                         total_vecs = rc;
6222         }
6223
6224         if (rc != 0)
6225                 return;
6226
6227         msix_vecs = total_vecs;
6228 #ifdef BCM_CNIC
6229         msix_vecs--;
6230 #endif
6231         bp->irq_nvecs = msix_vecs;
6232         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6233         for (i = 0; i < total_vecs; i++) {
6234                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6235                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6236                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6237         }
6238 }
6239
/* Choose the interrupt mode (MSI-X, then MSI, then legacy INTx) and
 * size the TX/RX ring counts from the vectors obtained.  @dis_msi
 * forces INTx (used after a failed MSI self-test).  Returns the
 * result of netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Vector target: honor user-requested ring counts when set,
	 * otherwise scale with the default RSS queue count.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Default to single-vector legacy INTx; overridden below when
	 * MSI-X or MSI is successfully enabled.
	 */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to plain MSI only if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Default TX ring count is rounded down to a power of two. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6291
/* Called with rtnl_lock */
/* ndo_open: bring the device up — load firmware, pick an interrupt
 * mode, allocate rings/IRQs, init the chip, verify MSI delivery
 * (falling back to INTx if it fails), and start the TX queues.
 * Returns 0 on success or a negative errno after unwinding.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Pick MSI-X/MSI/INTx and size the rings accordingly. */
	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Reconfigure with MSI forced off, then redo
			 * NIC init and the IRQ request from scratch.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above; each teardown helper is
	 * safe to call even for resources not yet allocated.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6375
/* Workqueue handler that fully re-initializes the NIC after a TX
 * timeout or fatal error.  Holds rtnl_lock to serialize against
 * open/close and other netdev operations.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		/* Device was closed before the work item ran. */
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	/* Memory-space access disabled indicates the PCI block itself
	 * was reset; restore the saved config space first.
	 */
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* Re-enable NAPI so dev_close() can tear it down. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6410
/* Expand to a { "name", register } pair for one flow-through queue. */
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

/* Diagnostic dump (TX-timeout path): log the flow-through queue
 * control registers, the internal CPU states, and the TX buffer
 * descriptor cache (TBDC).  Read-mostly; only the TBDC read-out
 * registers are written.
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;	/* register name for the log */
		u32 off;	/* indirect register offset */
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice — verify whether a
		 * different queue (e.g. TAS_) was intended.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* One CPU block every 0x40000: mode, state, event mask, and
	 * the program counter sampled twice (reg + 0x1c read twice) —
	 * presumably to show whether the CPU is advancing; verify.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		/* Issue a CAM read for line i and busy-wait (bounded
		 * at 100 polls) for the arbiter to complete it.
		 */
		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6479
/* Diagnostic dump (TX-timeout path): log PCI command/power state and
 * a handful of MAC/host-coalescing status registers.  Straight-line
 * reads only; no device state is modified.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	/* Power-management control and chip misc config. */
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* Pending-bit array is only meaningful in MSI-X mode. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6503
6504 static void
6505 bnx2_tx_timeout(struct net_device *dev)
6506 {
6507         struct bnx2 *bp = netdev_priv(dev);
6508
6509         bnx2_dump_ftq(bp);
6510         bnx2_dump_state(bp);
6511         bnx2_dump_mcp_state(bp);
6512
6513         /* This allows the netif to be shutdown gracefully before resetting */
6514         schedule_work(&bp->reset_task);
6515 }
6516
6517 /* Called with netif_tx_lock.
6518  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6519  * netif_wake_queue().
6520  */
6521 static netdev_tx_t
6522 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6523 {
6524         struct bnx2 *bp = netdev_priv(dev);
6525         dma_addr_t mapping;
6526         struct bnx2_tx_bd *txbd;
6527         struct bnx2_sw_tx_bd *tx_buf;
6528         u32 len, vlan_tag_flags, last_frag, mss;
6529         u16 prod, ring_prod;
6530         int i;
6531         struct bnx2_napi *bnapi;
6532         struct bnx2_tx_ring_info *txr;
6533         struct netdev_queue *txq;
6534
6535         /*  Determine which tx ring we will be placed on */
6536         i = skb_get_queue_mapping(skb);
6537         bnapi = &bp->bnx2_napi[i];
6538         txr = &bnapi->tx_ring;
6539         txq = netdev_get_tx_queue(dev, i);
6540
6541         if (unlikely(bnx2_tx_avail(bp, txr) <
6542             (skb_shinfo(skb)->nr_frags + 1))) {
6543                 netif_tx_stop_queue(txq);
6544                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6545
6546                 return NETDEV_TX_BUSY;
6547         }
6548         len = skb_headlen(skb);
6549         prod = txr->tx_prod;
6550         ring_prod = BNX2_TX_RING_IDX(prod);
6551
6552         vlan_tag_flags = 0;
6553         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6554                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6555         }
6556
6557         if (vlan_tx_tag_present(skb)) {
6558                 vlan_tag_flags |=
6559                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6560         }
6561
6562         if ((mss = skb_shinfo(skb)->gso_size)) {
6563                 u32 tcp_opt_len;
6564                 struct iphdr *iph;
6565
6566                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6567
6568                 tcp_opt_len = tcp_optlen(skb);
6569
6570                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6571                         u32 tcp_off = skb_transport_offset(skb) -
6572                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6573
6574                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6575                                           TX_BD_FLAGS_SW_FLAGS;
6576                         if (likely(tcp_off == 0))
6577                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6578                         else {
6579                                 tcp_off >>= 3;
6580                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6581                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6582                                                   ((tcp_off & 0x10) <<
6583                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6584                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6585                         }
6586                 } else {
6587                         iph = ip_hdr(skb);
6588                         if (tcp_opt_len || (iph->ihl > 5)) {
6589                                 vlan_tag_flags |= ((iph->ihl - 5) +
6590                                                    (tcp_opt_len >> 2)) << 8;
6591                         }
6592                 }
6593         } else
6594                 mss = 0;
6595
6596         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6597         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6598                 dev_kfree_skb(skb);
6599                 return NETDEV_TX_OK;
6600         }
6601
6602         tx_buf = &txr->tx_buf_ring[ring_prod];
6603         tx_buf->skb = skb;
6604         dma_unmap_addr_set(tx_buf, mapping, mapping);
6605
6606         txbd = &txr->tx_desc_ring[ring_prod];
6607
6608         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6609         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6610         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6611         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6612
6613         last_frag = skb_shinfo(skb)->nr_frags;
6614         tx_buf->nr_frags = last_frag;
6615         tx_buf->is_gso = skb_is_gso(skb);
6616
6617         for (i = 0; i < last_frag; i++) {
6618                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6619
6620                 prod = BNX2_NEXT_TX_BD(prod);
6621                 ring_prod = BNX2_TX_RING_IDX(prod);
6622                 txbd = &txr->tx_desc_ring[ring_prod];
6623
6624                 len = skb_frag_size(frag);
6625                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6626                                            DMA_TO_DEVICE);
6627                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6628                         goto dma_error;
6629                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6630                                    mapping);
6631
6632                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6633                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6634                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6635                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6636
6637         }
6638         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6639
6640         /* Sync BD data before updating TX mailbox */
6641         wmb();
6642
6643         netdev_tx_sent_queue(txq, skb->len);
6644
6645         prod = BNX2_NEXT_TX_BD(prod);
6646         txr->tx_prod_bseq += skb->len;
6647
6648         BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6649         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6650
6651         mmiowb();
6652
6653         txr->tx_prod = prod;
6654
6655         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6656                 netif_tx_stop_queue(txq);
6657
6658                 /* netif_tx_stop_queue() must be done before checking
6659                  * tx index in bnx2_tx_avail() below, because in
6660                  * bnx2_tx_int(), we update tx index before checking for
6661                  * netif_tx_queue_stopped().
6662                  */
6663                 smp_mb();
6664                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6665                         netif_tx_wake_queue(txq);
6666         }
6667
6668         return NETDEV_TX_OK;
6669 dma_error:
6670         /* save value of frag that failed */
6671         last_frag = i;
6672
6673         /* start back at beginning and unmap skb */
6674         prod = txr->tx_prod;
6675         ring_prod = BNX2_TX_RING_IDX(prod);
6676         tx_buf = &txr->tx_buf_ring[ring_prod];
6677         tx_buf->skb = NULL;
6678         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6679                          skb_headlen(skb), PCI_DMA_TODEVICE);
6680
6681         /* unmap remaining mapped pages */
6682         for (i = 0; i < last_frag; i++) {
6683                 prod = BNX2_NEXT_TX_BD(prod);
6684                 ring_prod = BNX2_TX_RING_IDX(prod);
6685                 tx_buf = &txr->tx_buf_ring[ring_prod];
6686                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6687                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6688                                PCI_DMA_TODEVICE);
6689         }
6690
6691         dev_kfree_skb(skb);
6692         return NETDEV_TX_OK;
6693 }
6694
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce in order: mask and sync interrupts first so no new NAPI
         * work can be scheduled, then stop NAPI polling and TX queues,
         * then kill the periodic timer before touching the chip.
         */
        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        netif_tx_disable(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);     /* release all pending RX/TX buffers */
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Drop the device into D3hot now that it is fully quiesced. */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6715
6716 static void
6717 bnx2_save_stats(struct bnx2 *bp)
6718 {
6719         u32 *hw_stats = (u32 *) bp->stats_blk;
6720         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6721         int i;
6722
6723         /* The 1st 10 counters are 64-bit counters */
6724         for (i = 0; i < 20; i += 2) {
6725                 u32 hi;
6726                 u64 lo;
6727
6728                 hi = temp_stats[i] + hw_stats[i];
6729                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6730                 if (lo > 0xffffffff)
6731                         hi++;
6732                 temp_stats[i] = hi;
6733                 temp_stats[i + 1] = lo & 0xffffffff;
6734         }
6735
6736         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6737                 temp_stats[i] += hw_stats[i];
6738 }
6739
/* Statistics accessors: each counter is the sum of the live hardware
 * statistics block and the software copy in temp_stats_blk (saved by
 * bnx2_save_stats() before a chip reset erases the hardware block).
 * The 64-bit counters are stored as <name>_hi / <name>_lo u32 pairs.
 */
#define GET_64BIT_NET_STATS64(ctr)              \
        (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6750
/* ndo_get_stats64 handler: populate @net_stats from the chip statistics
 * block plus the saved copy in temp_stats_blk (see the GET_*_NET_STATS
 * macros above).  Returns @net_stats.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Statistics block not allocated; leave net_stats as provided. */
        if (bp->stats_blk == NULL)
                return net_stats;

        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        /* Must come after the four component counters computed above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* Carrier-sense error counter is skipped on 5706 and 5708 A0
         * because of errata (see the note above bnx2_5706_stats_len_arr).
         */
        if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        /* Depends on tx_aborted_errors / tx_carrier_errors set above. */
        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6823
6824 /* All ethtool functions called with rtnl_lock */
6825
6826 static int
6827 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6828 {
6829         struct bnx2 *bp = netdev_priv(dev);
6830         int support_serdes = 0, support_copper = 0;
6831
6832         cmd->supported = SUPPORTED_Autoneg;
6833         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6834                 support_serdes = 1;
6835                 support_copper = 1;
6836         } else if (bp->phy_port == PORT_FIBRE)
6837                 support_serdes = 1;
6838         else
6839                 support_copper = 1;
6840
6841         if (support_serdes) {
6842                 cmd->supported |= SUPPORTED_1000baseT_Full |
6843                         SUPPORTED_FIBRE;
6844                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6845                         cmd->supported |= SUPPORTED_2500baseX_Full;
6846
6847         }
6848         if (support_copper) {
6849                 cmd->supported |= SUPPORTED_10baseT_Half |
6850                         SUPPORTED_10baseT_Full |
6851                         SUPPORTED_100baseT_Half |
6852                         SUPPORTED_100baseT_Full |
6853                         SUPPORTED_1000baseT_Full |
6854                         SUPPORTED_TP;
6855
6856         }
6857
6858         spin_lock_bh(&bp->phy_lock);
6859         cmd->port = bp->phy_port;
6860         cmd->advertising = bp->advertising;
6861
6862         if (bp->autoneg & AUTONEG_SPEED) {
6863                 cmd->autoneg = AUTONEG_ENABLE;
6864         } else {
6865                 cmd->autoneg = AUTONEG_DISABLE;
6866         }
6867
6868         if (netif_carrier_ok(dev)) {
6869                 ethtool_cmd_speed_set(cmd, bp->line_speed);
6870                 cmd->duplex = bp->duplex;
6871         }
6872         else {
6873                 ethtool_cmd_speed_set(cmd, -1);
6874                 cmd->duplex = -1;
6875         }
6876         spin_unlock_bh(&bp->phy_lock);
6877
6878         cmd->transceiver = XCVR_INTERNAL;
6879         cmd->phy_address = bp->phy_addr;
6880
6881         return 0;
6882 }
6883
/* ethtool set_settings: validate and apply the requested port, autoneg,
 * speed, and duplex.  Works on local copies so a validation failure via
 * err_out_unlock leaves the bp fields untouched.  Returns 0 or -EINVAL,
 * or the result of bnx2_setup_phy() when the device is running.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching ports is only possible on remote-PHY capable parts. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Mask the advertised modes to the media type; an empty
                 * mask means "advertise everything the media supports".
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                u32 speed = ethtool_cmd_speed(cmd);
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre can only be forced to 1G/2.5G full duplex,
                         * and 2.5G only on parts that support it.
                         */
                        if ((speed != SPEED_1000 &&
                             speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                } else if (speed == SPEED_1000 || speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed; commit the new settings. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6961
6962 static void
6963 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6964 {
6965         struct bnx2 *bp = netdev_priv(dev);
6966
6967         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6968         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6969         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6970         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6971 }
6972
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* Size in bytes of the register dump produced by bnx2_get_regs(). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6980
/* ethtool register dump: read the chip registers into @_p, skipping the
 * unreadable holes between the [start, end) pairs in reg_boundaries[].
 * Skipped regions stay zero from the initial memset.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        /* Flattened list of [start, end) byte ranges that are safe to read;
         * the final 0x8000 sentinel terminates the walk via the
         * offset < BNX2_REGDUMP_LEN check.
         */
        static const u32 reg_boundaries[] = {
                0x0000, 0x0098, 0x0400, 0x045c,
                0x0800, 0x0880, 0x0c00, 0x0c10,
                0x0c30, 0x0d08, 0x1000, 0x101c,
                0x1040, 0x1048, 0x1080, 0x10a4,
                0x1400, 0x1490, 0x1498, 0x14f0,
                0x1500, 0x155c, 0x1580, 0x15dc,
                0x1600, 0x1658, 0x1680, 0x16d8,
                0x1800, 0x1820, 0x1840, 0x1854,
                0x1880, 0x1894, 0x1900, 0x1984,
                0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                0x1c80, 0x1c94, 0x1d00, 0x1d84,
                0x2000, 0x2030, 0x23c0, 0x2400,
                0x2800, 0x2820, 0x2830, 0x2850,
                0x2b40, 0x2c10, 0x2fc0, 0x3058,
                0x3c00, 0x3c94, 0x4000, 0x4010,
                0x4080, 0x4090, 0x43c0, 0x4458,
                0x4c00, 0x4c18, 0x4c40, 0x4c54,
                0x4fc0, 0x5010, 0x53c0, 0x5444,
                0x5c00, 0x5c18, 0x5c80, 0x5c90,
                0x5fc0, 0x6000, 0x6400, 0x6428,
                0x6800, 0x6848, 0x684c, 0x6860,
                0x6888, 0x6910, 0x8000
        };

        regs->version = 0;

        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Registers can only be read while the device is up. */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        /* NOTE(review): p += offset advances in u32 units while offset is a
         * byte offset (compare the byte-based orig_p + offset below); this
         * is harmless only because reg_boundaries[0] == 0 — confirm before
         * ever changing the first boundary.
         */
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = BNX2_RD(bp, offset);
                offset += 4;
                /* End of a readable range: jump to the next range's start. */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
7032
7033 static void
7034 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7035 {
7036         struct bnx2 *bp = netdev_priv(dev);
7037
7038         if (bp->flags & BNX2_FLAG_NO_WOL) {
7039                 wol->supported = 0;
7040                 wol->wolopts = 0;
7041         }
7042         else {
7043                 wol->supported = WAKE_MAGIC;
7044                 if (bp->wol)
7045                         wol->wolopts = WAKE_MAGIC;
7046                 else
7047                         wol->wolopts = 0;
7048         }
7049         memset(&wol->sopass, 0, sizeof(wol->sopass));
7050 }
7051
7052 static int
7053 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7054 {
7055         struct bnx2 *bp = netdev_priv(dev);
7056
7057         if (wol->wolopts & ~WAKE_MAGIC)
7058                 return -EINVAL;
7059
7060         if (wol->wolopts & WAKE_MAGIC) {
7061                 if (bp->flags & BNX2_FLAG_NO_WOL)
7062                         return -EINVAL;
7063
7064                 bp->wol = 1;
7065         }
7066         else {
7067                 bp->wol = 0;
7068         }
7069
7070         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7071
7072         return 0;
7073 }
7074
/* ethtool nway_reset: restart autonegotiation.  Returns -EAGAIN if the
 * device is down and -EINVAL if autoneg is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote PHY: delegate the restart to the remote-PHY setup path. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the BH spinlock across the sleep, then retake it. */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handled by bp->timer. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
7120
7121 static u32
7122 bnx2_get_link(struct net_device *dev)
7123 {
7124         struct bnx2 *bp = netdev_priv(dev);
7125
7126         return bp->link_up;
7127 }
7128
7129 static int
7130 bnx2_get_eeprom_len(struct net_device *dev)
7131 {
7132         struct bnx2 *bp = netdev_priv(dev);
7133
7134         if (bp->flash_info == NULL)
7135                 return 0;
7136
7137         return (int) bp->flash_size;
7138 }
7139
7140 static int
7141 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7142                 u8 *eebuf)
7143 {
7144         struct bnx2 *bp = netdev_priv(dev);
7145         int rc;
7146
7147         if (!netif_running(dev))
7148                 return -EAGAIN;
7149
7150         /* parameters already validated in ethtool_get_eeprom */
7151
7152         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7153
7154         return rc;
7155 }
7156
7157 static int
7158 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7159                 u8 *eebuf)
7160 {
7161         struct bnx2 *bp = netdev_priv(dev);
7162         int rc;
7163
7164         if (!netif_running(dev))
7165                 return -EAGAIN;
7166
7167         /* parameters already validated in ethtool_set_eeprom */
7168
7169         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7170
7171         return rc;
7172 }
7173
7174 static int
7175 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7176 {
7177         struct bnx2 *bp = netdev_priv(dev);
7178
7179         memset(coal, 0, sizeof(struct ethtool_coalesce));
7180
7181         coal->rx_coalesce_usecs = bp->rx_ticks;
7182         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7183         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7184         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7185
7186         coal->tx_coalesce_usecs = bp->tx_ticks;
7187         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7188         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7189         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7190
7191         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7192
7193         return 0;
7194 }
7195
7196 static int
7197 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7198 {
7199         struct bnx2 *bp = netdev_priv(dev);
7200
7201         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7202         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7203
7204         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7205         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7206
7207         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7208         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7209
7210         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7211         if (bp->rx_quick_cons_trip_int > 0xff)
7212                 bp->rx_quick_cons_trip_int = 0xff;
7213
7214         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7215         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7216
7217         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7218         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7219
7220         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7221         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7222
7223         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7224         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7225                 0xff;
7226
7227         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7228         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7229                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7230                         bp->stats_ticks = USEC_PER_SEC;
7231         }
7232         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7233                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7234         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7235
7236         if (netif_running(bp->dev)) {
7237                 bnx2_netif_stop(bp, true);
7238                 bnx2_init_nic(bp, 0);
7239                 bnx2_netif_start(bp, true);
7240         }
7241
7242         return 0;
7243 }
7244
7245 static void
7246 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7247 {
7248         struct bnx2 *bp = netdev_priv(dev);
7249
7250         ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7251         ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7252
7253         ering->rx_pending = bp->rx_ring_size;
7254         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7255
7256         ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7257         ering->tx_pending = bp->tx_ring_size;
7258 }
7259
/* Resize the RX/TX rings.  If the device is running it is torn down,
 * reconfigured, and brought back up; @reset_irq additionally rebuilds the
 * IRQ/NAPI setup.  On reinit failure the device is closed and the error
 * returned; otherwise returns 0.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                if (reset_irq) {
                        bnx2_free_irq(bp);
                        bnx2_del_napi(bp);
                } else {
                        __bnx2_free_irq(bp);
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        /* Record the new sizes; used by the (re)allocation below and by a
         * later bnx2_open() when the device is currently down.
         */
        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc = 0;

                if (reset_irq) {
                        rc = bnx2_setup_int_mode(bp, disable_msi);
                        bnx2_init_napi(bp);
                }

                if (!rc)
                        rc = bnx2_alloc_mem(bp);

                if (!rc)
                        rc = bnx2_request_irq(bp);

                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                if (rc) {
                        /* Re-enable NAPI so dev_close() can tear down
                         * cleanly, then give up on the device.
                         */
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7315
7316 static int
7317 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7318 {
7319         struct bnx2 *bp = netdev_priv(dev);
7320         int rc;
7321
7322         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7323                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7324                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7325
7326                 return -EINVAL;
7327         }
7328         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7329                                    false);
7330         return rc;
7331 }
7332
7333 static void
7334 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7335 {
7336         struct bnx2 *bp = netdev_priv(dev);
7337
7338         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7339         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7340         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7341 }
7342
7343 static int
7344 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7345 {
7346         struct bnx2 *bp = netdev_priv(dev);
7347
7348         bp->req_flow_ctrl = 0;
7349         if (epause->rx_pause)
7350                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7351         if (epause->tx_pause)
7352                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7353
7354         if (epause->autoneg) {
7355                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7356         }
7357         else {
7358                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7359         }
7360
7361         if (netif_running(dev)) {
7362                 spin_lock_bh(&bp->phy_lock);
7363                 bnx2_setup_phy(bp, bp->phy_port);
7364                 spin_unlock_bh(&bp->phy_lock);
7365         }
7366
7367         return 0;
7368 }
7369
/* ethtool statistics names, reported by get_strings.  The order must match
 * bnx2_stats_offset_arr[] and the *_stats_len_arr[] tables below entry for
 * entry.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7421
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a counter within struct statistics_block, in u32 units. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each ethtool statistic inside the statistics block; the
 * order matches bnx2_stats_str_arr[] above.  64-bit counters point at the
 * _hi word of their hi/lo pair.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7475
/* Byte width (4 or 8) of each counter for 5706/5706-A*/5708-A0 chips.
 * A width of 0 marks a counter that must be skipped:
 * stat_IfHCInBadOctets (entry 1) and stat_Dot3StatsCarrierSenseErrors
 * (entry 11) are skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7486
/* Counter widths for the remaining (5708 and later) chips; only
 * stat_IfHCInBadOctets (entry 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7494
#define BNX2_NUM_TESTS 6

/* Names reported for ETH_SS_TEST, in the same order as the buf[] slots
 * filled by bnx2_self_test().  Tests marked (offline) require the
 * interface to be quiesced first.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7507
7508 static int
7509 bnx2_get_sset_count(struct net_device *dev, int sset)
7510 {
7511         switch (sset) {
7512         case ETH_SS_TEST:
7513                 return BNX2_NUM_TESTS;
7514         case ETH_SS_STATS:
7515                 return BNX2_NUM_STATS;
7516         default:
7517                 return -EOPNOTSUPP;
7518         }
7519 }
7520
/* ethtool self_test hook.  buf[] receives one result slot per entry in
 * bnx2_tests_str_arr (0 = pass).  Offline tests reset the chip into
 * diagnostic mode and restart it afterwards; online tests run as-is.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure the chip is powered up before touching it. */
        bnx2_set_power_state(bp, PCI_D0);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce traffic and put the chip in diagnostic mode. */
                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation (or shut down if the
                 * interface was not running to begin with).
                 */
                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp, true);
                }

                /* wait for link up (up to ~7 seconds) */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        /* Online tests - safe to run while the interface is up. */
        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
        /* Drop back to low power if the interface is down. */
        if (!netif_running(bp->dev))
                bnx2_set_power_state(bp, PCI_D3hot);
}
7579
7580 static void
7581 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7582 {
7583         switch (stringset) {
7584         case ETH_SS_STATS:
7585                 memcpy(buf, bnx2_stats_str_arr,
7586                         sizeof(bnx2_stats_str_arr));
7587                 break;
7588         case ETH_SS_TEST:
7589                 memcpy(buf, bnx2_tests_str_arr,
7590                         sizeof(bnx2_tests_str_arr));
7591                 break;
7592         }
7593 }
7594
/* ethtool get_ethtool_stats hook.  Walks the hardware statistics block
 * word-by-word using bnx2_stats_offset_arr, adding in temp_stats_blk
 * (counters accumulated across chip resets).  The per-chip length array
 * says whether each counter is 4 or 8 bytes wide, or skipped (0) due to
 * errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block allocated yet - report all zeros. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* Early 5706/5708 steppings need extra counters skipped. */
        if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                unsigned long offset;

                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }

                offset = bnx2_stats_offset_arr[i];
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64) *(hw_stats + offset) +
                                 *(temp_stats + offset);
                        continue;
                }
                /* 8-byte counter: offset points at the _hi word, the
                 * _lo word follows it.
                 */
                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
                         *(hw_stats + offset + 1) +
                         (((u64) *(temp_stats + offset)) << 32) +
                         *(temp_stats + offset + 1);
        }
}
7641
/* ethtool set_phys_id hook: blink the port LED so the operator can
 * identify the physical NIC.  The MISC_CFG LED mode is saved on
 * activation and restored when the blinking stops.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
        struct bnx2 *bp = netdev_priv(dev);

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                /* Power up and take manual control of the LED. */
                bnx2_set_power_state(bp, PCI_D0);

                bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
                BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_ON:
                /* Force every LED output on. */
                BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                        BNX2_EMAC_LED_1000MB_OVERRIDE |
                        BNX2_EMAC_LED_100MB_OVERRIDE |
                        BNX2_EMAC_LED_10MB_OVERRIDE |
                        BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                        BNX2_EMAC_LED_TRAFFIC);
                break;

        case ETHTOOL_ID_OFF:
                /* Override with all LED outputs off. */
                BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                break;

        case ETHTOOL_ID_INACTIVE:
                /* Return LED control to hardware and restore the saved
                 * mode; power back down if the interface is not up.
                 */
                BNX2_WR(bp, BNX2_EMAC_LED, 0);
                BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);

                if (!netif_running(dev))
                        bnx2_set_power_state(bp, PCI_D3hot);
                break;
        }

        return 0;
}
7679
7680 static netdev_features_t
7681 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7682 {
7683         struct bnx2 *bp = netdev_priv(dev);
7684
7685         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7686                 features |= NETIF_F_HW_VLAN_CTAG_RX;
7687
7688         return features;
7689 }
7690
/* netdev set_features hook.  Adjusts VLAN-dependent TSO features and,
 * when the RX VLAN stripping setting actually changes on a running
 * interface, reprograms the RX mode and notifies firmware.
 * Returns 1 when the device was reconfigured here, 0 otherwise.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* TSO with VLAN tag won't work with current firmware */
        if (features & NETIF_F_HW_VLAN_CTAG_TX)
                dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
        else
                dev->vlan_features &= ~NETIF_F_ALL_TSO;

        /* Only touch the hardware if the stripping setting really
         * changed and the interface is up; dev->features must be
         * updated before bnx2_set_rx_mode() reads it.
         */
        if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
            !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
            netif_running(dev)) {
                bnx2_netif_stop(bp, false);
                dev->features = features;
                bnx2_set_rx_mode(dev);
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
                bnx2_netif_start(bp, false);
                return 1;
        }

        return 0;
}
7715
7716 static void bnx2_get_channels(struct net_device *dev,
7717                               struct ethtool_channels *channels)
7718 {
7719         struct bnx2 *bp = netdev_priv(dev);
7720         u32 max_rx_rings = 1;
7721         u32 max_tx_rings = 1;
7722
7723         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7724                 max_rx_rings = RX_MAX_RINGS;
7725                 max_tx_rings = TX_MAX_RINGS;
7726         }
7727
7728         channels->max_rx = max_rx_rings;
7729         channels->max_tx = max_tx_rings;
7730         channels->max_other = 0;
7731         channels->max_combined = 0;
7732         channels->rx_count = bp->num_rx_rings;
7733         channels->tx_count = bp->num_tx_rings;
7734         channels->other_count = 0;
7735         channels->combined_count = 0;
7736 }
7737
7738 static int bnx2_set_channels(struct net_device *dev,
7739                               struct ethtool_channels *channels)
7740 {
7741         struct bnx2 *bp = netdev_priv(dev);
7742         u32 max_rx_rings = 1;
7743         u32 max_tx_rings = 1;
7744         int rc = 0;
7745
7746         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7747                 max_rx_rings = RX_MAX_RINGS;
7748                 max_tx_rings = TX_MAX_RINGS;
7749         }
7750         if (channels->rx_count > max_rx_rings ||
7751             channels->tx_count > max_tx_rings)
7752                 return -EINVAL;
7753
7754         bp->num_req_rx_rings = channels->rx_count;
7755         bp->num_req_tx_rings = channels->tx_count;
7756
7757         if (netif_running(dev))
7758                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7759                                            bp->tx_ring_size, true);
7760
7761         return rc;
7762 }
7763
/* ethtool operations table registered via dev->ethtool_ops. */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = bnx2_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .set_phys_id            = bnx2_set_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
        .get_channels           = bnx2_get_channels,
        .set_channels           = bnx2_set_channels,
};
7791
/* Called with rtnl_lock.  Handles the MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) for direct PHY register access.  Access is
 * refused for remote-PHY setups (the PHY is owned by firmware) and
 * while the interface is down (PHY may be powered off).
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* phy_lock serializes MDIO access with the driver. */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
7842
7843 /* Called with rtnl_lock */
7844 static int
7845 bnx2_change_mac_addr(struct net_device *dev, void *p)
7846 {
7847         struct sockaddr *addr = p;
7848         struct bnx2 *bp = netdev_priv(dev);
7849
7850         if (!is_valid_ether_addr(addr->sa_data))
7851                 return -EADDRNOTAVAIL;
7852
7853         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7854         if (netif_running(dev))
7855                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7856
7857         return 0;
7858 }
7859
7860 /* Called with rtnl_lock */
7861 static int
7862 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7863 {
7864         struct bnx2 *bp = netdev_priv(dev);
7865
7866         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7867                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7868                 return -EINVAL;
7869
7870         dev->mtu = new_mtu;
7871         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7872                                      false);
7873 }
7874
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke each vector's interrupt handler directly with
 * its IRQ disabled, so the NIC can be serviced without interrupts
 * (e.g. for netconsole).
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;

        for (i = 0; i < bp->irq_nvecs; i++) {
                struct bnx2_irq *irq = &bp->irq_tbl[i];

                disable_irq(irq->vector);
                irq->handler(irq->vector, &bp->bnx2_napi[i]);
                enable_irq(irq->vector);
        }
}
#endif
7891
7892 static void
7893 bnx2_get_5709_media(struct bnx2 *bp)
7894 {
7895         u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7896         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7897         u32 strap;
7898
7899         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7900                 return;
7901         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7902                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7903                 return;
7904         }
7905
7906         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7907                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7908         else
7909                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7910
7911         if (bp->func == 0) {
7912                 switch (strap) {
7913                 case 0x4:
7914                 case 0x5:
7915                 case 0x6:
7916                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7917                         return;
7918                 }
7919         } else {
7920                 switch (strap) {
7921                 case 0x1:
7922                 case 0x2:
7923                 case 0x4:
7924                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7925                         return;
7926                 }
7927         }
7928 }
7929
/* Probe the PCI/PCI-X bus type, width and clock speed from the chip's
 * MISC_STATUS and CLOCK_CONTROL registers, recording the results in
 * bp->flags and bp->bus_speed_mhz (used for reporting and tuning).
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                /* PCI-X: decode the detected clock speed field. */
                clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 66 vs. 33 MHz (M66EN pin). */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7981
/* Read the VPD area from NVRAM and, if it carries the expected
 * manufacturer id, copy the vendor-specific firmware version string
 * into bp->fw_version.  Silently does nothing on any parse failure.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
        int rc, i, j;
        u8 *data;
        unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET   0x300
#define BNX2_VPD_LEN            128
#define BNX2_MAX_VER_SLEN       30

        /* First half of the buffer receives the byte-swapped copy;
         * second half holds the raw NVRAM words.
         */
        data = kmalloc(256, GFP_KERNEL);
        if (!data)
                return;

        rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
                             BNX2_VPD_LEN);
        if (rc)
                goto vpd_done;

        /* NVRAM is stored big-endian per 32-bit word; reverse each
         * word into byte order for the VPD parser.
         */
        for (i = 0; i < BNX2_VPD_LEN; i += 4) {
                data[i] = data[i + BNX2_VPD_LEN + 3];
                data[i + 1] = data[i + BNX2_VPD_LEN + 2];
                data[i + 2] = data[i + BNX2_VPD_LEN + 1];
                data[i + 3] = data[i + BNX2_VPD_LEN];
        }

        /* Locate the read-only VPD section and bounds-check it. */
        i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto vpd_done;

        rosize = pci_vpd_lrdt_size(&data[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;
        block_end = i + rosize;

        if (block_end > BNX2_VPD_LEN)
                goto vpd_done;

        /* Only proceed when the manufacturer id is "1028" (presumably
         * Dell OEM boards - NOTE(review): confirm against callers).
         */
        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len != 4 ||
            memcmp(&data[j], "1028", 4))
                goto vpd_done;

        /* Extract the vendor-specific version string (V0 keyword). */
        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_VENDOR0);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
                goto vpd_done;

        /* Append a space so later code can concatenate more version
         * info after it.
         */
        memcpy(bp->fw_version, &data[j], len);
        bp->fw_version[len] = ' ';

vpd_done:
        kfree(data);
}
8049
8050 static int
8051 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8052 {
8053         struct bnx2 *bp;
8054         int rc, i, j;
8055         u32 reg;
8056         u64 dma_mask, persist_dma_mask;
8057         int err;
8058
8059         SET_NETDEV_DEV(dev, &pdev->dev);
8060         bp = netdev_priv(dev);
8061
8062         bp->flags = 0;
8063         bp->phy_flags = 0;
8064
8065         bp->temp_stats_blk =
8066                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8067
8068         if (bp->temp_stats_blk == NULL) {
8069                 rc = -ENOMEM;
8070                 goto err_out;
8071         }
8072
8073         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8074         rc = pci_enable_device(pdev);
8075         if (rc) {
8076                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8077                 goto err_out;
8078         }
8079
8080         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8081                 dev_err(&pdev->dev,
8082                         "Cannot find PCI device base address, aborting\n");
8083                 rc = -ENODEV;
8084                 goto err_out_disable;
8085         }
8086
8087         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8088         if (rc) {
8089                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8090                 goto err_out_disable;
8091         }
8092
8093         pci_set_master(pdev);
8094
8095         bp->pm_cap = pdev->pm_cap;
8096         if (bp->pm_cap == 0) {
8097                 dev_err(&pdev->dev,
8098                         "Cannot find power management capability, aborting\n");
8099                 rc = -EIO;
8100                 goto err_out_release;
8101         }
8102
8103         bp->dev = dev;
8104         bp->pdev = pdev;
8105
8106         spin_lock_init(&bp->phy_lock);
8107         spin_lock_init(&bp->indirect_lock);
8108 #ifdef BCM_CNIC
8109         mutex_init(&bp->cnic_lock);
8110 #endif
8111         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8112
8113         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8114                                                          TX_MAX_TSS_RINGS + 1));
8115         if (!bp->regview) {
8116                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8117                 rc = -ENOMEM;
8118                 goto err_out_release;
8119         }
8120
8121         bnx2_set_power_state(bp, PCI_D0);
8122
8123         /* Configure byte swap and enable write to the reg_window registers.
8124          * Rely on CPU to do target byte swapping on big endian systems
8125          * The chip's target access swapping will not swap all accesses
8126          */
8127         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8128                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8129                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8130
8131         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8132
8133         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8134                 if (!pci_is_pcie(pdev)) {
8135                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8136                         rc = -EIO;
8137                         goto err_out_unmap;
8138                 }
8139                 bp->flags |= BNX2_FLAG_PCIE;
8140                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8141                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8142
8143                 /* AER (Advanced Error Reporting) hooks */
8144                 err = pci_enable_pcie_error_reporting(pdev);
8145                 if (!err)
8146                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8147
8148         } else {
8149                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8150                 if (bp->pcix_cap == 0) {
8151                         dev_err(&pdev->dev,
8152                                 "Cannot find PCIX capability, aborting\n");
8153                         rc = -EIO;
8154                         goto err_out_unmap;
8155                 }
8156                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8157         }
8158
8159         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8160             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8161                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8162                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8163         }
8164
8165         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8166             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8167                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8168                         bp->flags |= BNX2_FLAG_MSI_CAP;
8169         }
8170
8171         /* 5708 cannot support DMA addresses > 40-bit.  */
8172         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8173                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8174         else
8175                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8176
8177         /* Configure DMA attributes. */
8178         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8179                 dev->features |= NETIF_F_HIGHDMA;
8180                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8181                 if (rc) {
8182                         dev_err(&pdev->dev,
8183                                 "pci_set_consistent_dma_mask failed, aborting\n");
8184                         goto err_out_unmap;
8185                 }
8186         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8187                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8188                 goto err_out_unmap;
8189         }
8190
8191         if (!(bp->flags & BNX2_FLAG_PCIE))
8192                 bnx2_get_pci_speed(bp);
8193
8194         /* 5706A0 may falsely detect SERR and PERR. */
8195         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8196                 reg = BNX2_RD(bp, PCI_COMMAND);
8197                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8198                 BNX2_WR(bp, PCI_COMMAND, reg);
8199         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8200                 !(bp->flags & BNX2_FLAG_PCIX)) {
8201
8202                 dev_err(&pdev->dev,
8203                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8204                 goto err_out_unmap;
8205         }
8206
8207         bnx2_init_nvram(bp);
8208
8209         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8210
8211         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8212                 bp->func = 1;
8213
8214         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8215             BNX2_SHM_HDR_SIGNATURE_SIG) {
8216                 u32 off = bp->func << 2;
8217
8218                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8219         } else
8220                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8221
8222         /* Get the permanent MAC address.  First we need to make sure the
8223          * firmware is actually running.
8224          */
8225         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8226
8227         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8228             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8229                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8230                 rc = -ENODEV;
8231                 goto err_out_unmap;
8232         }
8233
8234         bnx2_read_vpd_fw_ver(bp);
8235
8236         j = strlen(bp->fw_version);
8237         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8238         for (i = 0; i < 3 && j < 24; i++) {
8239                 u8 num, k, skip0;
8240
8241                 if (i == 0) {
8242                         bp->fw_version[j++] = 'b';
8243                         bp->fw_version[j++] = 'c';
8244                         bp->fw_version[j++] = ' ';
8245                 }
8246                 num = (u8) (reg >> (24 - (i * 8)));
8247                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8248                         if (num >= k || !skip0 || k == 1) {
8249                                 bp->fw_version[j++] = (num / k) + '0';
8250                                 skip0 = 0;
8251                         }
8252                 }
8253                 if (i != 2)
8254                         bp->fw_version[j++] = '.';
8255         }
8256         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8257         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8258                 bp->wol = 1;
8259
8260         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8261                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8262
8263                 for (i = 0; i < 30; i++) {
8264                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8265                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8266                                 break;
8267                         msleep(10);
8268                 }
8269         }
8270         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8271         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8272         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8273             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8274                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8275
8276                 if (j < 32)
8277                         bp->fw_version[j++] = ' ';
8278                 for (i = 0; i < 3 && j < 28; i++) {
8279                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8280                         reg = be32_to_cpu(reg);
8281                         memcpy(&bp->fw_version[j], &reg, 4);
8282                         j += 4;
8283                 }
8284         }
8285
8286         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8287         bp->mac_addr[0] = (u8) (reg >> 8);
8288         bp->mac_addr[1] = (u8) reg;
8289
8290         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8291         bp->mac_addr[2] = (u8) (reg >> 24);
8292         bp->mac_addr[3] = (u8) (reg >> 16);
8293         bp->mac_addr[4] = (u8) (reg >> 8);
8294         bp->mac_addr[5] = (u8) reg;
8295
8296         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8297         bnx2_set_rx_ring_size(bp, 255);
8298
8299         bp->tx_quick_cons_trip_int = 2;
8300         bp->tx_quick_cons_trip = 20;
8301         bp->tx_ticks_int = 18;
8302         bp->tx_ticks = 80;
8303
8304         bp->rx_quick_cons_trip_int = 2;
8305         bp->rx_quick_cons_trip = 12;
8306         bp->rx_ticks_int = 18;
8307         bp->rx_ticks = 18;
8308
8309         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8310
8311         bp->current_interval = BNX2_TIMER_INTERVAL;
8312
8313         bp->phy_addr = 1;
8314
8315         /* Disable WOL support if we are running on a SERDES chip. */
8316         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8317                 bnx2_get_5709_media(bp);
8318         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8319                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8320
8321         bp->phy_port = PORT_TP;
8322         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8323                 bp->phy_port = PORT_FIBRE;
8324                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8325                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8326                         bp->flags |= BNX2_FLAG_NO_WOL;
8327                         bp->wol = 0;
8328                 }
8329                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8330                         /* Don't do parallel detect on this board because of
8331                          * some board problems.  The link will not go down
8332                          * if we do parallel detect.
8333                          */
8334                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8335                             pdev->subsystem_device == 0x310c)
8336                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8337                 } else {
8338                         bp->phy_addr = 2;
8339                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8340                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8341                 }
8342         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8343                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8344                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8345         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8346                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8347                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8348                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8349
8350         bnx2_init_fw_cap(bp);
8351
8352         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8353             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8354             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8355             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8356                 bp->flags |= BNX2_FLAG_NO_WOL;
8357                 bp->wol = 0;
8358         }
8359
8360         if (bp->flags & BNX2_FLAG_NO_WOL)
8361                 device_set_wakeup_capable(&bp->pdev->dev, false);
8362         else
8363                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8364
8365         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8366                 bp->tx_quick_cons_trip_int =
8367                         bp->tx_quick_cons_trip;
8368                 bp->tx_ticks_int = bp->tx_ticks;
8369                 bp->rx_quick_cons_trip_int =
8370                         bp->rx_quick_cons_trip;
8371                 bp->rx_ticks_int = bp->rx_ticks;
8372                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8373                 bp->com_ticks_int = bp->com_ticks;
8374                 bp->cmd_ticks_int = bp->cmd_ticks;
8375         }
8376
8377         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8378          *
8379          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8380          * with byte enables disabled on the unused 32-bit word.  This is legal
8381          * but causes problems on the AMD 8132 which will eventually stop
8382          * responding after a while.
8383          *
8384          * AMD believes this incompatibility is unique to the 5706, and
8385          * prefers to locally disable MSI rather than globally disabling it.
8386          */
8387         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8388                 struct pci_dev *amd_8132 = NULL;
8389
8390                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8391                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8392                                                   amd_8132))) {
8393
8394                         if (amd_8132->revision >= 0x10 &&
8395                             amd_8132->revision <= 0x13) {
8396                                 disable_msi = 1;
8397                                 pci_dev_put(amd_8132);
8398                                 break;
8399                         }
8400                 }
8401         }
8402
8403         bnx2_set_default_link(bp);
8404         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8405
8406         init_timer(&bp->timer);
8407         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8408         bp->timer.data = (unsigned long) bp;
8409         bp->timer.function = bnx2_timer;
8410
8411 #ifdef BCM_CNIC
8412         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8413                 bp->cnic_eth_dev.max_iscsi_conn =
8414                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8415                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8416         bp->cnic_probe = bnx2_cnic_probe;
8417 #endif
8418         pci_save_state(pdev);
8419
8420         return 0;
8421
8422 err_out_unmap:
8423         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8424                 pci_disable_pcie_error_reporting(pdev);
8425                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8426         }
8427
8428         pci_iounmap(pdev, bp->regview);
8429         bp->regview = NULL;
8430
8431 err_out_release:
8432         pci_release_regions(pdev);
8433
8434 err_out_disable:
8435         pci_disable_device(pdev);
8436         pci_set_drvdata(pdev, NULL);
8437
8438 err_out:
8439         return rc;
8440 }
8441
8442 static char *
8443 bnx2_bus_string(struct bnx2 *bp, char *str)
8444 {
8445         char *s = str;
8446
8447         if (bp->flags & BNX2_FLAG_PCIE) {
8448                 s += sprintf(s, "PCI Express");
8449         } else {
8450                 s += sprintf(s, "PCI");
8451                 if (bp->flags & BNX2_FLAG_PCIX)
8452                         s += sprintf(s, "-X");
8453                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8454                         s += sprintf(s, " 32-bit");
8455                 else
8456                         s += sprintf(s, " 64-bit");
8457                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8458         }
8459         return str;
8460 }
8461
8462 static void
8463 bnx2_del_napi(struct bnx2 *bp)
8464 {
8465         int i;
8466
8467         for (i = 0; i < bp->irq_nvecs; i++)
8468                 netif_napi_del(&bp->bnx2_napi[i].napi);
8469 }
8470
8471 static void
8472 bnx2_init_napi(struct bnx2 *bp)
8473 {
8474         int i;
8475
8476         for (i = 0; i < bp->irq_nvecs; i++) {
8477                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8478                 int (*poll)(struct napi_struct *, int);
8479
8480                 if (i == 0)
8481                         poll = bnx2_poll;
8482                 else
8483                         poll = bnx2_poll_msix;
8484
8485                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8486                 bnapi->bp = bp;
8487         }
8488 }
8489
/* net_device callbacks for bnx2 interfaces.  Operations not listed here
 * fall back to the networking core's defaults.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open               = bnx2_open,
	.ndo_start_xmit         = bnx2_start_xmit,
	.ndo_stop               = bnx2_close,
	.ndo_get_stats64        = bnx2_get_stats64,
	.ndo_set_rx_mode        = bnx2_set_rx_mode,
	.ndo_do_ioctl           = bnx2_ioctl,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = bnx2_change_mac_addr,
	.ndo_change_mtu         = bnx2_change_mtu,
	.ndo_fix_features       = bnx2_fix_features,
	.ndo_set_features       = bnx2_set_features,
	.ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = poll_bnx2,
#endif
};
8507
8508 static int
8509 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8510 {
8511         static int version_printed = 0;
8512         struct net_device *dev;
8513         struct bnx2 *bp;
8514         int rc;
8515         char str[40];
8516
8517         if (version_printed++ == 0)
8518                 pr_info("%s", version);
8519
8520         /* dev zeroed in init_etherdev */
8521         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8522         if (!dev)
8523                 return -ENOMEM;
8524
8525         rc = bnx2_init_board(pdev, dev);
8526         if (rc < 0)
8527                 goto err_free;
8528
8529         dev->netdev_ops = &bnx2_netdev_ops;
8530         dev->watchdog_timeo = TX_TIMEOUT;
8531         dev->ethtool_ops = &bnx2_ethtool_ops;
8532
8533         bp = netdev_priv(dev);
8534
8535         pci_set_drvdata(pdev, dev);
8536
8537         memcpy(dev->dev_addr, bp->mac_addr, 6);
8538
8539         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8540                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8541                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8542
8543         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8544                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8545
8546         dev->vlan_features = dev->hw_features;
8547         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8548         dev->features |= dev->hw_features;
8549         dev->priv_flags |= IFF_UNICAST_FLT;
8550
8551         if ((rc = register_netdev(dev))) {
8552                 dev_err(&pdev->dev, "Cannot register net device\n");
8553                 goto error;
8554         }
8555
8556         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8557                     "node addr %pM\n", board_info[ent->driver_data].name,
8558                     ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8559                     ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8560                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8561                     pdev->irq, dev->dev_addr);
8562
8563         return 0;
8564
8565 error:
8566         pci_iounmap(pdev, bp->regview);
8567         pci_release_regions(pdev);
8568         pci_disable_device(pdev);
8569         pci_set_drvdata(pdev, NULL);
8570 err_free:
8571         free_netdev(dev);
8572         return rc;
8573 }
8574
/* PCI remove entry point: tear down everything bnx2_init_one() and the
 * runtime created, in the reverse order it was set up.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Stop new netdev callbacks first. */
	unregister_netdev(dev);

	/* Ensure the periodic timer and any queued reset work have fully
	 * finished before the state they touch is freed below.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8603
/* Legacy PCI suspend hook: quiesce the interface and drop the chip into
 * the power state requested by the PM core.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Interface is up: stop pending reset work, queues/NAPI and the
	 * periodic timer, then reset the chip and release all buffers
	 * before lowering the power state.
	 */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8627
/* Legacy PCI resume hook: restore PCI config space and, if the
 * interface was up at suspend time, reinitialize the NIC and restart
 * the queues/timer stopped by bnx2_suspend().  Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	/* Nothing to reprogram if the interface was down. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8644
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* Link is dead for good; tell the PCI core to give up. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8679
8680 /**
8681  * bnx2_io_slot_reset - called after the pci bus has been reset.
8682  * @pdev: Pointer to PCI device
8683  *
8684  * Restart the card from scratch, as if from a cold-boot.
8685  */
8686 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8687 {
8688         struct net_device *dev = pci_get_drvdata(pdev);
8689         struct bnx2 *bp = netdev_priv(dev);
8690         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8691         int err = 0;
8692
8693         rtnl_lock();
8694         if (pci_enable_device(pdev)) {
8695                 dev_err(&pdev->dev,
8696                         "Cannot re-enable PCI device after reset\n");
8697         } else {
8698                 pci_set_master(pdev);
8699                 pci_restore_state(pdev);
8700                 pci_save_state(pdev);
8701
8702                 if (netif_running(dev)) {
8703                         bnx2_set_power_state(bp, PCI_D0);
8704                         err = bnx2_init_nic(bp, 1);
8705                 }
8706                 if (!err)
8707                         result = PCI_ERS_RESULT_RECOVERED;
8708         }
8709
8710         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8711                 bnx2_napi_enable(bp);
8712                 dev_close(dev);
8713         }
8714         rtnl_unlock();
8715
8716         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8717                 return result;
8718
8719         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8720         if (err) {
8721                 dev_err(&pdev->dev,
8722                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8723                          err); /* non-fatal, continue */
8724         }
8725
8726         return result;
8727 }
8728
8729 /**
8730  * bnx2_io_resume - called when traffic can start flowing again.
8731  * @pdev: Pointer to PCI device
8732  *
8733  * This callback is called when the error recovery driver tells us that
8734  * its OK to resume normal operation.
8735  */
8736 static void bnx2_io_resume(struct pci_dev *pdev)
8737 {
8738         struct net_device *dev = pci_get_drvdata(pdev);
8739         struct bnx2 *bp = netdev_priv(dev);
8740
8741         rtnl_lock();
8742         if (netif_running(dev))
8743                 bnx2_netif_start(bp, true);
8744
8745         netif_device_attach(dev);
8746         rtnl_unlock();
8747 }
8748
/* AER / PCI error recovery hooks handed to the PCI core. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected = bnx2_io_error_detected,
	.slot_reset     = bnx2_io_slot_reset,
	.resume         = bnx2_io_resume,
};
8754
/* PCI driver descriptor; module_pci_driver() below generates the module
 * init/exit boilerplate that registers and unregisters it.
 */
static struct pci_driver bnx2_pci_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = bnx2_pci_tbl,
	.probe          = bnx2_init_one,
	.remove         = bnx2_remove_one,
	.suspend        = bnx2_suspend,
	.resume         = bnx2_resume,
	.err_handler    = &bnx2_err_handler,
};

module_pci_driver(bnx2_pci_driver);