]> Pileus Git - ~andy/linux/blob - drivers/net/bnx2.c
ixgbe: Only set/clear VFE in ixgbe_set_rx_mode
[~andy/linux] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define PFX DRV_MODULE_NAME     ": "
62 #define DRV_MODULE_VERSION      "2.0.2"
63 #define DRV_MODULE_RELDATE      "Aug 21, 2009"
64 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
65 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
67 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
68 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
/* One-line driver banner; __devinitdata because it is only needed
 * during device probe.
 */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87
/* Module parameter: when nonzero the driver will not use MSI and
 * falls back to legacy interrupts.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
93 typedef enum {
94         BCM5706 = 0,
95         NC370T,
96         NC370I,
97         BCM5706S,
98         NC370F,
99         BCM5708,
100         BCM5708S,
101         BCM5709,
102         BCM5709S,
103         BCM5716,
104         BCM5716S,
105 } board_t;
106
/* Board name strings, indexed by board_t above -- the order of the
 * entries here must match the board_t enumeration exactly.
 */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
123
/* PCI ID match table.  HP OEM boards are distinguished by their
 * subsystem vendor/device IDs and therefore must appear BEFORE the
 * generic PCI_ANY_ID entries for the same Broadcom device ID, since
 * the table is matched in order.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 5716/5716S have no PCI_DEVICE_ID_* constant; raw IDs used. */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
149
/* Known NVRAM/flash device geometries.  Each entry ends with flags,
 * page geometry (bits/size), byte-address mask, total size and a
 * printable name.  NOTE(review): the five leading hex words are raw
 * NVRAM controller setup values whose field layout is not visible in
 * this file -- confirm against struct flash_spec in bnx2.h before
 * changing any of them.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
238
/* Fixed flash geometry for 5709-family chips (buffered interface,
 * 256kB) -- used instead of matching against flash_table above.
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
247
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         smp_mb();
255
256         /* The ring uses 256 indices for 255 entries, one of them
257          * needs to be skipped.
258          */
259         diff = txr->tx_prod - txr->tx_cons;
260         if (unlikely(diff >= TX_DESC_CNT)) {
261                 diff &= 0xffff;
262                 if (diff == TX_DESC_CNT)
263                         diff = MAX_TX_DESC_CNT;
264         }
265         return (bp->tx_ring_size - diff);
266 }
267
/* Read a chip register through the PCI config indirect window.
 * indirect_lock keeps the address write and data read atomic with
 * respect to other indirect-access users.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
279
/* Write a chip register through the PCI config indirect window;
 * the address/data pair is serialized by indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
288
/* Write one word of the firmware shared-memory region; @offset is
 * relative to bp->shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
299 }
300
/* Write one 32-bit word of on-chip context memory for a connection.
 * On 5709 the write goes through the CTX_CTX_DATA/CTRL pair and we
 * poll (up to 5 x 5us) for the chip to consume the request; older
 * chips use a simple address/data register pair.  indirect_lock
 * serializes both register sequences.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* Wait for WRITE_REQ to clear, i.e. the write completed. */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
324
325 #ifdef BCM_CNIC
326 static int
327 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328 {
329         struct bnx2 *bp = netdev_priv(dev);
330         struct drv_ctl_io *io = &info->data.io;
331
332         switch (info->cmd) {
333         case DRV_CTL_IO_WR_CMD:
334                 bnx2_reg_wr_ind(bp, io->offset, io->data);
335                 break;
336         case DRV_CTL_IO_RD_CMD:
337                 io->data = bnx2_reg_rd_ind(bp, io->offset);
338                 break;
339         case DRV_CTL_CTX_WR_CMD:
340                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341                 break;
342         default:
343                 return -EINVAL;
344         }
345         return 0;
346 }
347
/* Populate the irq/status-block info handed to the cnic driver.
 * With MSI-X, cnic gets its own vector (index bp->irq_nvecs) and the
 * bnx2 NAPI handler does not service cnic; otherwise cnic shares
 * vector 0 and cnic_present tells the shared handler to poll it.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        int sb_id;

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_present = 0;
                sb_id = bp->irq_nvecs;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                bnapi->cnic_tag = bnapi->last_status_idx;
                bnapi->cnic_present = 1;
                sb_id = 0;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }

        /* cnic's status block is the sb_id'th slot in the MSI-X aligned
         * status block area allocated in bnx2_alloc_mem().
         */
        cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
        cp->irq_arr[0].status_blk = (void *)
                ((unsigned long) bnapi->status_blk.msi +
                (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
        cp->irq_arr[0].status_blk_num = sb_id;
        cp->num_irq = 1;
}
374
/* Callback by which the cnic driver attaches to this device.
 * Publishes @ops via rcu_assign_pointer so readers that dereference
 * bp->cnic_ops under RCU see fully-initialized data.  Returns
 * -EINVAL for a NULL ops, -EBUSY if cnic is already registered.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        /* cnic_data must be set before ops is published. */
        bp->cnic_data = data;
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}
397
/* Detach the cnic driver: clear the ops pointer under cnic_lock, then
 * synchronize_rcu() to wait for any in-flight RCU readers of
 * bp->cnic_ops before the caller may free the ops structure.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
412
/* Entry point called by the cnic driver to discover this device.
 * Fills in the cnic_eth_dev with our hardware handles and the
 * register/unregister/ctl callbacks, and returns it.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = bp->chip_id;
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->drv_ctl = bnx2_drv_ctl;
        cp->drv_register_cnic = bnx2_register_cnic;
        cp->drv_unregister_cnic = bnx2_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
429
430 static void
431 bnx2_cnic_stop(struct bnx2 *bp)
432 {
433         struct cnic_ops *c_ops;
434         struct cnic_ctl_info info;
435
436         mutex_lock(&bp->cnic_lock);
437         c_ops = bp->cnic_ops;
438         if (c_ops) {
439                 info.cmd = CNIC_CTL_STOP_CMD;
440                 c_ops->cnic_ctl(bp->cnic_data, &info);
441         }
442         mutex_unlock(&bp->cnic_lock);
443 }
444
/* Tell an attached cnic driver to (re)start.  In non-MSI-X mode the
 * shared vector-0 status block is used, so resync cnic_tag with the
 * latest status index before starting.  cnic_lock keeps cnic_ops
 * stable across the call.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
        struct cnic_ops *c_ops;
        struct cnic_ctl_info info;

        mutex_lock(&bp->cnic_lock);
        c_ops = bp->cnic_ops;
        if (c_ops) {
                if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

                        bnapi->cnic_tag = bnapi->last_status_idx;
                }
                info.cmd = CNIC_CTL_START_CMD;
                c_ops->cnic_ctl(bp->cnic_data, &info);
        }
        mutex_unlock(&bp->cnic_lock);
}
464
465 #else
466
/* Stub: CNIC support not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
471
/* Stub: CNIC support not compiled in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
476
477 #endif
478
/* Read PHY register @reg over MDIO into *@val.
 * Returns 0 on success, -EBUSY (and *val = 0) if the transaction did
 * not complete within the polling budget (50 x 10us).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* If the EMAC is auto-polling the PHY, pause auto-poll so our
         * manual MDIO transaction doesn't collide with it.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delaying */

                udelay(40);
        }

        /* Launch the read: PHY address, register number, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for START_BUSY to clear, then re-read to fetch the data
         * bits.  val1 keeps the BUSY bit set if we time out.
         */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
535
/* Write @val to PHY register @reg over MDIO.
 * Returns 0 on success, -EBUSY if the transaction did not complete
 * within the polling budget (50 x 10us).  Mirrors bnx2_read_phy()'s
 * auto-poll pause/restore sequence.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Pause EMAC auto-polling for the duration of the transaction. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Launch the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
584
/* Mask interrupts on every vector, then read INT_ACK_CMD back so the
 * writes are known to have reached the chip before returning.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        }
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
598
/* Re-enable interrupts on every vector.  Each vector is acked twice
 * with its last seen status index: first with MASK_INT still set,
 * then with interrupts unmasked.  Finally COAL_NOW forces the host
 * coalescing block to generate an immediate update so no event that
 * arrived while masked is left unserviced.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
619
/* Disable interrupts and wait for any running handlers to finish.
 * intr_sem is bumped first so handlers that race with us back off;
 * it is paired with the atomic_dec_and_test() in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
633
634 static void
635 bnx2_napi_disable(struct bnx2 *bp)
636 {
637         int i;
638
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 napi_disable(&bp->bnx2_napi[i].napi);
641 }
642
643 static void
644 bnx2_napi_enable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_enable(&bp->bnx2_napi[i].napi);
650 }
651
/* Quiesce the whole datapath: stop cnic, mask and synchronize irqs,
 * then (if the interface is up) stop NAPI and the tx queues.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_cnic_stop(bp);
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
663
/* Undo one bnx2_disable_int_sync()/bnx2_netif_stop().  Stops can
 * nest; only the call that brings intr_sem back to zero actually
 * restarts tx queues, NAPI, interrupts and cnic.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        bnx2_cnic_start(bp);
                }
        }
}
676
677 static void
678 bnx2_free_tx_mem(struct bnx2 *bp)
679 {
680         int i;
681
682         for (i = 0; i < bp->num_tx_rings; i++) {
683                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
684                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
685
686                 if (txr->tx_desc_ring) {
687                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
688                                             txr->tx_desc_ring,
689                                             txr->tx_desc_mapping);
690                         txr->tx_desc_ring = NULL;
691                 }
692                 kfree(txr->tx_buf_ring);
693                 txr->tx_buf_ring = NULL;
694         }
695 }
696
697 static void
698 bnx2_free_rx_mem(struct bnx2 *bp)
699 {
700         int i;
701
702         for (i = 0; i < bp->num_rx_rings; i++) {
703                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
704                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
705                 int j;
706
707                 for (j = 0; j < bp->rx_max_ring; j++) {
708                         if (rxr->rx_desc_ring[j])
709                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
710                                                     rxr->rx_desc_ring[j],
711                                                     rxr->rx_desc_mapping[j]);
712                         rxr->rx_desc_ring[j] = NULL;
713                 }
714                 vfree(rxr->rx_buf_ring);
715                 rxr->rx_buf_ring = NULL;
716
717                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
718                         if (rxr->rx_pg_desc_ring[j])
719                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
720                                                     rxr->rx_pg_desc_ring[j],
721                                                     rxr->rx_pg_desc_mapping[j]);
722                         rxr->rx_pg_desc_ring[j] = NULL;
723                 }
724                 vfree(rxr->rx_pg_ring);
725                 rxr->rx_pg_ring = NULL;
726         }
727 }
728
729 static int
730 bnx2_alloc_tx_mem(struct bnx2 *bp)
731 {
732         int i;
733
734         for (i = 0; i < bp->num_tx_rings; i++) {
735                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
736                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
737
738                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
739                 if (txr->tx_buf_ring == NULL)
740                         return -ENOMEM;
741
742                 txr->tx_desc_ring =
743                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
744                                              &txr->tx_desc_mapping);
745                 if (txr->tx_desc_ring == NULL)
746                         return -ENOMEM;
747         }
748         return 0;
749 }
750
751 static int
752 bnx2_alloc_rx_mem(struct bnx2 *bp)
753 {
754         int i;
755
756         for (i = 0; i < bp->num_rx_rings; i++) {
757                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
758                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
759                 int j;
760
761                 rxr->rx_buf_ring =
762                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
763                 if (rxr->rx_buf_ring == NULL)
764                         return -ENOMEM;
765
766                 memset(rxr->rx_buf_ring, 0,
767                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
768
769                 for (j = 0; j < bp->rx_max_ring; j++) {
770                         rxr->rx_desc_ring[j] =
771                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
772                                                      &rxr->rx_desc_mapping[j]);
773                         if (rxr->rx_desc_ring[j] == NULL)
774                                 return -ENOMEM;
775
776                 }
777
778                 if (bp->rx_pg_ring_size) {
779                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
780                                                   bp->rx_max_pg_ring);
781                         if (rxr->rx_pg_ring == NULL)
782                                 return -ENOMEM;
783
784                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
785                                bp->rx_max_pg_ring);
786                 }
787
788                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
789                         rxr->rx_pg_desc_ring[j] =
790                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
791                                                 &rxr->rx_pg_desc_mapping[j]);
792                         if (rxr->rx_pg_desc_ring[j] == NULL)
793                                 return -ENOMEM;
794
795                 }
796         }
797         return 0;
798 }
799
/* Free everything bnx2_alloc_mem() allocated: tx/rx rings, 5709
 * context pages, and the combined status + statistics block.  Safe
 * to call on partially-allocated state (used as the error path of
 * bnx2_alloc_mem()).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* stats_blk lives inside the status block allocation, so both
         * are released by this single free.
         */
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
825
/* Allocate all host memory the device needs: the combined status +
 * statistics block, per-vector MSI-X status blocks, 5709 context
 * pages, and the rx/tx rings.  Returns 0 or -ENOMEM; on failure all
 * partial allocations are released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* Room for one aligned status block per hardware vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block... */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        /* ...and each additional MSI-X vector gets its own aligned
         * slot within the same allocation.
         */
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector number as written to INT_ACK_CMD. */
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 keeps connection context in host memory: 8kB total,
         * split into BCM_PAGE_SIZE chunks.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
902
/* Publish the driver's current link state to the firmware by encoding
 * it as BNX2_LINK_STATUS_* bits in shared memory.  No-op when the PHY
 * is managed remotely by the firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR link/AN bits are latched; read twice so
                         * the second read reflects the current state. */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
961
962 static char *
963 bnx2_xceiver_str(struct bnx2 *bp)
964 {
965         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
966                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
967                  "Copper"));
968 }
969
970 static void
971 bnx2_report_link(struct bnx2 *bp)
972 {
973         if (bp->link_up) {
974                 netif_carrier_on(bp->dev);
975                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
976                        bnx2_xceiver_str(bp));
977
978                 printk("%d Mbps ", bp->line_speed);
979
980                 if (bp->duplex == DUPLEX_FULL)
981                         printk("full duplex");
982                 else
983                         printk("half duplex");
984
985                 if (bp->flow_ctrl) {
986                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
987                                 printk(", receive ");
988                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
989                                         printk("& transmit ");
990                         }
991                         else {
992                                 printk(", transmit ");
993                         }
994                         printk("flow control ON");
995                 }
996                 printk("\n");
997         }
998         else {
999                 netif_carrier_off(bp->dev);
1000                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
1001                        bnx2_xceiver_str(bp));
1002         }
1003
1004         bnx2_report_fw_link(bp);
1005 }
1006
/* Resolve bp->flow_ctrl (FLOW_CTRL_TX / FLOW_CTRL_RX) for the current
 * link.  When pause autonegotiation is not in effect, the requested
 * setting is applied directly (full duplex only).  Otherwise the
 * result follows the local/partner advertisement resolution of
 * Table 28B-3, IEEE 802.3ab-1999.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* Pause resolution requires both speed and flow control to be
         * autonegotiated; otherwise honor the requested setting. */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause frames are only defined for full-duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes block reports the already-resolved pause
         * state directly; no advertisement math needed. */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                /* Translate the 1000Base-X pause bits into the copper
                 * ADVERTISE_PAUSE_* encoding so a single resolution
                 * path below handles both media types. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                /* Partner only wants to send pause. */
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1082
/* Record line_speed/duplex for a 5709 SerDes PHY that has link.
 * With autoneg disabled, the forced (requested) settings are
 * reported; otherwise they are decoded from the GP_STATUS block.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        /* GP_TOP_AN_STATUS1 lives in the GP_STATUS register block;
         * switch the block address there and back around the read. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                /* Forced mode: report what was requested. */
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
1121
1122 static int
1123 bnx2_5708s_linkup(struct bnx2 *bp)
1124 {
1125         u32 val;
1126
1127         bp->link_up = 1;
1128         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1129         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1130                 case BCM5708S_1000X_STAT1_SPEED_10:
1131                         bp->line_speed = SPEED_10;
1132                         break;
1133                 case BCM5708S_1000X_STAT1_SPEED_100:
1134                         bp->line_speed = SPEED_100;
1135                         break;
1136                 case BCM5708S_1000X_STAT1_SPEED_1G:
1137                         bp->line_speed = SPEED_1000;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1140                         bp->line_speed = SPEED_2500;
1141                         break;
1142         }
1143         if (val & BCM5708S_1000X_STAT1_FD)
1144                 bp->duplex = DUPLEX_FULL;
1145         else
1146                 bp->duplex = DUPLEX_HALF;
1147
1148         return 0;
1149 }
1150
/* Record link parameters for a 5706 SerDes PHY that has link.  SerDes
 * on this chip runs at 1000 Mbps only; duplex comes from BMCR in
 * forced mode, or is refined from the common (local & partner)
 * 1000Base-X advertisement when autonegotiating.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
        u32 bmcr, local_adv, remote_adv, common;

        bp->link_up = 1;
        bp->line_speed = SPEED_1000;

        /* Start with the duplex BMCR reports. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_FULLDPLX) {
                bp->duplex = DUPLEX_FULL;
        }
        else {
                bp->duplex = DUPLEX_HALF;
        }

        /* Forced mode: BMCR's duplex is authoritative. */
        if (!(bmcr & BMCR_ANENABLE)) {
                return 0;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* Autoneg: prefer the duplex both sides advertised. */
        common = local_adv & remote_adv;
        if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

                if (common & ADVERTISE_1000XFULL) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
1187
/* Resolve speed/duplex for a copper PHY with link.  When autoneg is
 * on, 1000Base-T abilities are checked first, then the 100/10
 * abilities; if no common ability is found, the link is declared
 * down.  When autoneg is off, speed/duplex are decoded from BMCR.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* The partner's 1000BASE-T ability bits in STAT1000
                 * sit two bit positions above our CTRL1000
                 * advertisement bits; shift to align before masking. */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match: fall back to 100/10
                         * resolution from the base advertisement. */
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* No common ability at all. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Forced mode: decode directly from BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
1253
/* Program the L2 rx context for the given cid: build the context-type
 * word (BD chain type, L2 size, and on 5709 the rx buffer low/high
 * water marks used for flow control) and write it to
 * BNX2_L2CTX_CTX_TYPE.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;       /* NOTE(review): Broadcom-specified field;
                                 * meaning not visible here. */

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 lo_water, hi_water;

                /* Low water mark only matters when we may generate tx
                 * pause frames; otherwise it is disabled. */
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
                else
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
                /* Tiny rings: marks would never be meaningful. */
                if (lo_water >= bp->rx_ring_size)
                        lo_water = 0;

                hi_water = bp->rx_ring_size / 4;

                if (hi_water <= lo_water)
                        lo_water = 0;

                /* Convert both marks to hardware units. */
                hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
                lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

                /* High mark field is 4 bits; clamp it, and if it
                 * scaled to zero disable the low mark as well. */
                if (hi_water > 0xf)
                        hi_water = 0xf;
                else if (hi_water == 0)
                        lo_water = 0;
                val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
        }
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1289
1290 static void
1291 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1292 {
1293         int i;
1294         u32 cid;
1295
1296         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1297                 if (i == 1)
1298                         cid = RX_RSS_CID;
1299                 bnx2_init_rx_context(bp, cid);
1300         }
1301 }
1302
/* Reprogram the EMAC for the current link: tx lengths, port mode
 * (MII/GMII/2.5G), duplex, and rx/tx pause enables; finally ack the
 * EMAC link-change interrupt.  On 5709 the rx contexts are rewritten
 * because their flow-control water marks depend on bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* NOTE(review): 0x2620 / 0x26ff are Broadcom-specified
         * TX_LENGTHS values; the latter is for 1G half duplex. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no dedicated 10M mode. */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* No link: leave the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        /* Water marks in the rx contexts depend on flow_ctrl. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_all_rx_contexts(bp);
}
1370
1371 static void
1372 bnx2_enable_bmsr1(struct bnx2 *bp)
1373 {
1374         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1375             (CHIP_NUM(bp) == CHIP_NUM_5709))
1376                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1377                                MII_BNX2_BLK_ADDR_GP_STATUS);
1378 }
1379
1380 static void
1381 bnx2_disable_bmsr1(struct bnx2 *bp)
1382 {
1383         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384             (CHIP_NUM(bp) == CHIP_NUM_5709))
1385                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1387 }
1388
/* Make sure 2.5G is advertised on a 2.5G-capable SerDes PHY.
 *
 * Returns 1 if the UP1 register already advertised 2.5G (nothing was
 * written), 0 if UP1 had to be updated or the PHY is not 2.5G
 * capable.  On 5709 the OVER1G register block must be selected around
 * the UP1 access.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
        u32 up1;
        int ret = 1;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return 0;

        if (bp->autoneg & AUTONEG_SPEED)
                bp->advertising |= ADVERTISED_2500baseX_Full;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

        bnx2_read_phy(bp, bp->mii_up1, &up1);
        if (!(up1 & BCM5708S_UP1_2G5)) {
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, bp->mii_up1, up1);
                ret = 0;
        }

        /* Restore the default register block on 5709. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return ret;
}
1417
1418 static int
1419 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1420 {
1421         u32 up1;
1422         int ret = 0;
1423
1424         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1425                 return 0;
1426
1427         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1428                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1429
1430         bnx2_read_phy(bp, bp->mii_up1, &up1);
1431         if (up1 & BCM5708S_UP1_2G5) {
1432                 up1 &= ~BCM5708S_UP1_2G5;
1433                 bnx2_write_phy(bp, bp->mii_up1, up1);
1434                 ret = 1;
1435         }
1436
1437         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1438                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1439                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1440
1441         return ret;
1442 }
1443
/* Force the SerDes PHY to 2.5G.  On 5709 this is done via the
 * SERDES_DIG MISC1 register; on 5708 via a BMCR override bit; other
 * chips are left untouched.  If speed autoneg was selected, autoneg
 * is turned off in BMCR and duplex is forced from req_duplex.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* Set FORCE + FORCE_2_5G in the SERDES_DIG block, then
                 * switch back to the default block for the BMCR read. */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr |= BCM5708S_BMCR_FORCE_2500;
        } else {
                /* Chip cannot force 2.5G; nothing to do. */
                return;
        }

        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1480
/* Undo a forced 2.5G setting on the SerDes PHY (counterpart of
 * bnx2_enable_forced_2g5).  If speed autoneg was selected, autoneg is
 * re-enabled and restarted at 1000 Mbps in BMCR.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        u32 bmcr;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 val;

                /* Clear the FORCE bit in the SERDES_DIG block, then
                 * switch back to the default block for the BMCR read. */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
                val &= ~MII_BNX2_SD_MISC1_FORCE;
                bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                /* Chip has no 2.5G force bit; nothing to undo. */
                return;
        }

        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1513
/* Toggle the forced-link-down state of the 5706 SerDes link via the
 * expanded SERDES_CTL register (accessed through the DSP address /
 * read-write port pair).  Callers pass start == 0 to release a
 * previously forced-down link (see bnx2_set_link).
 *
 * NOTE(review): the 0xff0f and 0xc0 masks are Broadcom-specified bit
 * patterns for this register; their individual meanings are not
 * documented here -- do not change without vendor documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        if (start)
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
        else
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1526
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 * Reports link transitions via bnx2_report_link().  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Remote-PHY link state is reported by firmware, not polled. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* BMSR is latched; read twice so the second read reflects the
         * current link state.  On 5709 SerDes the register lives in
         * the GP_STATUS block (enable/disable_bmsr1). */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                /* AN_DBG is latched as well; read it twice. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                /* Override BMSR: trust the EMAC link indication plus
                 * the absence of a NOSYNC condition. */
                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Decode speed/duplex per PHY type, then resolve
                 * pause from the advertisements. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                /* A parallel-detected link went down: re-enable
                 * autoneg so a new partner can be negotiated with. */
                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log on actual transitions. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1610
1611 static int
1612 bnx2_reset_phy(struct bnx2 *bp)
1613 {
1614         int i;
1615         u32 reg;
1616
1617         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1618
1619 #define PHY_RESET_MAX_WAIT 100
1620         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1621                 udelay(10);
1622
1623                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624                 if (!(reg & BMCR_RESET)) {
1625                         udelay(20);
1626                         break;
1627                 }
1628         }
1629         if (i == PHY_RESET_MAX_WAIT) {
1630                 return -EBUSY;
1631         }
1632         return 0;
1633 }
1634
1635 static u32
1636 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637 {
1638         u32 adv = 0;
1639
1640         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPAUSE;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_CAP;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1659                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661                 }
1662                 else {
1663                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664                 }
1665         }
1666         return adv;
1667 }
1668
1669 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1670
/* Hand link configuration to the firmware-managed (remote) PHY:
 * encode the autoneg/forced speed settings and pause advertisement
 * into a BNX2_NETLINK_SET_LINK_* bitmask and issue the SET_LINK
 * firmware command.  Drops and re-acquires bp->phy_lock around the
 * firmware sync (see the __releases/__acquires annotations).
 * Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: advertise every enabled speed. */
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced mode: exactly one speed/duplex code. */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        /* The firmware handshake sleeps, so the spinlock must be
         * released while waiting. */
        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1729
/* Configure the SerDes (fibre) PHY for the requested link settings.
 *
 * Delegates to bnx2_setup_remote_phy() when the PHY is firmware-managed.
 * Otherwise programs BMCR/advertisement registers directly, handling both
 * forced-speed and autoneg modes.  phy_lock is dropped briefly when a
 * sleeping delay is needed (see __releases/__acquires).
 *
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced speed/duplex path. */
                u32 new_bmcr;
                int force_link_down = 0;

                /* Toggling 2.5G capability requires a renegotiation, so
                 * force the link down if the 2.5G state had to change. */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* NOTE(review): clears bit 13 of BMCR on the
                                 * 5709 SerDes when dropping back to 1G —
                                 * chip-specific, meaning not visible here. */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                /* Briefly advertise nothing and restart
                                 * autoneg so the peer sees the link drop. */
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        /* Nothing changed in BMCR; just refresh flow control
                         * and MAC link state. */
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Autoneg path. */
        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* msleep() may sleep — drop the BH lock. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1846
/* All speeds advertisable on a fibre (SerDes) port; includes 2.5G only
 * when the PHY is 2.5G-capable.  Expects a `bp` in scope at expansion. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All speeds advertisable on a copper (TP) port. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
1862 static void
1863 bnx2_set_default_remote_link(struct bnx2 *bp)
1864 {
1865         u32 link;
1866
1867         if (bp->phy_port == PORT_TP)
1868                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1869         else
1870                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1871
1872         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1873                 bp->req_line_speed = 0;
1874                 bp->autoneg |= AUTONEG_SPEED;
1875                 bp->advertising = ADVERTISED_Autoneg;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877                         bp->advertising |= ADVERTISED_10baseT_Half;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879                         bp->advertising |= ADVERTISED_10baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881                         bp->advertising |= ADVERTISED_100baseT_Half;
1882                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883                         bp->advertising |= ADVERTISED_100baseT_Full;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885                         bp->advertising |= ADVERTISED_1000baseT_Full;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887                         bp->advertising |= ADVERTISED_2500baseX_Full;
1888         } else {
1889                 bp->autoneg = 0;
1890                 bp->advertising = 0;
1891                 bp->req_duplex = DUPLEX_FULL;
1892                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893                         bp->req_line_speed = SPEED_10;
1894                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895                                 bp->req_duplex = DUPLEX_HALF;
1896                 }
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898                         bp->req_line_speed = SPEED_100;
1899                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900                                 bp->req_duplex = DUPLEX_HALF;
1901                 }
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903                         bp->req_line_speed = SPEED_1000;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905                         bp->req_line_speed = SPEED_2500;
1906         }
1907 }
1908
1909 static void
1910 bnx2_set_default_link(struct bnx2 *bp)
1911 {
1912         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913                 bnx2_set_default_remote_link(bp);
1914                 return;
1915         }
1916
1917         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918         bp->req_line_speed = 0;
1919         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1920                 u32 reg;
1921
1922                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1923
1924                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1927                         bp->autoneg = 0;
1928                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1929                         bp->req_duplex = DUPLEX_FULL;
1930                 }
1931         } else
1932                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1933 }
1934
1935 static void
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1937 {
1938         u32 msg;
1939         u32 addr;
1940
1941         spin_lock(&bp->indirect_lock);
1942         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946         spin_unlock(&bp->indirect_lock);
1947 }
1948
/* Process a link-status event reported by the firmware-managed PHY.
 *
 * Reads the LINK_STATUS word from shared memory, updates link state,
 * speed, duplex, flow control and PHY port type accordingly, then
 * refreshes the MAC link configuration and reports any link change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* remember to detect a change */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* Firmware missed our pulses; answer immediately. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Half-duplex cases deliberately fall through to the
                 * matching full-duplex case to pick up the line speed. */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control comes from the user when either speed or
                 * flow-control autoneg is off, else from the link result. */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A port-type change means the defaults must be reloaded. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
2025
2026 static int
2027 bnx2_set_remote_link(struct bnx2 *bp)
2028 {
2029         u32 evt_code;
2030
2031         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2032         switch (evt_code) {
2033                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2034                         bnx2_remote_phy_event(bp);
2035                         break;
2036                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037                 default:
2038                         bnx2_send_heart_beat(bp);
2039                         break;
2040         }
2041         return 0;
2042 }
2043
/* Configure the copper PHY for the requested link settings.
 *
 * In autoneg mode, rewrites the advertisement registers and restarts
 * autonegotiation only if something actually changed.  In forced mode,
 * programs BMCR directly, forcing the link down first (via loopback) so
 * the partner renegotiates.  phy_lock is dropped around the sleeping
 * delay (see __releases/__acquires).
 *
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Current advertisements, masked to the bits we manage. */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Desired advertisements from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Rewrite and restart autoneg only on a real change or if
                 * autoneg is currently disabled in BMCR. */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path (autoneg off). */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR latches link-down; read twice for current state. */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
2142
2143 static int
2144 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2145 __releases(&bp->phy_lock)
2146 __acquires(&bp->phy_lock)
2147 {
2148         if (bp->loopback == MAC_LOOPBACK)
2149                 return 0;
2150
2151         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2152                 return (bnx2_setup_serdes_phy(bp, port));
2153         }
2154         else {
2155                 return (bnx2_setup_copper_phy(bp));
2156         }
2157 }
2158
/* Bring up the 5709 SerDes PHY.
 *
 * The 5709S uses shadowed MII registers (base + 0x10) selected through
 * block-address writes; this sets the driver's register offsets, selects
 * the autoneg MMD, optionally resets the PHY, forces fibre mode, sets the
 * 2.5G capability bit to match phy_flags, and enables BAM/CL73 next-page
 * negotiation.
 *
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* Shadowed standard MII registers live at base + 0x10. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the autoneg MMD through the AER block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fibre mode, disable auto-detection. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G only if the PHY is capable. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        /* Enable CL73 BAM station-manager and next-page-after-BP. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address at the combo IEEE bank. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2208
/* Bring up the 5708 SerDes PHY.
 *
 * Optionally resets the PHY, enables IEEE/fibre/auto-detect modes and PLL
 * early-lock detect, advertises 2.5G when capable, applies a TX-amplitude
 * workaround on early 5708 revisions, and programs the NVRAM-specified TX
 * control value on backplane boards.
 *
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        /* Use IEEE-standard register semantics in the DIG3 block. */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G when the PHY supports it. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply the TX control value from NVRAM port config, but only on
         * backplane designs. */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2266
/* Bring up the 5706 SerDes PHY.
 *
 * Optionally resets the PHY, clears parallel-detect state, applies a
 * 5706-specific GPIO/HW-control setting, and tunes undocumented PHY
 * registers 0x18/0x1c differently for jumbo (MTU > 1500) vs standard
 * frames.  Register values here are chip-specific magic; their bit
 * meanings are not visible in this file.
 *
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        /* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 on 5706 only —
         * purpose not documented here; presumably a hardware workaround. */
        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2304
/* Bring up the copper PHY.
 *
 * Optionally resets the PHY, applies the CRC-fix and early-DAC-disable
 * workarounds when the corresponding phy_flags are set, tunes the
 * extended-packet-length bits for jumbo vs standard MTU, and enables
 * ethernet@wirespeed.  Registers 0x10/0x17/0x18/0x1c and their values
 * are Broadcom-specific magic not documented in this file.
 *
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* CRC workaround: a fixed sequence of shadow-register writes. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear the jumbo-frame bits for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2356
2357
2358 static int
2359 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2360 __releases(&bp->phy_lock)
2361 __acquires(&bp->phy_lock)
2362 {
2363         u32 val;
2364         int rc = 0;
2365
2366         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2368
2369         bp->mii_bmcr = MII_BMCR;
2370         bp->mii_bmsr = MII_BMSR;
2371         bp->mii_bmsr1 = MII_BMSR;
2372         bp->mii_adv = MII_ADVERTISE;
2373         bp->mii_lpa = MII_LPA;
2374
2375         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
2377         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2378                 goto setup_phy;
2379
2380         bnx2_read_phy(bp, MII_PHYSID1, &val);
2381         bp->phy_id = val << 16;
2382         bnx2_read_phy(bp, MII_PHYSID2, &val);
2383         bp->phy_id |= val & 0xffff;
2384
2385         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2386                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2387                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2388                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2389                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2390                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2391                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2392         }
2393         else {
2394                 rc = bnx2_init_copper_phy(bp, reset_phy);
2395         }
2396
2397 setup_phy:
2398         if (!rc)
2399                 rc = bnx2_setup_phy(bp, bp->phy_port);
2400
2401         return rc;
2402 }
2403
2404 static int
2405 bnx2_set_mac_loopback(struct bnx2 *bp)
2406 {
2407         u32 mac_mode;
2408
2409         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413         bp->link_up = 1;
2414         return 0;
2415 }
2416
2417 static int bnx2_test_link(struct bnx2 *);
2418
2419 static int
2420 bnx2_set_phy_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423         int rc, i;
2424
2425         spin_lock_bh(&bp->phy_lock);
2426         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2427                             BMCR_SPEED1000);
2428         spin_unlock_bh(&bp->phy_lock);
2429         if (rc)
2430                 return rc;
2431
2432         for (i = 0; i < 10; i++) {
2433                 if (bnx2_test_link(bp) == 0)
2434                         break;
2435                 msleep(100);
2436         }
2437
2438         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2441                       BNX2_EMAC_MODE_25G_MODE);
2442
2443         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445         bp->link_up = 1;
2446         return 0;
2447 }
2448
/* Send a command to the bootcode firmware and optionally wait for its ack.
 *
 * @msg_data: BNX2_DRV_MSG_CODE_* command (sequence number is OR'd in here).
 * @ack:      if zero, fire-and-forget; otherwise poll for the ack.
 * @silent:   suppress the timeout printk when non-zero.
 *
 * The driver and firmware agree on a rolling sequence number: the command
 * (with sequence) is written to the DRV_MB mailbox and the firmware echoes
 * the sequence in FW_MB when done.  WAIT0-class commands return success as
 * soon as the poll loop exits, without checking the ack.  Sleeps; must not
 * be called in atomic context.
 *
 * Returns 0 on success, -EBUSY on ack timeout, -EIO on a firmware error
 * status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 commands don't require a completed handshake. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2494
/* Initialize the 5709's context memory and program the host page table
 * with the DMA addresses of the pre-allocated context blocks.
 * Returns 0 on success, -EBUSY on a hardware poll timeout, -ENOMEM if
 * a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (log2, relative to 256 bytes). */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* The chip clears MEM_INIT when context memory init completes. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Load the 64-bit bus address of page i into the page
		 * table data registers, request the write, then poll
		 * until the chip has latched it.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2542
/* Zero the on-chip context memory for all 96 connection IDs on
 * 5706/5708-class chips (the 5709 keeps context in host memory, see
 * bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps half of each 16-CID group to a
			 * relocated physical CID (0x60 + ...).
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			/* NOTE(review): += accumulates across iterations
			 * (offsets 0, 1<<shift, 3<<shift, ...); this is
			 * only correct while CTX_SIZE / PHY_CTX_SIZE <= 2
			 * -- verify against bnx2.h if those change.
			 */
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2585
/* Work around bad on-chip RX mbuf memory: drain every free mbuf from
 * the chip's pool, remember the good ones (bit 9 clear), then free
 * only those back -- deliberately leaking the bad blocks so the chip
 * never hands them out again.
 * Returns 0 on success, -ENOMEM if the scratch array allocation fails.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by the BNX2_RBUF_FW_BUF_FREE register. */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2637
2638 static void
2639 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2640 {
2641         u32 val;
2642
2643         val = (mac_addr[0] << 8) | mac_addr[1];
2644
2645         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2646
2647         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2648                 (mac_addr[4] << 8) | mac_addr[5];
2649
2650         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2651 }
2652
/* Allocate a fresh page for rx page ring slot @index, DMA-map it and
 * publish the bus address in the matching rx_bd.
 * Returns 0, -ENOMEM if the page allocation fails, or -EIO if the DMA
 * mapping fails (page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the 64-bit bus address across the two bd halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2677
2678 static void
2679 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2680 {
2681         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2682         struct page *page = rx_pg->page;
2683
2684         if (!page)
2685                 return;
2686
2687         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2688                        PCI_DMA_FROMDEVICE);
2689
2690         __free_page(page);
2691         rx_pg->page = NULL;
2692 }
2693
/* Allocate and DMA-map a new rx skb for ring slot @index, publish its
 * bus address in the matching rx_bd and advance rx_prod_bseq.
 * Returns 0, -ENOMEM if the skb allocation fails, or -EIO if the DMA
 * mapping fails (skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round skb->data up to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address across the two bd halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2728
2729 static int
2730 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2731 {
2732         struct status_block *sblk = bnapi->status_blk.msi;
2733         u32 new_link_state, old_link_state;
2734         int is_set = 1;
2735
2736         new_link_state = sblk->status_attn_bits & event;
2737         old_link_state = sblk->status_attn_bits_ack & event;
2738         if (new_link_state != old_link_state) {
2739                 if (new_link_state)
2740                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2741                 else
2742                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2743         } else
2744                 is_set = 0;
2745
2746         return is_set;
2747 }
2748
/* Handle a PHY attention: under phy_lock, check the link-state and
 * timer-abort events and update local/remote link state accordingly.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2762
2763 static inline u16
2764 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2765 {
2766         u16 cons;
2767
2768         /* Tell compiler that status block fields can change. */
2769         barrier();
2770         cons = *bnapi->hw_tx_cons_ptr;
2771         barrier();
2772         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2773                 cons++;
2774         return cons;
2775 }
2776
/* Reclaim completed tx descriptors for this NAPI instance's tx ring:
 * unmap and free up to @budget transmitted skbs, advance the software
 * consumer index, and wake the tx queue if it was stopped and enough
 * descriptors have been freed.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One tx queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			/* Account for the sentinel bd when the packet
			 * wraps past the end of a ring page.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Not all bds of this packet are completed yet. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Step over the frag bds belonging to this skb. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Caught up: re-read the hw index for new completions. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop in the xmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2859
/* Recycle @count rx pages from the page ring's consumer side back to
 * the producer side without allocating anything new.  If @skb is
 * non-NULL, the caller failed to replace the last page attached to the
 * skb's frags: detach that page back into the ring and free the skb.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move page, DMA mapping and bd address from the
		 * consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2915
/* Put an rx skb (and its DMA mapping) back on the ring at slot @prod
 * instead of allocating a replacement -- used after rx errors and
 * after copybreak.  Syncs the header area back to the device first.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Only the header area was synced for the CPU in the rx path;
	 * hand that region back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb pointer is already in place, nothing to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2945
/* Finish receiving one packet into @skb: post a replacement buffer at
 * the producer slot, unmap the skb data, and for split/jumbo packets
 * attach the payload pages from the page ring as frags.
 *
 * @len: packet length; the caller has already subtracted the 4-byte
 *	FCS (it is added back where the raw on-wire length is needed).
 * @hdr_len: linear header length for split packets, 0 for single-bd rx.
 * @ring_idx: packed (cons << 16) | prod for the rx buffer ring.
 *
 * Returns 0 on success; on allocation failure the buffers are recycled
 * back onto the ring and the error code is returned (caller drops the
 * packet).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the payload pages; raw_len restores
			 * the 4-byte FCS the caller stripped.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* This fragment holds only (part of) the FCS:
			 * trim the excess off the skb and recycle the
			 * remaining pages without attaching them.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3044
3045 static inline u16
3046 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3047 {
3048         u16 cons;
3049
3050         /* Tell compiler that status block fields can change. */
3051         barrier();
3052         cons = *bnapi->hw_rx_cons_ptr;
3053         barrier();
3054         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3055                 cons++;
3056         return cons;
3057 }
3058
/* NAPI rx handler: process up to @budget received packets from this
 * NAPI instance's rx ring, hand them to the stack, then write back the
 * updated producer indices to the chip.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region for the CPU; the rest
		 * is either copied (copybreak) or unmapped in bnx2_rx_skb().
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to each frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/payload split rx: the ip_xsum field
			 * carries the header length in this mode.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Bad frame: recycle the buffer(s) and drop it. */
			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte FCS. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Copybreak: copy small frames into a fresh skb
			 * and recycle the original ring buffer.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No vlan group registered: re-insert the
				 * stripped 802.1Q tag into the frame data.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry an 802.1Q
		 * (0x8100) ethertype.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3234
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask the interrupt before scheduling NAPI. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3257
3258 static irqreturn_t
3259 bnx2_msi_1shot(int irq, void *dev_instance)
3260 {
3261         struct bnx2_napi *bnapi = dev_instance;
3262         struct bnx2 *bp = bnapi->bp;
3263
3264         prefetch(bnapi->status_blk.msi);
3265
3266         /* Return here if interrupt is disabled. */
3267         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3268                 return IRQ_HANDLED;
3269
3270         napi_schedule(&bnapi->napi);
3271
3272         return IRQ_HANDLED;
3273 }
3274
/* INTx (legacy) ISR; the line may be shared with other devices, so
 * return IRQ_NONE when the interrupt is not ours.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask the interrupt. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Remember which status index we are servicing so a
		 * later update can be detected.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3313
3314 static inline int
3315 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3316 {
3317         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3318         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3319
3320         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3321             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3322                 return 1;
3323         return 0;
3324 }
3325
3326 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3327                                  STATUS_ATTN_BITS_TIMER_ABORT)
3328
/* Nonzero when any event needs servicing: TX/RX ring work, a pending
 * CNIC event (when CNIC support is built in), or an unacknowledged
 * attention event (link change / timer abort).
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* CNIC work pending: its consumed tag lags the status index. */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* Attention bits that differ from their "ack" copy have not
	 * been processed yet.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3348
/* Missed-MSI workaround on vector 0 (presumably invoked from the
 * driver's periodic timer -- confirm against the caller).  If work is
 * pending but the status index has not moved since the previous idle
 * check, the MSI may have been lost: pulse the MSI enable bit in
 * BNX2_PCICFG_MSI_CONTROL and invoke the handler by hand to recover.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;	/* MSI not enabled; nothing to recover */

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off then back on, then run
			 * the interrupt handler directly.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3370
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver's handler, if
 * any.  bp->cnic_ops is RCU-protected; the tag returned by the
 * handler records how far the CNIC driver has consumed the status
 * block (compared against status_idx in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3387
/* Process link-state / timer-abort attention events.  An event is
 * pending when an attention bit differs from its "ack" copy in the
 * status block.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3407
3408 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3409                           int work_done, int budget)
3410 {
3411         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3412         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3413
3414         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3415                 bnx2_tx_int(bp, bnapi, 0);
3416
3417         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3418                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3419
3420         return work_done;
3421 }
3422
/* NAPI poll handler for MSI-X ring vectors: fast-path TX/RX work
 * only; link and CNIC events are handled by the base vector's
 * bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Re-enable this vector's interrupt, reporting how
			 * far the status block has been processed.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3449
/* NAPI poll handler for the base vector: link/attention events,
 * TX/RX work, and (when built in) CNIC events.  When no work remains
 * within budget, completes NAPI and re-enables the interrupt.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with MASK_INT set, then again
			 * without it, to deassert and re-enable the line.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3498
/* Program the receive filters (promiscuous, multicast hash, unicast
 * exact-match slots, VLAN tag keeping) from dev->flags and the
 * device's address lists.
 *
 * Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep the VLAN tag in hardware only when no vlan group is
	 * registered and the chip allows it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More secondary unicast addresses than exact-match slots:
	 * fall back to promiscuous reception.
	 */
	if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc.list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable the sorter, program it, then re-enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3594
3595 static int __devinit
3596 check_fw_section(const struct firmware *fw,
3597                  const struct bnx2_fw_file_section *section,
3598                  u32 alignment, bool non_empty)
3599 {
3600         u32 offset = be32_to_cpu(section->offset);
3601         u32 len = be32_to_cpu(section->len);
3602
3603         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3604                 return -EINVAL;
3605         if ((non_empty && len == 0) || len > fw->size - offset ||
3606             len & (alignment - 1))
3607                 return -EINVAL;
3608         return 0;
3609 }
3610
3611 static int __devinit
3612 check_mips_fw_entry(const struct firmware *fw,
3613                     const struct bnx2_mips_fw_file_entry *entry)
3614 {
3615         if (check_fw_section(fw, &entry->text, 4, true) ||
3616             check_fw_section(fw, &entry->data, 4, false) ||
3617             check_fw_section(fw, &entry->rodata, 4, false))
3618                 return -EINVAL;
3619         return 0;
3620 }
3621
3622 static int __devinit
3623 bnx2_request_firmware(struct bnx2 *bp)
3624 {
3625         const char *mips_fw_file, *rv2p_fw_file;
3626         const struct bnx2_mips_fw_file *mips_fw;
3627         const struct bnx2_rv2p_fw_file *rv2p_fw;
3628         int rc;
3629
3630         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3631                 mips_fw_file = FW_MIPS_FILE_09;
3632                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3633                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3634                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3635                 else
3636                         rv2p_fw_file = FW_RV2P_FILE_09;
3637         } else {
3638                 mips_fw_file = FW_MIPS_FILE_06;
3639                 rv2p_fw_file = FW_RV2P_FILE_06;
3640         }
3641
3642         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3643         if (rc) {
3644                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3645                        mips_fw_file);
3646                 return rc;
3647         }
3648
3649         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3650         if (rc) {
3651                 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3652                        rv2p_fw_file);
3653                 return rc;
3654         }
3655         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3656         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3657         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3658             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3659             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3660             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3661             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3662             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3663                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3664                        mips_fw_file);
3665                 return -EINVAL;
3666         }
3667         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3668             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3669             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3670                 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3671                        rv2p_fw_file);
3672                 return -EINVAL;
3673         }
3674
3675         return 0;
3676 }
3677
3678 static u32
3679 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3680 {
3681         switch (idx) {
3682         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3683                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3684                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3685                 break;
3686         }
3687         return rv2p_code;
3688 }
3689
/* Download one RV2P processor image.  Instructions are written as
 * 64-bit pairs through INSTR_HIGH/INSTR_LOW and committed via the
 * per-processor ADDR_CMD register; up to 8 fixup entries then patch
 * individual instructions (e.g. the BD page size).  The processor is
 * left in reset; it is un-stalled later.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Select the command/address registers for this processor. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Write the image, one 8-byte instruction per iteration. */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;    /* instruction slot + write cmd */
                REG_WR(bp, addr, val);
        }

        /* Apply the fixup table: each nonzero entry names a word in
         * the image whose instruction is rewritten in place.
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3749
3750 static int
3751 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3752             const struct bnx2_mips_fw_file_entry *fw_entry)
3753 {
3754         u32 addr, len, file_offset;
3755         __be32 *data;
3756         u32 offset;
3757         u32 val;
3758
3759         /* Halt the CPU. */
3760         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3761         val |= cpu_reg->mode_value_halt;
3762         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3763         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3764
3765         /* Load the Text area. */
3766         addr = be32_to_cpu(fw_entry->text.addr);
3767         len = be32_to_cpu(fw_entry->text.len);
3768         file_offset = be32_to_cpu(fw_entry->text.offset);
3769         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3770
3771         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3772         if (len) {
3773                 int j;
3774
3775                 for (j = 0; j < (len / 4); j++, offset += 4)
3776                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3777         }
3778
3779         /* Load the Data area. */
3780         addr = be32_to_cpu(fw_entry->data.addr);
3781         len = be32_to_cpu(fw_entry->data.len);
3782         file_offset = be32_to_cpu(fw_entry->data.offset);
3783         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3784
3785         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3786         if (len) {
3787                 int j;
3788
3789                 for (j = 0; j < (len / 4); j++, offset += 4)
3790                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3791         }
3792
3793         /* Load the Read-Only area. */
3794         addr = be32_to_cpu(fw_entry->rodata.addr);
3795         len = be32_to_cpu(fw_entry->rodata.len);
3796         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3797         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3798
3799         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3800         if (len) {
3801                 int j;
3802
3803                 for (j = 0; j < (len / 4); j++, offset += 4)
3804                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3805         }
3806
3807         /* Clear the pre-fetch instruction. */
3808         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3809
3810         val = be32_to_cpu(fw_entry->start_addr);
3811         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3812
3813         /* Start the CPU. */
3814         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3815         val &= ~cpu_reg->mode_value_halt;
3816         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3817         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3818
3819         return 0;
3820 }
3821
3822 static int
3823 bnx2_init_cpus(struct bnx2 *bp)
3824 {
3825         const struct bnx2_mips_fw_file *mips_fw =
3826                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3827         const struct bnx2_rv2p_fw_file *rv2p_fw =
3828                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3829         int rc;
3830
3831         /* Initialize the RV2P processor. */
3832         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3833         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3834
3835         /* Initialize the RX Processor. */
3836         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3837         if (rc)
3838                 goto init_cpu_err;
3839
3840         /* Initialize the TX Processor. */
3841         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3842         if (rc)
3843                 goto init_cpu_err;
3844
3845         /* Initialize the TX Patch-up Processor. */
3846         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3847         if (rc)
3848                 goto init_cpu_err;
3849
3850         /* Initialize the Completion Processor. */
3851         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3852         if (rc)
3853                 goto init_cpu_err;
3854
3855         /* Initialize the Command Processor. */
3856         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3857
3858 init_cpu_err:
3859         return rc;
3860 }
3861
/* Transition the chip between PCI power states D0 and D3hot.
 *
 * D0: clear the magic-packet/ACPI receive state and disable the RPM
 * ACPI pattern matcher.  D3hot: when WOL is enabled, renegotiate a
 * low-speed link (copper only), configure the MAC for magic-packet
 * reception with broadcast + all-multicast sorting, notify the boot
 * firmware, then write PMCSR to enter D3.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link stays up at low power, then restore
			 * the user's settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Broadcast + multicast sorting while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether WOL is in effect. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 only enter D3hot when WOL is on.
			 * NOTE(review): presumably a chip erratum -- confirm.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3999
4000 static int
4001 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4002 {
4003         u32 val;
4004         int j;
4005
4006         /* Request access to the flash interface. */
4007         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4008         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4009                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4010                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4011                         break;
4012
4013                 udelay(5);
4014         }
4015
4016         if (j >= NVRAM_TIMEOUT_COUNT)
4017                 return -EBUSY;
4018
4019         return 0;
4020 }
4021
4022 static int
4023 bnx2_release_nvram_lock(struct bnx2 *bp)
4024 {
4025         int j;
4026         u32 val;
4027
4028         /* Relinquish nvram interface. */
4029         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4030
4031         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4032                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4033                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4034                         break;
4035
4036                 udelay(5);
4037         }
4038
4039         if (j >= NVRAM_TIMEOUT_COUNT)
4040                 return -EBUSY;
4041
4042         return 0;
4043 }
4044
4045
4046 static int
4047 bnx2_enable_nvram_write(struct bnx2 *bp)
4048 {
4049         u32 val;
4050
4051         val = REG_RD(bp, BNX2_MISC_CFG);
4052         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4053
4054         if (bp->flash_info->flags & BNX2_NV_WREN) {
4055                 int j;
4056
4057                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4058                 REG_WR(bp, BNX2_NVM_COMMAND,
4059                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4060
4061                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4062                         udelay(5);
4063
4064                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4065                         if (val & BNX2_NVM_COMMAND_DONE)
4066                                 break;
4067                 }
4068
4069                 if (j >= NVRAM_TIMEOUT_COUNT)
4070                         return -EBUSY;
4071         }
4072         return 0;
4073 }
4074
4075 static void
4076 bnx2_disable_nvram_write(struct bnx2 *bp)
4077 {
4078         u32 val;
4079
4080         val = REG_RD(bp, BNX2_MISC_CFG);
4081         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4082 }
4083
4084
4085 static void
4086 bnx2_enable_nvram_access(struct bnx2 *bp)
4087 {
4088         u32 val;
4089
4090         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4091         /* Enable both bits, even on read. */
4092         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4093                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4094 }
4095
4096 static void
4097 bnx2_disable_nvram_access(struct bnx2 *bp)
4098 {
4099         u32 val;
4100
4101         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4102         /* Disable both bits, even after read. */
4103         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4104                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4105                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4106 }
4107
/* Erase the flash page containing @offset.  A no-op for buffered
 * flash parts, which need no explicit erase.  Returns 0 on success
 * or -EBUSY if the erase does not complete within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4147
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, as it appears in flash).  @cmd_flags is ORed into the
 * command word (e.g. first/last markers for a multi-word sequence).
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store in flash byte order (big-endian). */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4191
4192
4193 static int
4194 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4195 {
4196         u32 cmd;
4197         __be32 val32;
4198         int j;
4199
4200         /* Build the command word. */
4201         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4202
4203         /* Calculate an offset of a buffered flash, not needed for 5709. */
4204         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4205                 offset = ((offset / bp->flash_info->page_size) <<
4206                           bp->flash_info->page_bits) +
4207                          (offset % bp->flash_info->page_size);
4208         }
4209
4210         /* Need to clear DONE bit separately. */
4211         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4212
4213         memcpy(&val32, val, 4);
4214
4215         /* Write the data. */
4216         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4217
4218         /* Address of the NVRAM to write to. */
4219         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4220
4221         /* Issue the write command. */
4222         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4223
4224         /* Wait for completion. */
4225         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4226                 udelay(5);
4227
4228                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4229                         break;
4230         }
4231         if (j >= NVRAM_TIMEOUT_COUNT)
4232                 return -EBUSY;
4233
4234         return 0;
4235 }
4236
/* Identify the flash/EEPROM part behind the NVRAM interface, record it in
 * bp->flash_info, and determine the flash size (shared-memory config value
 * if non-zero, otherwise the table entry's total_size).
 * Returns 0 on success, -ENODEV if no flash_table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709 has a fixed flash interface; no strap decoding is needed. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of CFG1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap set to decode against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither loop matched an entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* A non-zero size in shared hw config overrides the table value. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4319
4320 static int
4321 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4322                 int buf_size)
4323 {
4324         int rc = 0;
4325         u32 cmd_flags, offset32, len32, extra;
4326
4327         if (buf_size == 0)
4328                 return 0;
4329
4330         /* Request access to the flash interface. */
4331         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4332                 return rc;
4333
4334         /* Enable access to flash interface */
4335         bnx2_enable_nvram_access(bp);
4336
4337         len32 = buf_size;
4338         offset32 = offset;
4339         extra = 0;
4340
4341         cmd_flags = 0;
4342
4343         if (offset32 & 3) {
4344                 u8 buf[4];
4345                 u32 pre_len;
4346
4347                 offset32 &= ~3;
4348                 pre_len = 4 - (offset & 3);
4349
4350                 if (pre_len >= len32) {
4351                         pre_len = len32;
4352                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4353                                     BNX2_NVM_COMMAND_LAST;
4354                 }
4355                 else {
4356                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4357                 }
4358
4359                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4360
4361                 if (rc)
4362                         return rc;
4363
4364                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4365
4366                 offset32 += 4;
4367                 ret_buf += pre_len;
4368                 len32 -= pre_len;
4369         }
4370         if (len32 & 3) {
4371                 extra = 4 - (len32 & 3);
4372                 len32 = (len32 + 4) & ~3;
4373         }
4374
4375         if (len32 == 4) {
4376                 u8 buf[4];
4377
4378                 if (cmd_flags)
4379                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4380                 else
4381                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4382                                     BNX2_NVM_COMMAND_LAST;
4383
4384                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4385
4386                 memcpy(ret_buf, buf, 4 - extra);
4387         }
4388         else if (len32 > 0) {
4389                 u8 buf[4];
4390
4391                 /* Read the first word. */
4392                 if (cmd_flags)
4393                         cmd_flags = 0;
4394                 else
4395                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4396
4397                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4398
4399                 /* Advance to the next dword. */
4400                 offset32 += 4;
4401                 ret_buf += 4;
4402                 len32 -= 4;
4403
4404                 while (len32 > 4 && rc == 0) {
4405                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4406
4407                         /* Advance to the next dword. */
4408                         offset32 += 4;
4409                         ret_buf += 4;
4410                         len32 -= 4;
4411                 }
4412
4413                 if (rc)
4414                         return rc;
4415
4416                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4417                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4418
4419                 memcpy(ret_buf, buf, 4 - extra);
4420         }
4421
4422         /* Disable access to flash interface */
4423         bnx2_disable_nvram_access(bp);
4424
4425         bnx2_release_nvram_lock(bp);
4426
4427         return rc;
4428 }
4429
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned head/tail bytes are merged with existing flash contents via a
 * read-modify-write through @align_buf.  Non-buffered flash is written a
 * page at a time: the page is read into @flash_buffer, erased, and
 * rewritten with the new data spliced in.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: pull the existing dword so its leading bytes
	 * can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pull the existing dword so its trailing bytes
	 * can be preserved. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned image: preserved head + caller data +
	 * preserved tail. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch page image (264 covers the
	 * largest page_size in flash_table for these parts). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				/* NOTE(review): jumping out here leaves the
				 * NVRAM lock held and access enabled — same
				 * on every error exit below; verify whether
				 * these paths should release the lock. */
				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST is set on the final dword of the page, or —
			 * for buffered flash, which skips the write-back
			 * loops — the final dword of the data. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4609
4610 static void
4611 bnx2_init_fw_cap(struct bnx2 *bp)
4612 {
4613         u32 val, sig = 0;
4614
4615         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4616         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4617
4618         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4619                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4620
4621         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4622         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4623                 return;
4624
4625         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4626                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4627                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4628         }
4629
4630         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4631             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4632                 u32 link;
4633
4634                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4635
4636                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4637                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4638                         bp->phy_port = PORT_FIBRE;
4639                 else
4640                         bp->phy_port = PORT_TP;
4641
4642                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4643                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4644         }
4645
4646         if (netif_running(bp->dev) && sig)
4647                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4648 }
4649
/* Map the MSI-X table and PBA through separate GRC windows: switch the
 * GRC window base into separate-window mode, then point window 2 at the
 * MSI-X table and window 3 at the pending-bit array.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4658
/* Perform a coordinated chip reset: quiesce DMA, handshake with the
 * firmware before and after, issue the chip-specific reset (MISC_COMMAND
 * on 5709, PCICFG_MISC_CONFIG elsewhere), verify endian configuration,
 * and re-query firmware capabilities.  @reset_code is the BNX2_DRV_MSG
 * reason passed to the firmware.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; the read flushes the
		 * posted write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Restore window/swap config through PCI config space,
		 * since the reset cleared it. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; a remote-PHY port change needs
	 * the default remote link reprogrammed. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Reset cleared the GRC windows; restore the MSI-X mapping. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4764
/* Bring the chip to an operational state after reset: configure DMA and
 * byte swapping, load internal CPUs, program MAC address/MTU/rx buffers,
 * set up the host coalescing block (including per-vector MSI-X status
 * blocks), initialize the receive filter, and tell the firmware reset is
 * complete.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA configuration: swap modes plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force a single DMA engine. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* PCI-X: clear the relaxed-ordering enable in the command register. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip CPUs. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* Mailbox queue configuration. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Rx buffer thresholds are sized for at least a 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status/stats block and per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and statistics
	 * blocks live (64-bit DMA addresses, split low/high). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters for the default vector. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer modes. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status block config for the additional MSI-X vectors
	 * (vector 0 uses the global config written above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware reset processing is finished. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	/* Enable the default set of blocks; the read flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4989
4990 static void
4991 bnx2_clear_ring_states(struct bnx2 *bp)
4992 {
4993         struct bnx2_napi *bnapi;
4994         struct bnx2_tx_ring_info *txr;
4995         struct bnx2_rx_ring_info *rxr;
4996         int i;
4997
4998         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4999                 bnapi = &bp->bnx2_napi[i];
5000                 txr = &bnapi->tx_ring;
5001                 rxr = &bnapi->rx_ring;
5002
5003                 txr->tx_cons = 0;
5004                 txr->hw_tx_cons = 0;
5005                 rxr->rx_prod_bseq = 0;
5006                 rxr->rx_prod = 0;
5007                 rxr->rx_cons = 0;
5008                 rxr->rx_pg_prod = 0;
5009                 rxr->rx_pg_cons = 0;
5010         }
5011 }
5012
5013 static void
5014 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5015 {
5016         u32 val, offset0, offset1, offset2, offset3;
5017         u32 cid_addr = GET_CID_ADDR(cid);
5018
5019         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5020                 offset0 = BNX2_L2CTX_TYPE_XI;
5021                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5022                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5023                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5024         } else {
5025                 offset0 = BNX2_L2CTX_TYPE;
5026                 offset1 = BNX2_L2CTX_CMD_TYPE;
5027                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5028                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5029         }
5030         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5031         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5032
5033         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5034         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5035
5036         val = (u64) txr->tx_desc_mapping >> 32;
5037         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5038
5039         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5040         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5041 }
5042
5043 static void
5044 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5045 {
5046         struct tx_bd *txbd;
5047         u32 cid = TX_CID;
5048         struct bnx2_napi *bnapi;
5049         struct bnx2_tx_ring_info *txr;
5050
5051         bnapi = &bp->bnx2_napi[ring_num];
5052         txr = &bnapi->tx_ring;
5053
5054         if (ring_num == 0)
5055                 cid = TX_CID;
5056         else
5057                 cid = TX_TSS_CID + ring_num - 1;
5058
5059         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5060
5061         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5062
5063         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5064         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5065
5066         txr->tx_prod = 0;
5067         txr->tx_prod_bseq = 0;
5068
5069         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5070         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5071
5072         bnx2_init_tx_context(bp, cid, txr);
5073 }
5074
5075 static void
5076 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5077                      int num_rings)
5078 {
5079         int i;
5080         struct rx_bd *rxbd;
5081
5082         for (i = 0; i < num_rings; i++) {
5083                 int j;
5084
5085                 rxbd = &rx_ring[i][0];
5086                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5087                         rxbd->rx_bd_len = buf_size;
5088                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5089                 }
5090                 if (i == (num_rings - 1))
5091                         j = 0;
5092                 else
5093                         j = i + 1;
5094                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5095                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5096         }
5097 }
5098
/* Initialize RX ring @ring_num: chain the BD pages, program the RX
 * context, optionally configure the jumbo page ring, then pre-fill
 * both rings with receive buffers and notify the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Set buffer size and flags on every BD and chain the pages. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Disable the page ring by default; re-enabled below if used. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the secondary page-buffer ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Host address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Host address of the first normal-ring BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs, likewise best-effort. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox (doorbell) addresses for this ring. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the buffers just posted. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5178
/* Initialize every TX and RX ring.  When multiple TX rings are used,
 * enable TSS; when multiple RX rings are used, program the RSS
 * indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table one byte at a time and flush
		 * each group of 4 entries as a single big-endian word.
		 * Entries cycle over the non-default RX rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5223
5224 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5225 {
5226         u32 max, num_rings = 1;
5227
5228         while (ring_size > MAX_RX_DESC_CNT) {
5229                 ring_size -= MAX_RX_DESC_CNT;
5230                 num_rings++;
5231         }
5232         /* round to next power of 2 */
5233         max = max_size;
5234         while ((max & num_rings) == 0)
5235                 max >>= 1;
5236
5237         if (num_rings != max)
5238                 max <<= 1;
5239
5240         return max;
5241 }
5242
/* Compute the RX buffer geometry for the requested ring @size and the
 * current MTU, and store all derived sizes/thresholds in *bp.  When a
 * full frame will not fit in one page, the jumbo page ring is enabled
 * and the normal ring holds only small header buffers.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): - 40 presumably accounts for IP + TCP
		 * header bytes kept in the header buffer — confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Normal-ring buffers shrink to header-copy size. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5281
5282 static void
5283 bnx2_free_tx_skbs(struct bnx2 *bp)
5284 {
5285         int i;
5286
5287         for (i = 0; i < bp->num_tx_rings; i++) {
5288                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5289                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5290                 int j;
5291
5292                 if (txr->tx_buf_ring == NULL)
5293                         continue;
5294
5295                 for (j = 0; j < TX_DESC_CNT; ) {
5296                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5297                         struct sk_buff *skb = tx_buf->skb;
5298
5299                         if (skb == NULL) {
5300                                 j++;
5301                                 continue;
5302                         }
5303
5304                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5305
5306                         tx_buf->skb = NULL;
5307
5308                         j += skb_shinfo(skb)->nr_frags + 1;
5309                         dev_kfree_skb(skb);
5310                 }
5311         }
5312 }
5313
5314 static void
5315 bnx2_free_rx_skbs(struct bnx2 *bp)
5316 {
5317         int i;
5318
5319         for (i = 0; i < bp->num_rx_rings; i++) {
5320                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5321                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5322                 int j;
5323
5324                 if (rxr->rx_buf_ring == NULL)
5325                         return;
5326
5327                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5328                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5329                         struct sk_buff *skb = rx_buf->skb;
5330
5331                         if (skb == NULL)
5332                                 continue;
5333
5334                         pci_unmap_single(bp->pdev,
5335                                          pci_unmap_addr(rx_buf, mapping),
5336                                          bp->rx_buf_use_size,
5337                                          PCI_DMA_FROMDEVICE);
5338
5339                         rx_buf->skb = NULL;
5340
5341                         dev_kfree_skb(skb);
5342                 }
5343                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5344                         bnx2_free_rx_page(bp, rxr, j);
5345         }
5346 }
5347
/* Release every TX and RX buffer owned by the driver.  Callers (e.g.
 * bnx2_reset_nic) reset the chip first, so no DMA is in flight.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5354
5355 static int
5356 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5357 {
5358         int rc;
5359
5360         rc = bnx2_reset_chip(bp, reset_code);
5361         bnx2_free_skbs(bp);
5362         if (rc)
5363                 return rc;
5364
5365         if ((rc = bnx2_init_chip(bp)) != 0)
5366                 return rc;
5367
5368         bnx2_init_all_rings(bp);
5369         return 0;
5370 }
5371
5372 static int
5373 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5374 {
5375         int rc;
5376
5377         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5378                 return rc;
5379
5380         spin_lock_bh(&bp->phy_lock);
5381         bnx2_init_phy(bp, reset_phy);
5382         bnx2_set_link(bp);
5383         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5384                 bnx2_remote_phy_event(bp);
5385         spin_unlock_bh(&bp->phy_lock);
5386         return 0;
5387 }
5388
5389 static int
5390 bnx2_shutdown_chip(struct bnx2 *bp)
5391 {
5392         u32 reset_code;
5393
5394         if (bp->flags & BNX2_FLAG_NO_WOL)
5395                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5396         else if (bp->wol)
5397                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5398         else
5399                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5400
5401         return bnx2_reset_chip(bp, reset_code);
5402 }
5403
/* Self-test: probe a table of chip registers.  For each register,
 * writing 0 must leave all rw_mask bits clear, writing all-ones must
 * set them, and ro_mask bits must survive both writes unchanged.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Register table; entries flagged BNX2_FL_NOT_5709 are skipped
	 * on 5709-family chips.  Terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write 0: writable bits must read back as 0. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be unchanged by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: writable bits must read back set. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5574
5575 static int
5576 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5577 {
5578         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5579                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5580         int i;
5581
5582         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5583                 u32 offset;
5584
5585                 for (offset = 0; offset < size; offset += 4) {
5586
5587                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5588
5589                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5590                                 test_pattern[i]) {
5591                                 return -ENODEV;
5592                         }
5593                 }
5594         }
5595         return 0;
5596 }
5597
5598 static int
5599 bnx2_test_memory(struct bnx2 *bp)
5600 {
5601         int ret = 0;
5602         int i;
5603         static struct mem_entry {
5604                 u32   offset;
5605                 u32   len;
5606         } mem_tbl_5706[] = {
5607                 { 0x60000,  0x4000 },
5608                 { 0xa0000,  0x3000 },
5609                 { 0xe0000,  0x4000 },
5610                 { 0x120000, 0x4000 },
5611                 { 0x1a0000, 0x4000 },
5612                 { 0x160000, 0x4000 },
5613                 { 0xffffffff, 0    },
5614         },
5615         mem_tbl_5709[] = {
5616                 { 0x60000,  0x4000 },
5617                 { 0xa0000,  0x3000 },
5618                 { 0xe0000,  0x4000 },
5619                 { 0x120000, 0x4000 },
5620                 { 0x1a0000, 0x4000 },
5621                 { 0xffffffff, 0    },
5622         };
5623         struct mem_entry *mem_tbl;
5624
5625         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5626                 mem_tbl = mem_tbl_5709;
5627         else
5628                 mem_tbl = mem_tbl_5706;
5629
5630         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5631                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5632                         mem_tbl[i].len)) != 0) {
5633                         return ret;
5634                 }
5635         }
5636
5637         return ret;
5638 }
5639
5640 #define BNX2_MAC_LOOPBACK       0
5641 #define BNX2_PHY_LOOPBACK       1
5642
/* Self-test: transmit one frame through MAC or PHY loopback and check
 * that it is received intact on ring 0.  Returns 0 on success, -ENODEV
 * on any mismatch, -EINVAL for an unknown mode, -ENOMEM/-EIO on setup
 * failure.  PHY loopback is skipped (returns 0) with a remote PHY.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our MAC as destination, zeroed source,
	 * payload bytes set to their own offset (mod 256).
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force an immediate status block update to capture the RX
	 * consumer index before transmitting.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post the frame as a single TX BD and ring the doorbell. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to observe the result. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must be fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* The chip must not have flagged any receive errors. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (minus 4-byte CRC) ... */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* ... and the payload must be byte-for-byte identical. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5773
5774 #define BNX2_MAC_LOOPBACK_FAILED        1
5775 #define BNX2_PHY_LOOPBACK_FAILED        2
5776 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5777                                          BNX2_PHY_LOOPBACK_FAILED)
5778
5779 static int
5780 bnx2_test_loopback(struct bnx2 *bp)
5781 {
5782         int rc = 0;
5783
5784         if (!netif_running(bp->dev))
5785                 return BNX2_LOOPBACK_FAILED;
5786
5787         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5788         spin_lock_bh(&bp->phy_lock);
5789         bnx2_init_phy(bp, 1);
5790         spin_unlock_bh(&bp->phy_lock);
5791         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5792                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5793         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5794                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5795         return rc;
5796 }
5797
5798 #define NVRAM_SIZE 0x200
5799 #define CRC32_RESIDUAL 0xdebb20e3
5800
5801 static int
5802 bnx2_test_nvram(struct bnx2 *bp)
5803 {
5804         __be32 buf[NVRAM_SIZE / 4];
5805         u8 *data = (u8 *) buf;
5806         int rc = 0;
5807         u32 magic, csum;
5808
5809         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5810                 goto test_nvram_done;
5811
5812         magic = be32_to_cpu(buf[0]);
5813         if (magic != 0x669955aa) {
5814                 rc = -ENODEV;
5815                 goto test_nvram_done;
5816         }
5817
5818         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5819                 goto test_nvram_done;
5820
5821         csum = ether_crc_le(0x100, data);
5822         if (csum != CRC32_RESIDUAL) {
5823                 rc = -ENODEV;
5824                 goto test_nvram_done;
5825         }
5826
5827         csum = ether_crc_le(0x100, data + 0x100);
5828         if (csum != CRC32_RESIDUAL) {
5829                 rc = -ENODEV;
5830         }
5831
5832 test_nvram_done:
5833         return rc;
5834 }
5835
/* Ethtool self-test: report link state.  Returns 0 if link is up,
 * -ENODEV if the device is down or no link is established.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* With a remote-PHY capable device the firmware tracks link
	 * state; just report the cached value.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* The link-status bit is latched; read the register twice so the
	 * second read reflects the current state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5861
5862 static int
5863 bnx2_test_intr(struct bnx2 *bp)
5864 {
5865         int i;
5866         u16 status_idx;
5867
5868         if (!netif_running(bp->dev))
5869                 return -ENODEV;
5870
5871         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5872
5873         /* This register is not touched during run-time. */
5874         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5875         REG_RD(bp, BNX2_HC_COMMAND);
5876
5877         for (i = 0; i < 10; i++) {
5878                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5879                         status_idx) {
5880
5881                         break;
5882                 }
5883
5884                 msleep_interruptible(10);
5885         }
5886         if (i < 10)
5887                 return 0;
5888
5889         return -ENODEV;
5890 }
5891
5892 /* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled on this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register and read it back;
	 * a link partner must at least assert signal detect.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Autoneg debug status; read twice so the second read reflects
	 * the current (non-latched) state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* No link if we are out of sync or seeing invalid RUDI codes. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Expansion register 1; again read twice for current state. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	/* All checks passed: the partner is up without autoneg. */
	return 1;
}
5923
/* Periodic serdes state machine for the 5706: handles parallel
 * detection (forcing 1G full duplex when the partner is not
 * autonegotiating) and reverting to autoneg when the partner starts
 * negotiating again.  Runs from bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently (re)started: give it time to finish
		 * before poking the link.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner appears to be up but not negotiating:
			 * force 1000/full (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was forced via parallel detect; if the partner is
		 * now sending autoneg pages, re-enable autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read AN debug status twice for the current value. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* We think link is up but the PHY lost sync:
			 * first force the link down, then on the next
			 * pass re-evaluate link state.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5985
/* Periodic serdes handling for the 5708: when autoneg fails to bring
 * the link up, alternate between forced 2.5G mode and autoneg.
 * Runs from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* A remote PHY manages link state; nothing to do here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Without 2.5G capability there is no mode to toggle. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a recent autoneg restart time to complete. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Link still down: flip between forced 2.5G and autoneg
		 * so we eventually match whatever the partner expects.
		 */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6018
/* Driver heartbeat timer: sends the firmware keep-alive, refreshes the
 * firmware drop counter, works around chip statistics bugs, and runs
 * the per-chip serdes state machines.  Re-arms itself at
 * bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. reset in progress): skip the
	 * body but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (not one-shot) can lose edges; check for it. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-level RX drops are only visible via this register. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6054
6055 static int
6056 bnx2_request_irq(struct bnx2 *bp)
6057 {
6058         unsigned long flags;
6059         struct bnx2_irq *irq;
6060         int rc = 0, i;
6061
6062         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6063                 flags = 0;
6064         else
6065                 flags = IRQF_SHARED;
6066
6067         for (i = 0; i < bp->irq_nvecs; i++) {
6068                 irq = &bp->irq_tbl[i];
6069                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6070                                  &bp->bnx2_napi[i]);
6071                 if (rc)
6072                         break;
6073                 irq->requested = 1;
6074         }
6075         return rc;
6076 }
6077
6078 static void
6079 bnx2_free_irq(struct bnx2 *bp)
6080 {
6081         struct bnx2_irq *irq;
6082         int i;
6083
6084         for (i = 0; i < bp->irq_nvecs; i++) {
6085                 irq = &bp->irq_tbl[i];
6086                 if (irq->requested)
6087                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6088                 irq->requested = 0;
6089         }
6090         if (bp->flags & BNX2_FLAG_USING_MSI)
6091                 pci_disable_msi(bp->pdev);
6092         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6093                 pci_disable_msix(bp->pdev);
6094
6095         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6096 }
6097
/* Try to switch the device to MSI-X.  Programs the chip's MSI-X table
 * windows, requests BNX2_MAX_MSIX_VEC vectors from the PCI core, and on
 * success fills bp->irq_tbl and sets the MSIX/one-shot flags.  On any
 * failure the function returns silently and the caller falls back to
 * MSI or INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Map the MSI-X table and PBA into GRC windows 2 and 3. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	/* Only msix_vecs vectors will actually be used for rings, but
	 * all hardware vectors are populated in the table below.
	 */
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6128
/* Choose the interrupt mode (MSI-X, MSI, or INTx) and size the TX/RX
 * ring counts accordingly.  Defaults to a single INTx vector, then
 * upgrades to MSI-X (when capable and multiple CPUs are online) or MSI.
 * @dis_msi forces legacy INTx.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the RX ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Baseline: single legacy interrupt. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to plain MSI if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI mode. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX queue count must be a power of two for the ring hashing. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6162
6163 /* Called with rtnl_lock */
/* net_device_ops .ndo_open: bring the device up.  Allocates rings,
 * requests interrupts, initializes the NIC, verifies MSI delivery
 * (falling back to INTx if the test fails), and starts the TX queues.
 * Returns 0 on success or a negative errno, unwinding on failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Pick MSI-X/MSI/INTx before allocating per-vector resources. */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI and redo the whole interrupt and
			 * NIC setup in forced-INTx mode.
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind in reverse order of setup. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6239
/* Workqueue handler scheduled from bnx2_tx_timeout(): fully stop the
 * netif, re-initialize the NIC, and restart.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* Keep interrupts gated until bnx2_netif_start() re-enables
	 * them via the intr_sem handshake.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
6255
/* net_device_ops .ndo_tx_timeout: defer the reset to process context
 * via the workqueue (we cannot reset from this atomic context).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6264
6265 #ifdef BCM_VLAN
6266 /* Called with rtnl_lock */
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce traffic before changing the VLAN group pointer. */
	if (netif_running(dev))
		bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;

	/* Device is down: the new group takes effect at next open. */
	if (!netif_running(dev))
		return;

	/* Re-program RX filtering for the new VLAN configuration and,
	 * when supported, tell the firmware whether to keep VLAN tags.
	 */
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
6286 #endif
6287
6288 /* Called with netif_tx_lock.
6289  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6290  * netif_wake_queue().
6291  */
/* net_device_ops .ndo_start_xmit: map an skb for DMA and post it to the
 * selected TX ring as a chain of buffer descriptors (one BD for the
 * linear head plus one per page fragment), then ring the doorbell.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring unexpectedly has
 * no room (should not happen while the queue is awake).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD for the head plus one per fragment. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Offload L4 checksum computation to the chip. */
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag goes in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO: encode the MSS and IP/TCP header-extension sizes
		 * into the BD so the chip can segment the packet.
		 */
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: extension-header length (offset of
			 * the TCP header beyond the basic IPv6 header)
			 * is split across several BD flag fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: report IP options + TCP options size
			 * (in 32-bit words) when headers are extended.
			 */
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* DMA-map the head and all fragments in one call. */
	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_head;

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD: linear head, carries the START flag. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	/* Stop the queue when nearly full; re-wake immediately if a
	 * concurrent completion freed enough descriptors.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6428
6429 /* Called with rtnl_lock */
/* net_device_ops .ndo_stop (called with rtnl_lock): quiesce the device
 * in strict teardown order -- cancel deferred work, gate interrupts,
 * stop NAPI and the timer, shut down the chip, then free IRQs, buffers,
 * and ring memory before dropping to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset_task is running or about to run. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6449
/* Combine a 64-bit hardware counter stored as separate _hi/_lo 32-bit
 * words into a single unsigned long (64-bit hosts only).  The
 * expansion is now fully parenthesized: the original expanded to a
 * bare additive expression, so using the macro inside a larger
 * expression (e.g. `2 * GET_NET_STATS64(x)`) would mis-bind.  Existing
 * additive uses are unaffected.
 */
#define GET_NET_STATS64(ctr)                                    \
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low word of each counter is reported. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6462
/* net_device_ops .ndo_get_stats: translate the chip's hardware
 * statistics block into struct net_device_stats.  64-bit counters are
 * read via GET_NET_STATS, which truncates to 32 bits on 32-bit hosts.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	/* Stats block not allocated yet (device never opened). */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) (stats_blk->stat_IfInFTQDiscards +
		stats_blk->stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reliable on 5706 and 5708 A0;
	 * report zero there rather than a bogus count.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include firmware-level drops (refreshed from bnx2_timer). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInFTQDiscards +
		stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);

	return net_stats;
}
6539
6540 /* All ethtool functions called with rtnl_lock */
6541
6542 static int
6543 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6544 {
6545         struct bnx2 *bp = netdev_priv(dev);
6546         int support_serdes = 0, support_copper = 0;
6547
6548         cmd->supported = SUPPORTED_Autoneg;
6549         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6550                 support_serdes = 1;
6551                 support_copper = 1;
6552         } else if (bp->phy_port == PORT_FIBRE)
6553                 support_serdes = 1;
6554         else
6555                 support_copper = 1;
6556
6557         if (support_serdes) {
6558                 cmd->supported |= SUPPORTED_1000baseT_Full |
6559                         SUPPORTED_FIBRE;
6560                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6561                         cmd->supported |= SUPPORTED_2500baseX_Full;
6562
6563         }
6564         if (support_copper) {
6565                 cmd->supported |= SUPPORTED_10baseT_Half |
6566                         SUPPORTED_10baseT_Full |
6567                         SUPPORTED_100baseT_Half |
6568                         SUPPORTED_100baseT_Full |
6569                         SUPPORTED_1000baseT_Full |
6570                         SUPPORTED_TP;
6571
6572         }
6573
6574         spin_lock_bh(&bp->phy_lock);
6575         cmd->port = bp->phy_port;
6576         cmd->advertising = bp->advertising;
6577
6578         if (bp->autoneg & AUTONEG_SPEED) {
6579                 cmd->autoneg = AUTONEG_ENABLE;
6580         }
6581         else {
6582                 cmd->autoneg = AUTONEG_DISABLE;
6583         }
6584
6585         if (netif_carrier_ok(dev)) {
6586                 cmd->speed = bp->line_speed;
6587                 cmd->duplex = bp->duplex;
6588         }
6589         else {
6590                 cmd->speed = -1;
6591                 cmd->duplex = -1;
6592         }
6593         spin_unlock_bh(&bp->phy_lock);
6594
6595         cmd->transceiver = XCVR_INTERNAL;
6596         cmd->phy_address = bp->phy_addr;
6597
6598         return 0;
6599 }
6600
/* ethtool .set_settings: validate and apply a new link configuration
 * (port, autoneg, advertised modes or forced speed/duplex).  Settings
 * are staged in locals and only committed once all checks pass.
 * Returns 0 on success or -EINVAL on any invalid combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Stage new values so a validation failure leaves bp untouched. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports requires remote-PHY capability. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G is fibre-only and needs hardware support. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single recognized mode: advertise everything
			 * the selected port supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1000/full and 2500/full. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the staged values. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6695
6696 static void
6697 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6698 {
6699         struct bnx2 *bp = netdev_priv(dev);
6700
6701         strcpy(info->driver, DRV_MODULE_NAME);
6702         strcpy(info->version, DRV_MODULE_VERSION);
6703         strcpy(info->bus_info, pci_name(bp->pdev));
6704         strcpy(info->fw_version, bp->fw_version);
6705 }
6706
6707 #define BNX2_REGDUMP_LEN                (32 * 1024)
6708
/* ethtool get_regs_len: the register dump has a fixed size for all
 * supported chips.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6714
6715 static void
6716 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6717 {
6718         u32 *p = _p, i, offset;
6719         u8 *orig_p = _p;
6720         struct bnx2 *bp = netdev_priv(dev);
6721         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6722                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6723                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6724                                  0x1040, 0x1048, 0x1080, 0x10a4,
6725                                  0x1400, 0x1490, 0x1498, 0x14f0,
6726                                  0x1500, 0x155c, 0x1580, 0x15dc,
6727                                  0x1600, 0x1658, 0x1680, 0x16d8,
6728                                  0x1800, 0x1820, 0x1840, 0x1854,
6729                                  0x1880, 0x1894, 0x1900, 0x1984,
6730                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6731                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6732                                  0x2000, 0x2030, 0x23c0, 0x2400,
6733                                  0x2800, 0x2820, 0x2830, 0x2850,
6734                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6735                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6736                                  0x4080, 0x4090, 0x43c0, 0x4458,
6737                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6738                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6739                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6740                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6741                                  0x6800, 0x6848, 0x684c, 0x6860,
6742                                  0x6888, 0x6910, 0x8000 };
6743
6744         regs->version = 0;
6745
6746         memset(p, 0, BNX2_REGDUMP_LEN);
6747
6748         if (!netif_running(bp->dev))
6749                 return;
6750
6751         i = 0;
6752         offset = reg_boundaries[0];
6753         p += offset;
6754         while (offset < BNX2_REGDUMP_LEN) {
6755                 *p++ = REG_RD(bp, offset);
6756                 offset += 4;
6757                 if (offset == reg_boundaries[i + 1]) {
6758                         offset = reg_boundaries[i + 2];
6759                         p = (u32 *) (orig_p + offset);
6760                         i += 2;
6761                 }
6762         }
6763 }
6764
6765 static void
6766 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6767 {
6768         struct bnx2 *bp = netdev_priv(dev);
6769
6770         if (bp->flags & BNX2_FLAG_NO_WOL) {
6771                 wol->supported = 0;
6772                 wol->wolopts = 0;
6773         }
6774         else {
6775                 wol->supported = WAKE_MAGIC;
6776                 if (bp->wol)
6777                         wol->wolopts = WAKE_MAGIC;
6778                 else
6779                         wol->wolopts = 0;
6780         }
6781         memset(&wol->sopass, 0, sizeof(wol->sopass));
6782 }
6783
6784 static int
6785 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6786 {
6787         struct bnx2 *bp = netdev_priv(dev);
6788
6789         if (wol->wolopts & ~WAKE_MAGIC)
6790                 return -EINVAL;
6791
6792         if (wol->wolopts & WAKE_MAGIC) {
6793                 if (bp->flags & BNX2_FLAG_NO_WOL)
6794                         return -EINVAL;
6795
6796                 bp->wol = 1;
6797         }
6798         else {
6799                 bp->wol = 0;
6800         }
6801         return 0;
6802 }
6803
/* ethtool nway_reset: restart autonegotiation.  Only valid while the
 * interface is up and autoneg is enabled.  Devices with a remote PHY
 * re-run bnx2_setup_remote_phy() instead of touching MII registers.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side: put the SerDes
	 * PHY in loopback briefly (lock dropped across the sleep), then
	 * arm the SerDes autoneg timer before restarting AN below.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6849
6850 static u32
6851 bnx2_get_link(struct net_device *dev)
6852 {
6853         struct bnx2 *bp = netdev_priv(dev);
6854
6855         return bp->link_up;
6856 }
6857
6858 static int
6859 bnx2_get_eeprom_len(struct net_device *dev)
6860 {
6861         struct bnx2 *bp = netdev_priv(dev);
6862
6863         if (bp->flash_info == NULL)
6864                 return 0;
6865
6866         return (int) bp->flash_size;
6867 }
6868
6869 static int
6870 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6871                 u8 *eebuf)
6872 {
6873         struct bnx2 *bp = netdev_priv(dev);
6874         int rc;
6875
6876         if (!netif_running(dev))
6877                 return -EAGAIN;
6878
6879         /* parameters already validated in ethtool_get_eeprom */
6880
6881         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6882
6883         return rc;
6884 }
6885
6886 static int
6887 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6888                 u8 *eebuf)
6889 {
6890         struct bnx2 *bp = netdev_priv(dev);
6891         int rc;
6892
6893         if (!netif_running(dev))
6894                 return -EAGAIN;
6895
6896         /* parameters already validated in ethtool_set_eeprom */
6897
6898         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6899
6900         return rc;
6901 }
6902
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters cached in the driver state.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero first so fields this driver does not support read as 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6924
/* ethtool set_coalesce: clamp the requested coalescing parameters to
 * the hardware limits (0x3ff for tick values, 0xff for frame counts),
 * store them, and re-initialize the NIC so they take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* Chips with the broken-stats erratum only accept a stats tick
	 * period of 0 or exactly one second.
	 */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* The coalescing registers are programmed during NIC init, so a
	 * running device must be stopped and re-initialized to apply
	 * the new values.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6973
/* ethtool get_ringparam: report current and maximum RX/TX ring sizes.
 * The "jumbo" ring maps to the RX page ring used for large frames;
 * there is no mini ring on this hardware.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6990
/* Resize the RX/TX rings to @rx/@tx descriptors.  On a running device
 * the chip is reset and all buffers and DMA memory are freed first,
 * then reallocated at the new sizes and the NIC restarted.  If the
 * reallocation or re-init fails the device is closed, since it can no
 * longer operate.  Returns 0 on success or a negative errno.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Quiesce and tear down before touching the ring sizes. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI so dev_close() can proceed, then
			 * shut the interface down on failure.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
		bnx2_netif_start(bp);
	}
	return 0;
}
7020
7021 static int
7022 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7023 {
7024         struct bnx2 *bp = netdev_priv(dev);
7025         int rc;
7026
7027         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7028                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7029                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7030
7031                 return -EINVAL;
7032         }
7033         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7034         return rc;
7035 }
7036
7037 static void
7038 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7039 {
7040         struct bnx2 *bp = netdev_priv(dev);
7041
7042         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7043         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7044         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7045 }
7046
7047 static int
7048 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7049 {
7050         struct bnx2 *bp = netdev_priv(dev);
7051
7052         bp->req_flow_ctrl = 0;
7053         if (epause->rx_pause)
7054                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7055         if (epause->tx_pause)
7056                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7057
7058         if (epause->autoneg) {
7059                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7060         }
7061         else {
7062                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7063         }
7064
7065         if (netif_running(dev)) {
7066                 spin_lock_bh(&bp->phy_lock);
7067                 bnx2_setup_phy(bp, bp->phy_port);
7068                 spin_unlock_bh(&bp->phy_lock);
7069         }
7070
7071         return 0;
7072 }
7073
7074 static u32
7075 bnx2_get_rx_csum(struct net_device *dev)
7076 {
7077         struct bnx2 *bp = netdev_priv(dev);
7078
7079         return bp->rx_csum;
7080 }
7081
7082 static int
7083 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7084 {
7085         struct bnx2 *bp = netdev_priv(dev);
7086
7087         bp->rx_csum = data;
7088         return 0;
7089 }
7090
7091 static int
7092 bnx2_set_tso(struct net_device *dev, u32 data)
7093 {
7094         struct bnx2 *bp = netdev_priv(dev);
7095
7096         if (data) {
7097                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7098                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7099                         dev->features |= NETIF_F_TSO6;
7100         } else
7101                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7102                                    NETIF_F_TSO_ECN);
7103         return 0;
7104 }
7105
/* ethtool statistics names.  The order of this table must match
 * bnx2_stats_offset_arr and the bnx2_57xx_stats_len_arr tables below,
 * which index the hardware statistics block in parallel.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7157
/* Number of entries in the ethtool statistics tables. */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))

/* Offset of a statistics_block field expressed as a u32 index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7162
/* u32 offsets into the hardware statistics block, one per entry of
 * bnx2_stats_str_arr (same order).  64-bit counters point at their
 * _hi word; the _lo word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7212
/* Per-counter width in bytes for 5706/5708-A0 chips: 8 = 64-bit
 * hi/lo pair, 4 = 32-bit, 0 = counter skipped on this chip.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7223
/* Per-counter width in bytes for later chips: 8 = 64-bit hi/lo pair,
 * 4 = 32-bit, 0 = counter skipped (only stat_IfHCInBadOctets here).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7231
/* Number of ethtool self-tests; must match bnx2_tests_str_arr and the
 * buf[] indices used in bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Names reported for the ethtool self-test results, in run order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7244
7245 static int
7246 bnx2_get_sset_count(struct net_device *dev, int sset)
7247 {
7248         switch (sset) {
7249         case ETH_SS_TEST:
7250                 return BNX2_NUM_TESTS;
7251         case ETH_SS_STATS:
7252                 return BNX2_NUM_STATS;
7253         default:
7254                 return -EOPNOTSUPP;
7255         }
7256 }
7257
/* ethtool self-test handler.  Offline tests reset the chip into
 * diagnostic mode and are destructive, so the NIC is re-initialized
 * (or shut down, if the interface is closed) afterwards; online tests
 * run against the live device.  buf[] indices match
 * bnx2_tests_str_arr: nonzero means the test failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce and put the chip into diag mode before the
		 * destructive tests.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or power the chip down if
		 * the interface is not up).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up, up to ~7 seconds */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7316
7317 static void
7318 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7319 {
7320         switch (stringset) {
7321         case ETH_SS_STATS:
7322                 memcpy(buf, bnx2_stats_str_arr,
7323                         sizeof(bnx2_stats_str_arr));
7324                 break;
7325         case ETH_SS_TEST:
7326                 memcpy(buf, bnx2_tests_str_arr,
7327                         sizeof(bnx2_tests_str_arr));
7328                 break;
7329         }
7330 }
7331
/* ethtool get_ethtool_stats: copy the hardware statistics block into
 * buf[], widening each counter per the chip's length table
 * (8 = 64-bit hi/lo word pair, 4 = 32-bit, 0 = counter skipped and
 * reported as 0).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions skip extra counters because of errata. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: hi word at the offset, lo word next */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
7372
/* ethtool phys_id: blink the port LED to identify the adapter.  The
 * loop toggles the LED every 500 ms for roughly @data seconds
 * (default 2 when @data is 0), then restores the saved LED mode.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	/* Save the LED configuration and take manual control. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* LED off half-cycle */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* LED on half-cycle: force all speed/traffic LEDs */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the user interrupted the command. */
		if (signal_pending(current))
			break;
	}
	/* Release manual control and restore the saved configuration. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7412
7413 static int
7414 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7415 {
7416         struct bnx2 *bp = netdev_priv(dev);
7417
7418         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7419                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7420         else
7421                 return (ethtool_op_set_tx_csum(dev, data));
7422 }
7423
/* ethtool entry points for this driver; operations not listed here
 * fall back to the networking core's defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7454
/* MII register access ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock held.  Direct MII access is refused on
 * remote-PHY capable devices and while the interface is down; PHY
 * reads/writes are serialized by bp->phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru: GMIIPHY also returns the register value */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7505
7506 /* Called with rtnl_lock */
7507 static int
7508 bnx2_change_mac_addr(struct net_device *dev, void *p)
7509 {
7510         struct sockaddr *addr = p;
7511         struct bnx2 *bp = netdev_priv(dev);
7512
7513         if (!is_valid_ether_addr(addr->sa_data))
7514                 return -EINVAL;
7515
7516         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7517         if (netif_running(dev))
7518                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7519
7520         return 0;
7521 }
7522
7523 /* Called with rtnl_lock */
7524 static int
7525 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7526 {
7527         struct bnx2 *bp = netdev_priv(dev);
7528
7529         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7530                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7531                 return -EINVAL;
7532
7533         dev->mtu = new_mtu;
7534         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7535 }
7536
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: service every IRQ vector by hand with its
 * interrupt line disabled, so netconsole/netpoll can make progress
 * even when interrupts are not being delivered.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7551
/* Determine whether a 5709 is copper or SerDes by decoding the bond id
 * and PHY strap values from the dual-media control register, and set
 * BNX2_PHY_FLAG_SERDES accordingly.  The strap-to-media mapping differs
 * per PCI function (values per Broadcom hardware docs — not derivable
 * from this code alone).
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
        u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
        u32 strap;

        /* Bond id "C" means copper; "S" means SerDes; otherwise fall
         * through to strap decoding below.
         */
        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
                return;
        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                return;
        }

        /* A software override, when set, takes precedence over the
         * hardware strap value.
         */
        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
        else
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

        if (PCI_FUNC(bp->pdev->devfn) == 0) {
                switch (strap) {
                case 0x4:
                case 0x5:
                case 0x6:
                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                        return;
                }
        } else {
                switch (strap) {
                case 0x1:
                case 0x2:
                case 0x4:
                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                        return;
                }
        }
}
7589
/* Probe the conventional-PCI/PCI-X bus characteristics from the chip's
 * PCICFG status registers and record them in bp->flags and
 * bp->bus_speed_mhz (used for the probe banner and bus workarounds).
 * Only called for non-PCIe devices.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                /* PCI-X: the clock-speed-detect field encodes the actual
                 * bus frequency; map each encoding to a nominal MHz value.
                 */
                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Plain PCI: only 33 vs 66 MHz, indicated by M66EN. */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7641
7642 static int __devinit
7643 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7644 {
7645         struct bnx2 *bp;
7646         unsigned long mem_len;
7647         int rc, i, j;
7648         u32 reg;
7649         u64 dma_mask, persist_dma_mask;
7650
7651         SET_NETDEV_DEV(dev, &pdev->dev);
7652         bp = netdev_priv(dev);
7653
7654         bp->flags = 0;
7655         bp->phy_flags = 0;
7656
7657         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7658         rc = pci_enable_device(pdev);
7659         if (rc) {
7660                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7661                 goto err_out;
7662         }
7663
7664         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7665                 dev_err(&pdev->dev,
7666                         "Cannot find PCI device base address, aborting.\n");
7667                 rc = -ENODEV;
7668                 goto err_out_disable;
7669         }
7670
7671         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7672         if (rc) {
7673                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7674                 goto err_out_disable;
7675         }
7676
7677         pci_set_master(pdev);
7678         pci_save_state(pdev);
7679
7680         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7681         if (bp->pm_cap == 0) {
7682                 dev_err(&pdev->dev,
7683                         "Cannot find power management capability, aborting.\n");
7684                 rc = -EIO;
7685                 goto err_out_release;
7686         }
7687
7688         bp->dev = dev;
7689         bp->pdev = pdev;
7690
7691         spin_lock_init(&bp->phy_lock);
7692         spin_lock_init(&bp->indirect_lock);
7693 #ifdef BCM_CNIC
7694         mutex_init(&bp->cnic_lock);
7695 #endif
7696         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7697
7698         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7699         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7700         dev->mem_end = dev->mem_start + mem_len;
7701         dev->irq = pdev->irq;
7702
7703         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7704
7705         if (!bp->regview) {
7706                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7707                 rc = -ENOMEM;
7708                 goto err_out_release;
7709         }
7710
7711         /* Configure byte swap and enable write to the reg_window registers.
7712          * Rely on CPU to do target byte swapping on big endian systems
7713          * The chip's target access swapping will not swap all accesses
7714          */
7715         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7716                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7717                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7718
7719         bnx2_set_power_state(bp, PCI_D0);
7720
7721         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7722
7723         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7724                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7725                         dev_err(&pdev->dev,
7726                                 "Cannot find PCIE capability, aborting.\n");
7727                         rc = -EIO;
7728                         goto err_out_unmap;
7729                 }
7730                 bp->flags |= BNX2_FLAG_PCIE;
7731                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7732                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7733         } else {
7734                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7735                 if (bp->pcix_cap == 0) {
7736                         dev_err(&pdev->dev,
7737                                 "Cannot find PCIX capability, aborting.\n");
7738                         rc = -EIO;
7739                         goto err_out_unmap;
7740                 }
7741                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7742         }
7743
7744         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7745                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7746                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7747         }
7748
7749         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7750                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7751                         bp->flags |= BNX2_FLAG_MSI_CAP;
7752         }
7753
7754         /* 5708 cannot support DMA addresses > 40-bit.  */
7755         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7756                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7757         else
7758                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7759
7760         /* Configure DMA attributes. */
7761         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7762                 dev->features |= NETIF_F_HIGHDMA;
7763                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7764                 if (rc) {
7765                         dev_err(&pdev->dev,
7766                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7767                         goto err_out_unmap;
7768                 }
7769         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7770                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7771                 goto err_out_unmap;
7772         }
7773
7774         if (!(bp->flags & BNX2_FLAG_PCIE))
7775                 bnx2_get_pci_speed(bp);
7776
7777         /* 5706A0 may falsely detect SERR and PERR. */
7778         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7779                 reg = REG_RD(bp, PCI_COMMAND);
7780                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7781                 REG_WR(bp, PCI_COMMAND, reg);
7782         }
7783         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7784                 !(bp->flags & BNX2_FLAG_PCIX)) {
7785
7786                 dev_err(&pdev->dev,
7787                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7788                 goto err_out_unmap;
7789         }
7790
7791         bnx2_init_nvram(bp);
7792
7793         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7794
7795         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7796             BNX2_SHM_HDR_SIGNATURE_SIG) {
7797                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7798
7799                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7800         } else
7801                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7802
7803         /* Get the permanent MAC address.  First we need to make sure the
7804          * firmware is actually running.
7805          */
7806         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7807
7808         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7809             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7810                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7811                 rc = -ENODEV;
7812                 goto err_out_unmap;
7813         }
7814
7815         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7816         for (i = 0, j = 0; i < 3; i++) {
7817                 u8 num, k, skip0;
7818
7819                 num = (u8) (reg >> (24 - (i * 8)));
7820                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7821                         if (num >= k || !skip0 || k == 1) {
7822                                 bp->fw_version[j++] = (num / k) + '0';
7823                                 skip0 = 0;
7824                         }
7825                 }
7826                 if (i != 2)
7827                         bp->fw_version[j++] = '.';
7828         }
7829         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7830         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7831                 bp->wol = 1;
7832
7833         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7834                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7835
7836                 for (i = 0; i < 30; i++) {
7837                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7838                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7839                                 break;
7840                         msleep(10);
7841                 }
7842         }
7843         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7844         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7845         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7846             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7847                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7848
7849                 bp->fw_version[j++] = ' ';
7850                 for (i = 0; i < 3; i++) {
7851                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7852                         reg = swab32(reg);
7853                         memcpy(&bp->fw_version[j], &reg, 4);
7854                         j += 4;
7855                 }
7856         }
7857
7858         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7859         bp->mac_addr[0] = (u8) (reg >> 8);
7860         bp->mac_addr[1] = (u8) reg;
7861
7862         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7863         bp->mac_addr[2] = (u8) (reg >> 24);
7864         bp->mac_addr[3] = (u8) (reg >> 16);
7865         bp->mac_addr[4] = (u8) (reg >> 8);
7866         bp->mac_addr[5] = (u8) reg;
7867
7868         bp->tx_ring_size = MAX_TX_DESC_CNT;
7869         bnx2_set_rx_ring_size(bp, 255);
7870
7871         bp->rx_csum = 1;
7872
7873         bp->tx_quick_cons_trip_int = 2;
7874         bp->tx_quick_cons_trip = 20;
7875         bp->tx_ticks_int = 18;
7876         bp->tx_ticks = 80;
7877
7878         bp->rx_quick_cons_trip_int = 2;
7879         bp->rx_quick_cons_trip = 12;
7880         bp->rx_ticks_int = 18;
7881         bp->rx_ticks = 18;
7882
7883         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7884
7885         bp->current_interval = BNX2_TIMER_INTERVAL;
7886
7887         bp->phy_addr = 1;
7888
7889         /* Disable WOL support if we are running on a SERDES chip. */
7890         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7891                 bnx2_get_5709_media(bp);
7892         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7893                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7894
7895         bp->phy_port = PORT_TP;
7896         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7897                 bp->phy_port = PORT_FIBRE;
7898                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7899                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7900                         bp->flags |= BNX2_FLAG_NO_WOL;
7901                         bp->wol = 0;
7902                 }
7903                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7904                         /* Don't do parallel detect on this board because of
7905                          * some board problems.  The link will not go down
7906                          * if we do parallel detect.
7907                          */
7908                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7909                             pdev->subsystem_device == 0x310c)
7910                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7911                 } else {
7912                         bp->phy_addr = 2;
7913                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7914                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7915                 }
7916         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7917                    CHIP_NUM(bp) == CHIP_NUM_5708)
7918                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7919         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7920                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7921                   CHIP_REV(bp) == CHIP_REV_Bx))
7922                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7923
7924         bnx2_init_fw_cap(bp);
7925
7926         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7927             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7928             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7929             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7930                 bp->flags |= BNX2_FLAG_NO_WOL;
7931                 bp->wol = 0;
7932         }
7933
7934         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7935                 bp->tx_quick_cons_trip_int =
7936                         bp->tx_quick_cons_trip;
7937                 bp->tx_ticks_int = bp->tx_ticks;
7938                 bp->rx_quick_cons_trip_int =
7939                         bp->rx_quick_cons_trip;
7940                 bp->rx_ticks_int = bp->rx_ticks;
7941                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7942                 bp->com_ticks_int = bp->com_ticks;
7943                 bp->cmd_ticks_int = bp->cmd_ticks;
7944         }
7945
7946         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7947          *
7948          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7949          * with byte enables disabled on the unused 32-bit word.  This is legal
7950          * but causes problems on the AMD 8132 which will eventually stop
7951          * responding after a while.
7952          *
7953          * AMD believes this incompatibility is unique to the 5706, and
7954          * prefers to locally disable MSI rather than globally disabling it.
7955          */
7956         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7957                 struct pci_dev *amd_8132 = NULL;
7958
7959                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7960                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7961                                                   amd_8132))) {
7962
7963                         if (amd_8132->revision >= 0x10 &&
7964                             amd_8132->revision <= 0x13) {
7965                                 disable_msi = 1;
7966                                 pci_dev_put(amd_8132);
7967                                 break;
7968                         }
7969                 }
7970         }
7971
7972         bnx2_set_default_link(bp);
7973         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7974
7975         init_timer(&bp->timer);
7976         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7977         bp->timer.data = (unsigned long) bp;
7978         bp->timer.function = bnx2_timer;
7979
7980         return 0;
7981
7982 err_out_unmap:
7983         if (bp->regview) {
7984                 iounmap(bp->regview);
7985                 bp->regview = NULL;
7986         }
7987
7988 err_out_release:
7989         pci_release_regions(pdev);
7990
7991 err_out_disable:
7992         pci_disable_device(pdev);
7993         pci_set_drvdata(pdev, NULL);
7994
7995 err_out:
7996         return rc;
7997 }
7998
7999 static char * __devinit
8000 bnx2_bus_string(struct bnx2 *bp, char *str)
8001 {
8002         char *s = str;
8003
8004         if (bp->flags & BNX2_FLAG_PCIE) {
8005                 s += sprintf(s, "PCI Express");
8006         } else {
8007                 s += sprintf(s, "PCI");
8008                 if (bp->flags & BNX2_FLAG_PCIX)
8009                         s += sprintf(s, "-X");
8010                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8011                         s += sprintf(s, " 32-bit");
8012                 else
8013                         s += sprintf(s, " 64-bit");
8014                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8015         }
8016         return str;
8017 }
8018
/* Register one NAPI context per potential MSI-X vector.  Vector 0 uses
 * the default poller (handles link events etc. via bnx2_poll); the
 * remaining vectors use the rx/tx-only MSI-X poller.
 */
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                int (*poll)(struct napi_struct *, int);

                if (i == 0)
                        poll = bnx2_poll;
                else
                        poll = bnx2_poll_msix;

                /* 64 is the NAPI weight (per-poll work budget). */
                netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
                bnapi->bp = bp;
        }
}
8037
/* net_device operations table wired up in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats          = bnx2_get_stats,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        .ndo_poll_controller    = poll_bnx2,
#endif
};
8056
/* Mirror feature @flags into dev->vlan_features when VLAN support is
 * compiled in; a no-op otherwise.
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
        dev->vlan_features |= flags;
#endif
}
8063
/* PCI probe entry point: allocate the netdev, run board init, request
 * firmware, set up features/ops and register with the network stack.
 * On any failure everything acquired so far is torn down in reverse.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc;
        char str[40];

        /* Print the driver banner only once, on the first probe. */
        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

        if (!dev)
                return -ENOMEM;

        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                /* bnx2_init_board released its own resources already. */
                free_netdev(dev);
                return rc;
        }

        dev->netdev_ops = &bnx2_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);
        bnx2_init_napi(bp);

        pci_set_drvdata(pdev, dev);

        rc = bnx2_request_firmware(bp);
        if (rc)
                goto error;

        /* MAC was read from shared memory by bnx2_init_board(). */
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);

        /* Checksum/TSO offloads; IPv6 variants only on the 5709. */
        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_IPV6_CSUM;
                vlan_features_add(dev, NETIF_F_IPV6_CSUM);
        }
#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
        vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_TSO6;
                vlan_features_add(dev, NETIF_F_TSO6);
        }
        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
        }

        printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
                "IRQ %d, node addr %pM\n",
                dev->name,
                board_info[ent->driver_data].name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str),
                dev->base_addr,
                bp->pdev->irq, dev->dev_addr);

        return 0;

error:
        /* Undo firmware/mapping/PCI setup done above and by init_board. */
        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return rc;
}
8150
/* PCI remove entry point: unregister the netdev and release firmware,
 * MMIO mapping and PCI resources acquired at probe time.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure a pending reset_task cannot run after teardown. */
        flush_scheduled_work();

        unregister_netdev(dev);

        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);

        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
8174
/* Legacy PM suspend: quiesce the NIC, reset the chip and drop into the
 * requested low-power state.  If the interface is down, only the PCI
 * config space needs saving.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Stop deferred work, traffic and the maintenance timer before
         * shutting the chip down and freeing in-flight buffers.
         */
        flush_scheduled_work();
        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
8198
/* Legacy PM resume: restore config space and, if the interface was up,
 * bring the chip back to D0 and fully re-initialize it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp);
        return 0;
}
8215
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        /* Permanent failure: nothing to recover, tell the core to give up. */
        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev)) {
                bnx2_netif_stop(bp);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }

        pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
8250
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
        /* Config space was saved at probe time (pci_save_state). */
        pci_restore_state(pdev);

        if (netif_running(dev)) {
                bnx2_set_power_state(bp, PCI_D0);
                bnx2_init_nic(bp, 1);
        }

        rtnl_unlock();
        return PCI_ERS_RESULT_RECOVERED;
}
8280
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        if (netif_running(dev))
                bnx2_netif_start(bp);

        netif_device_attach(dev);
        rtnl_unlock();
}
8300
/* PCI AER recovery callbacks (detect -> slot reset -> resume). */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
8306
/* PCI driver registration: probe/remove, legacy PM and error recovery. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
8316
/* Module init: register the PCI driver; probing happens per device. */
static int __init bnx2_init(void)
{
        return pci_register_driver(&bnx2_pci_driver);
}
8321
/* Module exit: unregister the driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
8326
/* Hook the init/exit routines into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8329
8330
8331