/*
 * Source: drivers/net/ethernet/broadcom/bnx2.c (Linux kernel tree),
 * as of commit "bnx2: Add BNX2 prefix to descriptor structures and macros".
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.2.3"
#define DRV_MODULE_RELDATE      "June 27, 2012"
/* Firmware image names, requested at run time (see MODULE_FIRMWARE below). */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"

/* Absolute jiffies value @x jiffies from now (timer deadlines). */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board identifiers, carried as driver_data in bnx2_pci_tbl and used to
 * index board_info[] below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* indexed by board_t, above */
static struct {
	char *name;	/* marketing name printed at probe time */
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI IDs handled by this driver.  HP OEM variants (NC370x) are matched by
 * subsystem vendor/device before the catch-all Broadcom entries.  The last
 * field is the board_t index.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,	/* BCM5716 (no named constant) */
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,	/* BCM5716S (no named constant) */
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM parameters for each flash/EEPROM part the board may be strapped
 * with.  Each entry carries the BNX2_NV_* access flags, page geometry
 * (bits/size), byte address mask, total size and a printable name; the
 * first five words appear to be raw NVM interface register values —
 * see struct flash_spec in bnx2.h for the exact field meanings.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* The 5709 family has a fixed buffered-flash geometry, so it bypasses the
 * strap-probed flash_table above.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declarations; definitions appear later in this file. */
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257         barrier();
258
259         /* The ring uses 256 indices for 255 entries, one of them
260          * needs to be skipped.
261          */
262         diff = txr->tx_prod - txr->tx_cons;
263         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
264                 diff &= 0xffff;
265                 if (diff == BNX2_TX_DESC_CNT)
266                         diff = BNX2_MAX_TX_DESC_CNT;
267         }
268         return bp->tx_ring_size - diff;
269 }
270
271 static u32
272 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273 {
274         u32 val;
275
276         spin_lock_bh(&bp->indirect_lock);
277         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278         val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
279         spin_unlock_bh(&bp->indirect_lock);
280         return val;
281 }
282
/* Write @val to chip register @offset indirectly through the PCI config
 * window.  The indirect_lock serializes use of the shared window registers.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
291
/* Write @val at @offset within the firmware shared-memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
297
/* Read the word at @offset within the firmware shared-memory region. */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
303
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * The 5709 uses a request/acknowledge interface: post the write, then poll
 * up to 5 times for the WRITE_REQ bit to clear.  Older chips use a simple
 * address/data register pair.  Serialized by indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the chip to acknowledge the write; a timeout is
		 * silently ignored.
		 */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
327
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332         struct bnx2 *bp = netdev_priv(dev);
333         struct drv_ctl_io *io = &info->data.io;
334
335         switch (info->cmd) {
336         case DRV_CTL_IO_WR_CMD:
337                 bnx2_reg_wr_ind(bp, io->offset, io->data);
338                 break;
339         case DRV_CTL_IO_RD_CMD:
340                 io->data = bnx2_reg_rd_ind(bp, io->offset);
341                 break;
342         case DRV_CTL_CTX_WR_CMD:
343                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344                 break;
345         default:
346                 return -EINVAL;
347         }
348         return 0;
349 }
350
351 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 {
353         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355         int sb_id;
356
357         if (bp->flags & BNX2_FLAG_USING_MSIX) {
358                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359                 bnapi->cnic_present = 0;
360                 sb_id = bp->irq_nvecs;
361                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362         } else {
363                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364                 bnapi->cnic_tag = bnapi->last_status_idx;
365                 bnapi->cnic_present = 1;
366                 sb_id = 0;
367                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368         }
369
370         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371         cp->irq_arr[0].status_blk = (void *)
372                 ((unsigned long) bnapi->status_blk.msi +
373                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374         cp->irq_arr[0].status_blk_num = sb_id;
375         cp->num_irq = 1;
376 }
377
/* cnic callback: attach the cnic driver to this device.
 *
 * Returns -EINVAL if @ops is NULL, -EBUSY if a cnic driver is already
 * registered, or -ENODEV if the firmware reports zero iSCSI connections.
 * bp->cnic_data is assigned before the ops pointer is published with
 * rcu_assign_pointer() so RCU readers see consistent state.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
403
/* cnic callback: detach the cnic driver.
 *
 * Clears the cnic state under cnic_lock, then synchronize_rcu() waits for
 * any in-flight RCU readers of bp->cnic_ops before returning.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
418
419 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421         struct bnx2 *bp = netdev_priv(dev);
422         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423
424         if (!cp->max_iscsi_conn)
425                 return NULL;
426
427         cp->drv_owner = THIS_MODULE;
428         cp->chip_id = bp->chip_id;
429         cp->pdev = bp->pdev;
430         cp->io_base = bp->regview;
431         cp->drv_ctl = bnx2_drv_ctl;
432         cp->drv_register_cnic = bnx2_register_cnic;
433         cp->drv_unregister_cnic = bnx2_unregister_cnic;
434
435         return cp;
436 }
437 EXPORT_SYMBOL(bnx2_cnic_probe);
438
/* Notify a registered cnic driver (if any) that the device is stopping.
 * cnic_lock protects the cnic_ops pointer for the duration of the call.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
454
455 static void
456 bnx2_cnic_start(struct bnx2 *bp)
457 {
458         struct cnic_ops *c_ops;
459         struct cnic_ctl_info info;
460
461         mutex_lock(&bp->cnic_lock);
462         c_ops = rcu_dereference_protected(bp->cnic_ops,
463                                           lockdep_is_held(&bp->cnic_lock));
464         if (c_ops) {
465                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
466                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
467
468                         bnapi->cnic_tag = bnapi->last_status_idx;
469                 }
470                 info.cmd = CNIC_CTL_START_CMD;
471                 c_ops->cnic_ctl(bp->cnic_data, &info);
472         }
473         mutex_unlock(&bp->cnic_lock);
474 }
475
476 #else
477
/* No-op stubs used when the driver is built without CNIC support. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
487
488 #endif
489
490 static int
491 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
492 {
493         u32 val1;
494         int i, ret;
495
496         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
497                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
498                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
499
500                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
501                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
502
503                 udelay(40);
504         }
505
506         val1 = (bp->phy_addr << 21) | (reg << 16) |
507                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
508                 BNX2_EMAC_MDIO_COMM_START_BUSY;
509         BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
510
511         for (i = 0; i < 50; i++) {
512                 udelay(10);
513
514                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
515                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
516                         udelay(5);
517
518                         val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
519                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
520
521                         break;
522                 }
523         }
524
525         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
526                 *val = 0x0;
527                 ret = -EBUSY;
528         }
529         else {
530                 *val = val1;
531                 ret = 0;
532         }
533
534         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
535                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
536                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
537
538                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
539                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
540
541                 udelay(40);
542         }
543
544         return ret;
545 }
546
547 static int
548 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
549 {
550         u32 val1;
551         int i, ret;
552
553         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
554                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
555                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
556
557                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
558                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
559
560                 udelay(40);
561         }
562
563         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
564                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
565                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
566         BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
567
568         for (i = 0; i < 50; i++) {
569                 udelay(10);
570
571                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
572                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
573                         udelay(5);
574                         break;
575                 }
576         }
577
578         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
579                 ret = -EBUSY;
580         else
581                 ret = 0;
582
583         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
584                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
585                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
586
587                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
588                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
589
590                 udelay(40);
591         }
592
593         return ret;
594 }
595
596 static void
597 bnx2_disable_int(struct bnx2 *bp)
598 {
599         int i;
600         struct bnx2_napi *bnapi;
601
602         for (i = 0; i < bp->irq_nvecs; i++) {
603                 bnapi = &bp->bnx2_napi[i];
604                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
605                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
606         }
607         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
608 }
609
610 static void
611 bnx2_enable_int(struct bnx2 *bp)
612 {
613         int i;
614         struct bnx2_napi *bnapi;
615
616         for (i = 0; i < bp->irq_nvecs; i++) {
617                 bnapi = &bp->bnx2_napi[i];
618
619                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
620                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
621                         BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
622                         bnapi->last_status_idx);
623
624                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
625                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
626                         bnapi->last_status_idx);
627         }
628         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
629 }
630
631 static void
632 bnx2_disable_int_sync(struct bnx2 *bp)
633 {
634         int i;
635
636         atomic_inc(&bp->intr_sem);
637         if (!netif_running(bp->dev))
638                 return;
639
640         bnx2_disable_int(bp);
641         for (i = 0; i < bp->irq_nvecs; i++)
642                 synchronize_irq(bp->irq_tbl[i].vector);
643 }
644
645 static void
646 bnx2_napi_disable(struct bnx2 *bp)
647 {
648         int i;
649
650         for (i = 0; i < bp->irq_nvecs; i++)
651                 napi_disable(&bp->bnx2_napi[i].napi);
652 }
653
654 static void
655 bnx2_napi_enable(struct bnx2 *bp)
656 {
657         int i;
658
659         for (i = 0; i < bp->irq_nvecs; i++)
660                 napi_enable(&bp->bnx2_napi[i].napi);
661 }
662
663 static void
664 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
665 {
666         if (stop_cnic)
667                 bnx2_cnic_stop(bp);
668         if (netif_running(bp->dev)) {
669                 bnx2_napi_disable(bp);
670                 netif_tx_disable(bp->dev);
671         }
672         bnx2_disable_int_sync(bp);
673         netif_carrier_off(bp->dev);     /* prevent tx timeout */
674 }
675
676 static void
677 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
678 {
679         if (atomic_dec_and_test(&bp->intr_sem)) {
680                 if (netif_running(bp->dev)) {
681                         netif_tx_wake_all_queues(bp->dev);
682                         spin_lock_bh(&bp->phy_lock);
683                         if (bp->link_up)
684                                 netif_carrier_on(bp->dev);
685                         spin_unlock_bh(&bp->phy_lock);
686                         bnx2_napi_enable(bp);
687                         bnx2_enable_int(bp);
688                         if (start_cnic)
689                                 bnx2_cnic_start(bp);
690                 }
691         }
692 }
693
694 static void
695 bnx2_free_tx_mem(struct bnx2 *bp)
696 {
697         int i;
698
699         for (i = 0; i < bp->num_tx_rings; i++) {
700                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
701                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
702
703                 if (txr->tx_desc_ring) {
704                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
705                                           txr->tx_desc_ring,
706                                           txr->tx_desc_mapping);
707                         txr->tx_desc_ring = NULL;
708                 }
709                 kfree(txr->tx_buf_ring);
710                 txr->tx_buf_ring = NULL;
711         }
712 }
713
714 static void
715 bnx2_free_rx_mem(struct bnx2 *bp)
716 {
717         int i;
718
719         for (i = 0; i < bp->num_rx_rings; i++) {
720                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
721                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
722                 int j;
723
724                 for (j = 0; j < bp->rx_max_ring; j++) {
725                         if (rxr->rx_desc_ring[j])
726                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
727                                                   rxr->rx_desc_ring[j],
728                                                   rxr->rx_desc_mapping[j]);
729                         rxr->rx_desc_ring[j] = NULL;
730                 }
731                 vfree(rxr->rx_buf_ring);
732                 rxr->rx_buf_ring = NULL;
733
734                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
735                         if (rxr->rx_pg_desc_ring[j])
736                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
737                                                   rxr->rx_pg_desc_ring[j],
738                                                   rxr->rx_pg_desc_mapping[j]);
739                         rxr->rx_pg_desc_ring[j] = NULL;
740                 }
741                 vfree(rxr->rx_pg_ring);
742                 rxr->rx_pg_ring = NULL;
743         }
744 }
745
746 static int
747 bnx2_alloc_tx_mem(struct bnx2 *bp)
748 {
749         int i;
750
751         for (i = 0; i < bp->num_tx_rings; i++) {
752                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
753                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
754
755                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
756                 if (txr->tx_buf_ring == NULL)
757                         return -ENOMEM;
758
759                 txr->tx_desc_ring =
760                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
761                                            &txr->tx_desc_mapping, GFP_KERNEL);
762                 if (txr->tx_desc_ring == NULL)
763                         return -ENOMEM;
764         }
765         return 0;
766 }
767
768 static int
769 bnx2_alloc_rx_mem(struct bnx2 *bp)
770 {
771         int i;
772
773         for (i = 0; i < bp->num_rx_rings; i++) {
774                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
775                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
776                 int j;
777
778                 rxr->rx_buf_ring =
779                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
780                 if (rxr->rx_buf_ring == NULL)
781                         return -ENOMEM;
782
783                 for (j = 0; j < bp->rx_max_ring; j++) {
784                         rxr->rx_desc_ring[j] =
785                                 dma_alloc_coherent(&bp->pdev->dev,
786                                                    RXBD_RING_SIZE,
787                                                    &rxr->rx_desc_mapping[j],
788                                                    GFP_KERNEL);
789                         if (rxr->rx_desc_ring[j] == NULL)
790                                 return -ENOMEM;
791
792                 }
793
794                 if (bp->rx_pg_ring_size) {
795                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
796                                                   bp->rx_max_pg_ring);
797                         if (rxr->rx_pg_ring == NULL)
798                                 return -ENOMEM;
799
800                 }
801
802                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
803                         rxr->rx_pg_desc_ring[j] =
804                                 dma_alloc_coherent(&bp->pdev->dev,
805                                                    RXBD_RING_SIZE,
806                                                    &rxr->rx_pg_desc_mapping[j],
807                                                    GFP_KERNEL);
808                         if (rxr->rx_pg_desc_ring[j] == NULL)
809                                 return -ENOMEM;
810
811                 }
812         }
813         return 0;
814 }
815
816 static void
817 bnx2_free_mem(struct bnx2 *bp)
818 {
819         int i;
820         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
821
822         bnx2_free_tx_mem(bp);
823         bnx2_free_rx_mem(bp);
824
825         for (i = 0; i < bp->ctx_pages; i++) {
826                 if (bp->ctx_blk[i]) {
827                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
828                                           bp->ctx_blk[i],
829                                           bp->ctx_blk_mapping[i]);
830                         bp->ctx_blk[i] = NULL;
831                 }
832         }
833         if (bnapi->status_blk.msi) {
834                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
835                                   bnapi->status_blk.msi,
836                                   bp->status_blk_mapping);
837                 bnapi->status_blk.msi = NULL;
838                 bp->stats_blk = NULL;
839         }
840 }
841
842 static int
843 bnx2_alloc_mem(struct bnx2 *bp)
844 {
845         int i, status_blk_size, err;
846         struct bnx2_napi *bnapi;
847         void *status_blk;
848
849         /* Combine status and statistics blocks into one allocation. */
850         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
851         if (bp->flags & BNX2_FLAG_MSIX_CAP)
852                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
853                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
854         bp->status_stats_size = status_blk_size +
855                                 sizeof(struct statistics_block);
856
857         status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
858                                         &bp->status_blk_mapping, GFP_KERNEL);
859         if (status_blk == NULL)
860                 goto alloc_mem_err;
861
862         memset(status_blk, 0, bp->status_stats_size);
863
864         bnapi = &bp->bnx2_napi[0];
865         bnapi->status_blk.msi = status_blk;
866         bnapi->hw_tx_cons_ptr =
867                 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
868         bnapi->hw_rx_cons_ptr =
869                 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
870         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
871                 for (i = 1; i < bp->irq_nvecs; i++) {
872                         struct status_block_msix *sblk;
873
874                         bnapi = &bp->bnx2_napi[i];
875
876                         sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877                         bnapi->status_blk.msix = sblk;
878                         bnapi->hw_tx_cons_ptr =
879                                 &sblk->status_tx_quick_consumer_index;
880                         bnapi->hw_rx_cons_ptr =
881                                 &sblk->status_rx_quick_consumer_index;
882                         bnapi->int_num = i << 24;
883                 }
884         }
885
886         bp->stats_blk = status_blk + status_blk_size;
887
888         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889
890         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
891                 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
892                 if (bp->ctx_pages == 0)
893                         bp->ctx_pages = 1;
894                 for (i = 0; i < bp->ctx_pages; i++) {
895                         bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896                                                 BNX2_PAGE_SIZE,
897                                                 &bp->ctx_blk_mapping[i],
898                                                 GFP_KERNEL);
899                         if (bp->ctx_blk[i] == NULL)
900                                 goto alloc_mem_err;
901                 }
902         }
903
904         err = bnx2_alloc_rx_mem(bp);
905         if (err)
906                 goto alloc_mem_err;
907
908         err = bnx2_alloc_tx_mem(bp);
909         if (err)
910                 goto alloc_mem_err;
911
912         return 0;
913
914 alloc_mem_err:
915         bnx2_free_mem(bp);
916         return -ENOMEM;
917 }
918
919 static void
920 bnx2_report_fw_link(struct bnx2 *bp)
921 {
922         u32 fw_link_status = 0;
923
924         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
925                 return;
926
927         if (bp->link_up) {
928                 u32 bmsr;
929
930                 switch (bp->line_speed) {
931                 case SPEED_10:
932                         if (bp->duplex == DUPLEX_HALF)
933                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
934                         else
935                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
936                         break;
937                 case SPEED_100:
938                         if (bp->duplex == DUPLEX_HALF)
939                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
940                         else
941                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
942                         break;
943                 case SPEED_1000:
944                         if (bp->duplex == DUPLEX_HALF)
945                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
946                         else
947                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
948                         break;
949                 case SPEED_2500:
950                         if (bp->duplex == DUPLEX_HALF)
951                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
952                         else
953                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
954                         break;
955                 }
956
957                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
958
959                 if (bp->autoneg) {
960                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
961
962                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
964
965                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
966                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
967                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968                         else
969                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
970                 }
971         }
972         else
973                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974
975         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
976 }
977
978 static char *
979 bnx2_xceiver_str(struct bnx2 *bp)
980 {
981         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
982                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
983                  "Copper");
984 }
985
986 static void
987 bnx2_report_link(struct bnx2 *bp)
988 {
989         if (bp->link_up) {
990                 netif_carrier_on(bp->dev);
991                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
992                             bnx2_xceiver_str(bp),
993                             bp->line_speed,
994                             bp->duplex == DUPLEX_FULL ? "full" : "half");
995
996                 if (bp->flow_ctrl) {
997                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
998                                 pr_cont(", receive ");
999                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1000                                         pr_cont("& transmit ");
1001                         }
1002                         else {
1003                                 pr_cont(", transmit ");
1004                         }
1005                         pr_cont("flow control ON");
1006                 }
1007                 pr_cont("\n");
1008         } else {
1009                 netif_carrier_off(bp->dev);
1010                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1011                            bnx2_xceiver_str(bp));
1012         }
1013
1014         bnx2_report_fw_link(bp);
1015 }
1016
/* Resolve the pause (flow-control) configuration after a link change.
 *
 * If flow control is not being autonegotiated, the user-requested
 * setting is applied directly (full duplex only).  Otherwise the local
 * and partner advertisements are compared per the IEEE 802.3ab
 * Table 28B-3 resolution rules.  Result is written to bp->flow_ctrl.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause resolution only applies when BOTH speed and flow-control
	 * autoneg are enabled; otherwise honor the forced request.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is defined only for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the already-resolved pause result in a
	 * status register; use it instead of re-resolving.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes uses 1000BASE-X pause bits; translate them into the
	 * copper ADVERTISE_PAUSE_* encoding so one resolver serves both.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				/* Both sides symmetric-capable. */
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Partner asym-only: we receive pause. */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			/* We only send pause frames. */
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1092
1093 static int
1094 bnx2_5709s_linkup(struct bnx2 *bp)
1095 {
1096         u32 val, speed;
1097
1098         bp->link_up = 1;
1099
1100         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103
1104         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105                 bp->line_speed = bp->req_line_speed;
1106                 bp->duplex = bp->req_duplex;
1107                 return 0;
1108         }
1109         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110         switch (speed) {
1111                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1112                         bp->line_speed = SPEED_10;
1113                         break;
1114                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1115                         bp->line_speed = SPEED_100;
1116                         break;
1117                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119                         bp->line_speed = SPEED_1000;
1120                         break;
1121                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122                         bp->line_speed = SPEED_2500;
1123                         break;
1124         }
1125         if (val & MII_BNX2_GP_TOP_AN_FD)
1126                 bp->duplex = DUPLEX_FULL;
1127         else
1128                 bp->duplex = DUPLEX_HALF;
1129         return 0;
1130 }
1131
1132 static int
1133 bnx2_5708s_linkup(struct bnx2 *bp)
1134 {
1135         u32 val;
1136
1137         bp->link_up = 1;
1138         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140                 case BCM5708S_1000X_STAT1_SPEED_10:
1141                         bp->line_speed = SPEED_10;
1142                         break;
1143                 case BCM5708S_1000X_STAT1_SPEED_100:
1144                         bp->line_speed = SPEED_100;
1145                         break;
1146                 case BCM5708S_1000X_STAT1_SPEED_1G:
1147                         bp->line_speed = SPEED_1000;
1148                         break;
1149                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1150                         bp->line_speed = SPEED_2500;
1151                         break;
1152         }
1153         if (val & BCM5708S_1000X_STAT1_FD)
1154                 bp->duplex = DUPLEX_FULL;
1155         else
1156                 bp->duplex = DUPLEX_HALF;
1157
1158         return 0;
1159 }
1160
1161 static int
1162 bnx2_5706s_linkup(struct bnx2 *bp)
1163 {
1164         u32 bmcr, local_adv, remote_adv, common;
1165
1166         bp->link_up = 1;
1167         bp->line_speed = SPEED_1000;
1168
1169         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170         if (bmcr & BMCR_FULLDPLX) {
1171                 bp->duplex = DUPLEX_FULL;
1172         }
1173         else {
1174                 bp->duplex = DUPLEX_HALF;
1175         }
1176
1177         if (!(bmcr & BMCR_ANENABLE)) {
1178                 return 0;
1179         }
1180
1181         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183
1184         common = local_adv & remote_adv;
1185         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186
1187                 if (common & ADVERTISE_1000XFULL) {
1188                         bp->duplex = DUPLEX_FULL;
1189                 }
1190                 else {
1191                         bp->duplex = DUPLEX_HALF;
1192                 }
1193         }
1194
1195         return 0;
1196 }
1197
1198 static int
1199 bnx2_copper_linkup(struct bnx2 *bp)
1200 {
1201         u32 bmcr;
1202
1203         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1204         if (bmcr & BMCR_ANENABLE) {
1205                 u32 local_adv, remote_adv, common;
1206
1207                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209
1210                 common = local_adv & (remote_adv >> 2);
1211                 if (common & ADVERTISE_1000FULL) {
1212                         bp->line_speed = SPEED_1000;
1213                         bp->duplex = DUPLEX_FULL;
1214                 }
1215                 else if (common & ADVERTISE_1000HALF) {
1216                         bp->line_speed = SPEED_1000;
1217                         bp->duplex = DUPLEX_HALF;
1218                 }
1219                 else {
1220                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222
1223                         common = local_adv & remote_adv;
1224                         if (common & ADVERTISE_100FULL) {
1225                                 bp->line_speed = SPEED_100;
1226                                 bp->duplex = DUPLEX_FULL;
1227                         }
1228                         else if (common & ADVERTISE_100HALF) {
1229                                 bp->line_speed = SPEED_100;
1230                                 bp->duplex = DUPLEX_HALF;
1231                         }
1232                         else if (common & ADVERTISE_10FULL) {
1233                                 bp->line_speed = SPEED_10;
1234                                 bp->duplex = DUPLEX_FULL;
1235                         }
1236                         else if (common & ADVERTISE_10HALF) {
1237                                 bp->line_speed = SPEED_10;
1238                                 bp->duplex = DUPLEX_HALF;
1239                         }
1240                         else {
1241                                 bp->line_speed = 0;
1242                                 bp->link_up = 0;
1243                         }
1244                 }
1245         }
1246         else {
1247                 if (bmcr & BMCR_SPEED100) {
1248                         bp->line_speed = SPEED_100;
1249                 }
1250                 else {
1251                         bp->line_speed = SPEED_10;
1252                 }
1253                 if (bmcr & BMCR_FULLDPLX) {
1254                         bp->duplex = DUPLEX_FULL;
1255                 }
1256                 else {
1257                         bp->duplex = DUPLEX_HALF;
1258                 }
1259         }
1260
1261         return 0;
1262 }
1263
1264 static void
1265 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266 {
1267         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268
1269         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1270         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271         val |= 0x02 << 8;
1272
1273         if (bp->flow_ctrl & FLOW_CTRL_TX)
1274                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275
1276         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277 }
1278
1279 static void
1280 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281 {
1282         int i;
1283         u32 cid;
1284
1285         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286                 if (i == 1)
1287                         cid = RX_RSS_CID;
1288                 bnx2_init_rx_context(bp, cid);
1289         }
1290 }
1291
/* Program the EMAC to match the resolved link state: TX timing, port
 * mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Called after
 * every link resolution; finishes by re-initializing the RX contexts
 * so their flow-control bit tracks bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff program EMAC_TX_LENGTHS —
	 * presumably IPG/slot-time values, with the larger value needed
	 * for 1G half duplex; confirm against the register spec.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no 10M port mode; it falls
				 * through and uses plain MII instead.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII with the 25G mode bit set;
				 * intentional fall-through adds GMII.
				 */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}
1358
1359 static void
1360 bnx2_enable_bmsr1(struct bnx2 *bp)
1361 {
1362         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363             (CHIP_NUM(bp) == CHIP_NUM_5709))
1364                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365                                MII_BNX2_BLK_ADDR_GP_STATUS);
1366 }
1367
1368 static void
1369 bnx2_disable_bmsr1(struct bnx2 *bp)
1370 {
1371         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372             (CHIP_NUM(bp) == CHIP_NUM_5709))
1373                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375 }
1376
1377 static int
1378 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379 {
1380         u32 up1;
1381         int ret = 1;
1382
1383         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1384                 return 0;
1385
1386         if (bp->autoneg & AUTONEG_SPEED)
1387                 bp->advertising |= ADVERTISED_2500baseX_Full;
1388
1389         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391
1392         bnx2_read_phy(bp, bp->mii_up1, &up1);
1393         if (!(up1 & BCM5708S_UP1_2G5)) {
1394                 up1 |= BCM5708S_UP1_2G5;
1395                 bnx2_write_phy(bp, bp->mii_up1, up1);
1396                 ret = 0;
1397         }
1398
1399         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402
1403         return ret;
1404 }
1405
1406 static int
1407 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408 {
1409         u32 up1;
1410         int ret = 0;
1411
1412         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413                 return 0;
1414
1415         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417
1418         bnx2_read_phy(bp, bp->mii_up1, &up1);
1419         if (up1 & BCM5708S_UP1_2G5) {
1420                 up1 &= ~BCM5708S_UP1_2G5;
1421                 bnx2_write_phy(bp, bp->mii_up1, up1);
1422                 ret = 1;
1423         }
1424
1425         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428
1429         return ret;
1430 }
1431
1432 static void
1433 bnx2_enable_forced_2g5(struct bnx2 *bp)
1434 {
1435         u32 uninitialized_var(bmcr);
1436         int err;
1437
1438         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439                 return;
1440
1441         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442                 u32 val;
1443
1444                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1446                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447                         val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448                         val |= MII_BNX2_SD_MISC1_FORCE |
1449                                 MII_BNX2_SD_MISC1_FORCE_2_5G;
1450                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451                 }
1452
1453                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456
1457         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1458                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459                 if (!err)
1460                         bmcr |= BCM5708S_BMCR_FORCE_2500;
1461         } else {
1462                 return;
1463         }
1464
1465         if (err)
1466                 return;
1467
1468         if (bp->autoneg & AUTONEG_SPEED) {
1469                 bmcr &= ~BMCR_ANENABLE;
1470                 if (bp->req_duplex == DUPLEX_FULL)
1471                         bmcr |= BMCR_FULLDPLX;
1472         }
1473         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474 }
1475
1476 static void
1477 bnx2_disable_forced_2g5(struct bnx2 *bp)
1478 {
1479         u32 uninitialized_var(bmcr);
1480         int err;
1481
1482         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483                 return;
1484
1485         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486                 u32 val;
1487
1488                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1490                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491                         val &= ~MII_BNX2_SD_MISC1_FORCE;
1492                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493                 }
1494
1495                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498
1499         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1500                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501                 if (!err)
1502                         bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503         } else {
1504                 return;
1505         }
1506
1507         if (err)
1508                 return;
1509
1510         if (bp->autoneg & AUTONEG_SPEED)
1511                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513 }
1514
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518         u32 val;
1519
1520         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522         if (start)
1523                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524         else
1525                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527
/* Re-evaluate the PHY link state and propagate it: fills in
 * bp->link_up/line_speed/duplex, resolves flow control, reports a
 * state change, and reprograms the MAC.  Caller holds phy_lock.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed remote PHY: link events arrive from firmware,
	 * not from polling here.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Double read of BMSR1 to clear latched status bits; the
	 * enable/disable pair selects the right block on 5709 SerDes.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: BMSR alone is unreliable; derive link-up from the
	 * EMAC status and the autoneg debug shadow register instead.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			/* Release a previously forced-down link. */
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific helpers fill in line_speed/duplex. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G and re-enable autoneg
		 * if parallel detection had disabled it.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1611
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615         int i;
1616         u32 reg;
1617
1618         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619
1620 #define PHY_RESET_MAX_WAIT 100
1621         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622                 udelay(10);
1623
1624                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625                 if (!(reg & BMCR_RESET)) {
1626                         udelay(20);
1627                         break;
1628                 }
1629         }
1630         if (i == PHY_RESET_MAX_WAIT) {
1631                 return -EBUSY;
1632         }
1633         return 0;
1634 }
1635
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639         u32 adv = 0;
1640
1641         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643
1644                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645                         adv = ADVERTISE_1000XPAUSE;
1646                 }
1647                 else {
1648                         adv = ADVERTISE_PAUSE_CAP;
1649                 }
1650         }
1651         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653                         adv = ADVERTISE_1000XPSE_ASYM;
1654                 }
1655                 else {
1656                         adv = ADVERTISE_PAUSE_ASYM;
1657                 }
1658         }
1659         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662                 }
1663                 else {
1664                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665                 }
1666         }
1667         return adv;
1668 }
1669
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671
/* Configure link parameters on a firmware-managed (remote) PHY.
 *
 * Packs the autoneg/speed/duplex request and pause advertisement into
 * a netlink-style argument word, writes it to shared memory, and issues
 * the SET_LINK firmware command.  The phy_lock is dropped across the
 * firmware handshake (see the sparse annotations) — presumably because
 * bnx2_fw_sync() waits for firmware; confirm before relying on it.
 * Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every mode enabled in
		 * bp->advertising.
		 */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	/* Pass the argument word, then kick the firmware command. */
	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1730
/* Configure a SerDes PHY for the requested link settings.
 *
 * Handles both the forced-speed case (autoneg disabled) and the
 * autonegotiation case.  Called with phy_lock held; the lock is dropped
 * temporarily around the msleep() used to make a link-down visible to
 * the partner.  Firmware-managed PHYs are delegated to
 * bnx2_setup_remote_phy().
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce so the partner renegotiates. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): clears BMCR bit 13;
				 * presumably part of deselecting forced
				 * 2.5G mode on the 5709 SerDes -- confirm
				 * against the PHY documentation. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees the drop,
				 * then apply the forced settings. */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed in the PHY; just re-resolve
			 * flow control and reprogram the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1847
/* All speeds advertisable on a fibre (SerDes) port.  NOTE: this macro
 * reads bp->phy_flags and so may only be used where a local "bp" is in
 * scope.  The expansion is wrapped in parentheses: without them, a use
 * such as "ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg" parses as
 * "cond ? a : (b | ADVERTISED_Autoneg)" because ?: binds looser than |,
 * silently dropping the extra bits whenever the 2.5G branch is taken.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* All speeds advertisable on a copper port. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register bits covering all 10/100 speeds. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits covering all gigabit speeds. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866         u32 link;
1867
1868         if (bp->phy_port == PORT_TP)
1869                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870         else
1871                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872
1873         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874                 bp->req_line_speed = 0;
1875                 bp->autoneg |= AUTONEG_SPEED;
1876                 bp->advertising = ADVERTISED_Autoneg;
1877                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878                         bp->advertising |= ADVERTISED_10baseT_Half;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880                         bp->advertising |= ADVERTISED_10baseT_Full;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882                         bp->advertising |= ADVERTISED_100baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884                         bp->advertising |= ADVERTISED_100baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886                         bp->advertising |= ADVERTISED_1000baseT_Full;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888                         bp->advertising |= ADVERTISED_2500baseX_Full;
1889         } else {
1890                 bp->autoneg = 0;
1891                 bp->advertising = 0;
1892                 bp->req_duplex = DUPLEX_FULL;
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894                         bp->req_line_speed = SPEED_10;
1895                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896                                 bp->req_duplex = DUPLEX_HALF;
1897                 }
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899                         bp->req_line_speed = SPEED_100;
1900                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901                                 bp->req_duplex = DUPLEX_HALF;
1902                 }
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904                         bp->req_line_speed = SPEED_1000;
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906                         bp->req_line_speed = SPEED_2500;
1907         }
1908 }
1909
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914                 bnx2_set_default_remote_link(bp);
1915                 return;
1916         }
1917
1918         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919         bp->req_line_speed = 0;
1920         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921                 u32 reg;
1922
1923                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924
1925                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928                         bp->autoneg = 0;
1929                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1930                         bp->req_duplex = DUPLEX_FULL;
1931                 }
1932         } else
1933                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935
1936 static void
1937 bnx2_send_heart_beat(struct bnx2 *bp)
1938 {
1939         u32 msg;
1940         u32 addr;
1941
1942         spin_lock(&bp->indirect_lock);
1943         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947         spin_unlock(&bp->indirect_lock);
1948 }
1949
/* Handle a link event reported by the firmware-managed (remote) PHY.
 *
 * Decodes the BNX2_LINK_STATUS word from shared memory into
 * bp->link_up / line_speed / duplex / flow_ctrl / phy_port, reports a
 * link change if one occurred, and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware expects a periodic driver pulse; answer immediately
	 * if it reports the heart beat as expired. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case overrides the duplex then falls through
		 * to the matching *FULL case to set the speed. */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: if speed or flow-control autoneg is off,
		 * use the forced setting (full duplex only); otherwise
		 * take the negotiated TX/RX pause bits from firmware. */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may have changed (presumably a dual-media
		 * board); reload defaults for the new port type. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2030
2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034         u32 evt_code;
2035
2036         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037         switch (evt_code) {
2038                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2039                         bnx2_remote_phy_event(bp);
2040                         break;
2041                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042                 default:
2043                         bnx2_send_heart_beat(bp);
2044                         break;
2045         }
2046         return 0;
2047 }
2048
/* Configure a copper PHY for the requested link settings.
 *
 * In autoneg mode the advertisement registers are rewritten (and
 * autoneg restarted) only if they differ from what is wanted.  In
 * forced mode the BMCR is reprogrammed, bouncing the link first if it
 * is currently up.  Called with phy_lock held; dropped around the
 * msleep() used for the link bounce.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv = 0;
		u32 new_adv1000 = 0;

		/* Current advertisements, masked down to the bits we
		 * manage, for comparison against the desired values. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		/* Rewrite and restart autoneg only when something
		 * actually changed or autoneg was disabled. */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current (not historical) state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2138
2139 static int
2140 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2141 __releases(&bp->phy_lock)
2142 __acquires(&bp->phy_lock)
2143 {
2144         if (bp->loopback == MAC_LOOPBACK)
2145                 return 0;
2146
2147         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2148                 return bnx2_setup_serdes_phy(bp, port);
2149         }
2150         else {
2151                 return bnx2_setup_copper_phy(bp);
2152         }
2153 }
2154
/* Initialize the 5709 SerDes PHY.
 *
 * This PHY presents its IEEE-style registers at an offset of 0x10 and
 * groups vendor registers into blocks selected through
 * MII_BNX2_BLK_ADDR.  Record the shifted MII register offsets, then
 * program fiber mode, the 2.5G capability bit, and the BAM/CL73
 * next-page autonegotiation controls.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable 2.5G advertisement only on 2.5G-capable boards. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM (Broadcom autoneg) next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable CL73 BAM, its station manager, and next-page after
	 * base-page. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave block addressing back at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2204
/* Initialize the 5708 SerDes PHY.
 *
 * Programs fiber mode with auto-detection, PLL early-detect, and the
 * 2.5G capability bit, then applies board-specific TX amplitude
 * tuning: a fixed adjustment for early chip revisions and, on
 * backplane boards, a TX control value taken from NVRAM config.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G where the board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may carry a board-specific TX control value; apply it
	 * on backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2262
/* Initialize the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a 5706-specific GP_HW_CTL0
 * setting, and programs the extended packet length according to the
 * current MTU via vendor-specific PHY registers 0x18/0x1c (register
 * values are Broadcom-supplied magic -- only the extended packet
 * length intent is documented here).
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 written to GP_HW_CTL0 on 5706 only;
	 * meaning not visible here -- presumably a chip errata fix. */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2300
/* Initialize a copper PHY.
 *
 * Applies optional board workarounds (CRC fix sequence, early-DAC
 * disable), sets the extended packet length bit according to MTU, and
 * enables ethernet@wirespeed.  Registers 0x10/0x17/0x18/0x1c and the
 * DSP values are vendor-specific Broadcom magic.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* NOTE(review): vendor DSP write sequence; presumably a
	 * workaround for a CRC-related PHY errata (flag name) --
	 * values are opaque here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2352
2353
2354 static int
2355 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2356 __releases(&bp->phy_lock)
2357 __acquires(&bp->phy_lock)
2358 {
2359         u32 val;
2360         int rc = 0;
2361
2362         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2363         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2364
2365         bp->mii_bmcr = MII_BMCR;
2366         bp->mii_bmsr = MII_BMSR;
2367         bp->mii_bmsr1 = MII_BMSR;
2368         bp->mii_adv = MII_ADVERTISE;
2369         bp->mii_lpa = MII_LPA;
2370
2371         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2372
2373         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2374                 goto setup_phy;
2375
2376         bnx2_read_phy(bp, MII_PHYSID1, &val);
2377         bp->phy_id = val << 16;
2378         bnx2_read_phy(bp, MII_PHYSID2, &val);
2379         bp->phy_id |= val & 0xffff;
2380
2381         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2382                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2383                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2384                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2385                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2386                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2387                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2388         }
2389         else {
2390                 rc = bnx2_init_copper_phy(bp, reset_phy);
2391         }
2392
2393 setup_phy:
2394         if (!rc)
2395                 rc = bnx2_setup_phy(bp, bp->phy_port);
2396
2397         return rc;
2398 }
2399
2400 static int
2401 bnx2_set_mac_loopback(struct bnx2 *bp)
2402 {
2403         u32 mac_mode;
2404
2405         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2406         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2407         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2408         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2409         bp->link_up = 1;
2410         return 0;
2411 }
2412
2413 static int bnx2_test_link(struct bnx2 *);
2414
2415 static int
2416 bnx2_set_phy_loopback(struct bnx2 *bp)
2417 {
2418         u32 mac_mode;
2419         int rc, i;
2420
2421         spin_lock_bh(&bp->phy_lock);
2422         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2423                             BMCR_SPEED1000);
2424         spin_unlock_bh(&bp->phy_lock);
2425         if (rc)
2426                 return rc;
2427
2428         for (i = 0; i < 10; i++) {
2429                 if (bnx2_test_link(bp) == 0)
2430                         break;
2431                 msleep(100);
2432         }
2433
2434         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2435         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2436                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2437                       BNX2_EMAC_MODE_25G_MODE);
2438
2439         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2440         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2441         bp->link_up = 1;
2442         return 0;
2443 }
2444
/* Dump management-CPU (MCP) and shared-memory state to the kernel log
 * for firmware-hang diagnosis.  Register addresses for the MCP state
 * words differ between the 5709 and earlier chips.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* NOTE(review): the program counter is read twice on purpose,
	 * presumably so the two samples show whether the MCP is still
	 * advancing -- confirm before "simplifying". */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2487
/* Post a message to the bootcode through the driver mailbox and,
 * unless @ack is zero, wait for the firmware to acknowledge it.
 *
 * Returns 0 on success (or for fire-and-forget WAIT0 messages),
 * -EBUSY if no ack arrives within BNX2_FW_ACK_TIME_OUT_MS, or -EIO if
 * the firmware acked with a failure status.  With @silent set, a
 * timeout is not logged.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Each message carries an incrementing sequence number so the
	 * ack read back from the firmware mailbox can be matched to
	 * this particular request. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't report a timeout back to the firmware. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2533
/* Initialize the 5709 context memory: kick the hardware memory init,
 * then program the host page table with the DMA address of each
 * pre-allocated context page, polling every write for completion.
 *
 * Returns 0 on success, -EBUSY if the hardware doesn't complete an
 * operation in time, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Page size is encoded as log2(page size) - 8 in bits 16+. */
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT is self-clearing; poll briefly for it to finish. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Load the 64-bit page address into DATA0/DATA1, then
		 * request the table write and poll the self-clearing
		 * WRITE_REQ bit. */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2581
2582 static void
2583 bnx2_init_context(struct bnx2 *bp)
2584 {
2585         u32 vcid;
2586
2587         vcid = 96;
2588         while (vcid) {
2589                 u32 vcid_addr, pcid_addr, offset;
2590                 int i;
2591
2592                 vcid--;
2593
2594                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2595                         u32 new_vcid;
2596
2597                         vcid_addr = GET_PCID_ADDR(vcid);
2598                         if (vcid & 0x8) {
2599                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2600                         }
2601                         else {
2602                                 new_vcid = vcid;
2603                         }
2604                         pcid_addr = GET_PCID_ADDR(new_vcid);
2605                 }
2606                 else {
2607                         vcid_addr = GET_CID_ADDR(vcid);
2608                         pcid_addr = vcid_addr;
2609                 }
2610
2611                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2612                         vcid_addr += (i << PHY_CTX_SHIFT);
2613                         pcid_addr += (i << PHY_CTX_SHIFT);
2614
2615                         BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2616                         BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2617
2618                         /* Zero out the context. */
2619                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2620                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2621                 }
2622         }
2623 }
2624
2625 static int
2626 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2627 {
2628         u16 *good_mbuf;
2629         u32 good_mbuf_cnt;
2630         u32 val;
2631
2632         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2633         if (good_mbuf == NULL)
2634                 return -ENOMEM;
2635
2636         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2637                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2638
2639         good_mbuf_cnt = 0;
2640
2641         /* Allocate a bunch of mbufs and save the good ones in an array. */
2642         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2643         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2644                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2645                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2646
2647                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2648
2649                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2650
2651                 /* The addresses with Bit 9 set are bad memory blocks. */
2652                 if (!(val & (1 << 9))) {
2653                         good_mbuf[good_mbuf_cnt] = (u16) val;
2654                         good_mbuf_cnt++;
2655                 }
2656
2657                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2658         }
2659
2660         /* Free the good ones back to the mbuf pool thus discarding
2661          * all the bad ones. */
2662         while (good_mbuf_cnt) {
2663                 good_mbuf_cnt--;
2664
2665                 val = good_mbuf[good_mbuf_cnt];
2666                 val = (val << 9) | val | 1;
2667
2668                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2669         }
2670         kfree(good_mbuf);
2671         return 0;
2672 }
2673
2674 static void
2675 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2676 {
2677         u32 val;
2678
2679         val = (mac_addr[0] << 8) | mac_addr[1];
2680
2681         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2682
2683         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2684                 (mac_addr[4] << 8) | mac_addr[5];
2685
2686         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2687 }
2688
2689 static inline int
2690 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2691 {
2692         dma_addr_t mapping;
2693         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2694         struct bnx2_rx_bd *rxbd =
2695                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2696         struct page *page = alloc_page(gfp);
2697
2698         if (!page)
2699                 return -ENOMEM;
2700         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2701                                PCI_DMA_FROMDEVICE);
2702         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2703                 __free_page(page);
2704                 return -EIO;
2705         }
2706
2707         rx_pg->page = page;
2708         dma_unmap_addr_set(rx_pg, mapping, mapping);
2709         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2710         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2711         return 0;
2712 }
2713
2714 static void
2715 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2716 {
2717         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2718         struct page *page = rx_pg->page;
2719
2720         if (!page)
2721                 return;
2722
2723         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2724                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2725
2726         __free_page(page);
2727         rx_pg->page = NULL;
2728 }
2729
2730 static inline int
2731 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2732 {
2733         u8 *data;
2734         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2735         dma_addr_t mapping;
2736         struct bnx2_rx_bd *rxbd =
2737                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2738
2739         data = kmalloc(bp->rx_buf_size, gfp);
2740         if (!data)
2741                 return -ENOMEM;
2742
2743         mapping = dma_map_single(&bp->pdev->dev,
2744                                  get_l2_fhdr(data),
2745                                  bp->rx_buf_use_size,
2746                                  PCI_DMA_FROMDEVICE);
2747         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2748                 kfree(data);
2749                 return -EIO;
2750         }
2751
2752         rx_buf->data = data;
2753         dma_unmap_addr_set(rx_buf, mapping, mapping);
2754
2755         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2756         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2757
2758         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2759
2760         return 0;
2761 }
2762
2763 static int
2764 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2765 {
2766         struct status_block *sblk = bnapi->status_blk.msi;
2767         u32 new_link_state, old_link_state;
2768         int is_set = 1;
2769
2770         new_link_state = sblk->status_attn_bits & event;
2771         old_link_state = sblk->status_attn_bits_ack & event;
2772         if (new_link_state != old_link_state) {
2773                 if (new_link_state)
2774                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2775                 else
2776                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2777         } else
2778                 is_set = 0;
2779
2780         return is_set;
2781 }
2782
/* Handle PHY-related attention events reported through the status
 * block: a link-state change and/or a timer abort (the latter drives
 * the remote-PHY link path).  Runs under bp->phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2796
/* Read the current TX consumer index from the status block.  An index
 * whose low bits equal BNX2_MAX_TX_DESC_CNT is stepped over —
 * presumably that slot holds the next-page BD rather than a packet
 * descriptor (TODO: confirm against the ring layout).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2810
/* Reclaim completed TX descriptors for this NAPI instance's ring, up
 * to @budget packets: unmap the head and fragment DMA buffers, free
 * the skbs, update BQL accounting, and wake the TX queue if it was
 * stopped and enough descriptors are now free.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until every BD of the GSO packet
			 * (head + nr_frags + 1) has completed; the extra
			 * increment accounts for the next-page BD slot. */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's page. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the TX lock to close the race with the
		 * xmit path stopping the queue concurrently. */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2905
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without allocating new pages, copying each page
 * pointer, DMA mapping, and BD address forward.  If @skb is non-NULL,
 * its last frag page is first reclaimed into the ring and the skb is
 * freed (see the comment below).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* When prod == cons the entry is already in place. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2963
/* Recycle an RX data buffer: hand @data from the consumer slot @cons
 * to the producer slot @prod together with its DMA mapping and BD
 * address, so the chip can reuse it without a fresh allocation.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header region (synced for the CPU in the RX path)
	 * back to the device before the buffer is reposted. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: mapping and BD contents are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2993
/* Build an skb around the received buffer @data.  @ring_idx packs the
 * consumer index in the high 16 bits and the producer index in the low
 * 16 bits.  For split/jumbo frames (@hdr_len != 0) the first @hdr_len
 * bytes live in @data and the rest is attached as page frags from the
 * page ring.  The "+ 4" adjustments presumably account for the
 * trailing frame checksum — TODO confirm against the l2_fhdr layout.
 *
 * Returns the skb, or NULL on allocation failure after recycling the
 * buffer (and any remaining pages) back to the rings.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Replace the buffer we are consuming; on failure, recycle it
	 * (and the frag pages) and drop the packet. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Point skb->data at the packet, just past the l2_fhdr. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* A final fragment of 4 bytes or less is entirely
			 * checksum; trim it off and recycle the pages. */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3098
/* Read the current RX consumer index from the status block.  As with
 * the TX side, an index whose low bits equal BNX2_MAX_RX_DESC_CNT is
 * stepped over (presumably the next-page BD slot — TODO confirm).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3112
/* NAPI RX handler: process up to @budget completed RX descriptors.
 * Small packets are copied into a fresh skb and the buffer recycled;
 * larger ones are wrapped via bnx2_rx_skb() (attaching page frags for
 * split/jumbo frames).  VLAN, checksum, and RSS hash offload results
 * from the l2_fhdr are applied before handing the skb to GRO.  The
 * updated producer indices are written back to the chip at the end.
 *
 * Returns the number of packets received.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the l2_fhdr + copy-threshold region; the
		 * full buffer is unmapped later if needed. */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Warm the cache for the next descriptor's header. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine how much of the frame is in the data buffer
		 * versus the page ring. */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop frames with hardware-reported errors, recycling
		 * their buffers and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged
		 * (0x8100 = ETH_P_8021Q). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when it reports a
		 * TCP/UDP frame with no checksum errors. */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3274
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced (MSI is not shared,
 * so there is no need to check whether the interrupt is ours).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the cache with the status block before NAPI reads it. */
	prefetch(bnapi->status_blk.msi);
	/* Ack the interrupt and keep it masked; it is re-enabled by the
	 * INT_ACK_CMD write in the NAPI poll handler when work is done.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3297
/* One-shot MSI handler.  Identical to bnx2_msi() except that no
 * explicit ack/mask register write is issued up front (one-shot mode;
 * NOTE(review): the hardware is presumably expected to self-mask until
 * re-armed by the poll handler -- confirm against chip documentation).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the cache with the status block before NAPI reads it. */
	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3314
/* INTx interrupt handler.  Unlike bnx2_msi(), this must first decide
 * whether the device actually raised the interrupt, since the line may
 * be shared.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Ack the interrupt and keep it masked until NAPI completes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Record the status index being acted on so the poll
		 * loop can tell whether new events arrived meanwhile.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3353
3354 static inline int
3355 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3356 {
3357         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3358         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3359
3360         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3361             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3362                 return 1;
3363         return 0;
3364 }
3365
3366 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3367                                  STATUS_ATTN_BITS_TIMER_ABORT)
3368
3369 static inline int
3370 bnx2_has_work(struct bnx2_napi *bnapi)
3371 {
3372         struct status_block *sblk = bnapi->status_blk.msi;
3373
3374         if (bnx2_has_fast_work(bnapi))
3375                 return 1;
3376
3377 #ifdef BCM_CNIC
3378         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3379                 return 1;
3380 #endif
3381
3382         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3383             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3384                 return 1;
3385
3386         return 0;
3387 }
3388
/* Work around occasionally missed MSI interrupts.  If work is pending
 * but the status index has not advanced since the previous check,
 * assume the MSI was lost: pulse the MSI enable bit off and back on,
 * then invoke the MSI handler directly to restart processing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* No progress since the last check: toggle MSI
			 * enable and service the interrupt by hand.
			 */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3410
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver, if any, and
 * record the tag it returns so bnx2_has_work() can detect further
 * CNIC events.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops may be unregistered concurrently; RCU protects the
	 * dereference for the duration of the handler call.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3427
/* Check for pending link-related attention events and service them.
 * An event is pending when its bit differs between the raw and the
 * acknowledged attention words in the status block.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3447
3448 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3449                           int work_done, int budget)
3450 {
3451         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3452         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3453
3454         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3455                 bnx2_tx_int(bp, bnapi, 0);
3456
3457         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3458                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3459
3460         return work_done;
3461 }
3462
/* NAPI poll handler for MSI-X vectors: fast-path (RX/TX) work only;
 * link and CNIC events are handled by the base vector in bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;	/* stay scheduled; poll again later */

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Tell the chip how far we processed and
			 * re-enable this vector's interrupt.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3489
/* NAPI poll handler for INTx/MSI: services link attentions, RX/TX
 * work, and (when built in) CNIC events on the single vector.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: one write updates the index
				 * and re-enables the interrupt.
				 */
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with the interrupt
			 * still masked, then unmask with a second write.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3538
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's RX filtering (promiscuous / multicast hash /
 * unicast perfect-match) from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in the frame only when hw VLAN stripping is
	 * off and the device allows it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address using the low byte of the
		 * little-endian CRC: the top 3 bits of that byte select
		 * the register, the low 5 bits select the bit.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Fall back to promiscuous when there are more unicast
	 * addresses than perfect-match slots.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new mode, then enable it. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3627
3628 static int
3629 check_fw_section(const struct firmware *fw,
3630                  const struct bnx2_fw_file_section *section,
3631                  u32 alignment, bool non_empty)
3632 {
3633         u32 offset = be32_to_cpu(section->offset);
3634         u32 len = be32_to_cpu(section->len);
3635
3636         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3637                 return -EINVAL;
3638         if ((non_empty && len == 0) || len > fw->size - offset ||
3639             len & (alignment - 1))
3640                 return -EINVAL;
3641         return 0;
3642 }
3643
3644 static int
3645 check_mips_fw_entry(const struct firmware *fw,
3646                     const struct bnx2_mips_fw_file_entry *entry)
3647 {
3648         if (check_fw_section(fw, &entry->text, 4, true) ||
3649             check_fw_section(fw, &entry->data, 4, false) ||
3650             check_fw_section(fw, &entry->rodata, 4, false))
3651                 return -EINVAL;
3652         return 0;
3653 }
3654
3655 static void bnx2_release_firmware(struct bnx2 *bp)
3656 {
3657         if (bp->rv2p_firmware) {
3658                 release_firmware(bp->mips_firmware);
3659                 release_firmware(bp->rv2p_firmware);
3660                 bp->rv2p_firmware = NULL;
3661         }
3662 }
3663
/* Load and validate the MIPS and RV2P firmware images from userspace.
 * The file names depend on the chip generation (5709 vs. earlier) and,
 * for RV2P on 5709, on the chip revision.  On success both
 * bp->mips_firmware and bp->rv2p_firmware are held; on any failure
 * both are released.  Returns 0 or a negative errno.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the firmware files for this chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Sanity-check every section header before the images are used. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

	/* Unwind in reverse order of acquisition. */
err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3723
3724 static int bnx2_request_firmware(struct bnx2 *bp)
3725 {
3726         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3727 }
3728
3729 static u32
3730 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3731 {
3732         switch (idx) {
3733         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3734                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3735                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3736                 break;
3737         }
3738         return rv2p_code;
3739 }
3740
/* Download one RV2P processor's firmware.  Instructions are 64 bits
 * wide: each is written through the INSTR_HIGH/INSTR_LOW staging
 * registers and committed via the processor's address/command
 * register.  After the main image, up to 8 fixup locations are
 * patched, then the processor is reset (un-stall happens later).
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own command register and write command. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		/* Commit the staged 8-byte instruction at index i/8. */
		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Patch fixup locations: rewrite the instruction whose low word
	 * is at index 'loc' with the fixed-up value.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3800
3801 static int
3802 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3803             const struct bnx2_mips_fw_file_entry *fw_entry)
3804 {
3805         u32 addr, len, file_offset;
3806         __be32 *data;
3807         u32 offset;
3808         u32 val;
3809
3810         /* Halt the CPU. */
3811         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3812         val |= cpu_reg->mode_value_halt;
3813         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3814         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3815
3816         /* Load the Text area. */
3817         addr = be32_to_cpu(fw_entry->text.addr);
3818         len = be32_to_cpu(fw_entry->text.len);
3819         file_offset = be32_to_cpu(fw_entry->text.offset);
3820         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3821
3822         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3823         if (len) {
3824                 int j;
3825
3826                 for (j = 0; j < (len / 4); j++, offset += 4)
3827                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3828         }
3829
3830         /* Load the Data area. */
3831         addr = be32_to_cpu(fw_entry->data.addr);
3832         len = be32_to_cpu(fw_entry->data.len);
3833         file_offset = be32_to_cpu(fw_entry->data.offset);
3834         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3835
3836         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3837         if (len) {
3838                 int j;
3839
3840                 for (j = 0; j < (len / 4); j++, offset += 4)
3841                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3842         }
3843
3844         /* Load the Read-Only area. */
3845         addr = be32_to_cpu(fw_entry->rodata.addr);
3846         len = be32_to_cpu(fw_entry->rodata.len);
3847         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3848         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3849
3850         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3851         if (len) {
3852                 int j;
3853
3854                 for (j = 0; j < (len / 4); j++, offset += 4)
3855                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3856         }
3857
3858         /* Clear the pre-fetch instruction. */
3859         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3860
3861         val = be32_to_cpu(fw_entry->start_addr);
3862         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3863
3864         /* Start the CPU. */
3865         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3866         val &= ~cpu_reg->mode_value_halt;
3867         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3868         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3869
3870         return 0;
3871 }
3872
3873 static int
3874 bnx2_init_cpus(struct bnx2 *bp)
3875 {
3876         const struct bnx2_mips_fw_file *mips_fw =
3877                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3878         const struct bnx2_rv2p_fw_file *rv2p_fw =
3879                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3880         int rc;
3881
3882         /* Initialize the RV2P processor. */
3883         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3884         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3885
3886         /* Initialize the RX Processor. */
3887         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3888         if (rc)
3889                 goto init_cpu_err;
3890
3891         /* Initialize the TX Processor. */
3892         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3893         if (rc)
3894                 goto init_cpu_err;
3895
3896         /* Initialize the TX Patch-up Processor. */
3897         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3898         if (rc)
3899                 goto init_cpu_err;
3900
3901         /* Initialize the Completion Processor. */
3902         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3903         if (rc)
3904                 goto init_cpu_err;
3905
3906         /* Initialize the Command Processor. */
3907         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3908
3909 init_cpu_err:
3910         return rc;
3911 }
3912
/* Transition the chip between PCI power states.
 *
 * PCI_D0: wake the chip (with the required delay when coming out of
 * D3hot) and clear the wake-packet EMAC/RPM configuration.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, reprogram the PHY/MAC to
 * receive magic/ACPI wake packets, notify the firmware, then drop
 * into D3hot via the PMCSR register.
 *
 * Only D0 and D3hot are supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field (back to D0) and the PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake packets and disable wake-packet
		 * reception.
		 */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* Renegotiate the link down to 10/100 for copper
			 * before suspending (restored below).
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = BNX2_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			BNX2_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
					0xffffffff);
			}
			BNX2_WR(bp, BNX2_EMAC_RX_MODE,
				BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program and enable RX sorting for broadcast
			 * and multicast wake frames.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
			BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
				BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = BNX2_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			BNX2_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot (state field = 3)
			 * when WoL is armed.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4050
4051 static int
4052 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4053 {
4054         u32 val;
4055         int j;
4056
4057         /* Request access to the flash interface. */
4058         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4059         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4060                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4061                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4062                         break;
4063
4064                 udelay(5);
4065         }
4066
4067         if (j >= NVRAM_TIMEOUT_COUNT)
4068                 return -EBUSY;
4069
4070         return 0;
4071 }
4072
4073 static int
4074 bnx2_release_nvram_lock(struct bnx2 *bp)
4075 {
4076         int j;
4077         u32 val;
4078
4079         /* Relinquish nvram interface. */
4080         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4081
4082         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4083                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4084                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4085                         break;
4086
4087                 udelay(5);
4088         }
4089
4090         if (j >= NVRAM_TIMEOUT_COUNT)
4091                 return -EBUSY;
4092
4093         return 0;
4094 }
4095
4096
4097 static int
4098 bnx2_enable_nvram_write(struct bnx2 *bp)
4099 {
4100         u32 val;
4101
4102         val = BNX2_RD(bp, BNX2_MISC_CFG);
4103         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4104
4105         if (bp->flash_info->flags & BNX2_NV_WREN) {
4106                 int j;
4107
4108                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4109                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4110                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4111
4112                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4113                         udelay(5);
4114
4115                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4116                         if (val & BNX2_NVM_COMMAND_DONE)
4117                                 break;
4118                 }
4119
4120                 if (j >= NVRAM_TIMEOUT_COUNT)
4121                         return -EBUSY;
4122         }
4123         return 0;
4124 }
4125
4126 static void
4127 bnx2_disable_nvram_write(struct bnx2 *bp)
4128 {
4129         u32 val;
4130
4131         val = BNX2_RD(bp, BNX2_MISC_CFG);
4132         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4133 }
4134
4135
4136 static void
4137 bnx2_enable_nvram_access(struct bnx2 *bp)
4138 {
4139         u32 val;
4140
4141         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4142         /* Enable both bits, even on read. */
4143         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4144                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4145 }
4146
4147 static void
4148 bnx2_disable_nvram_access(struct bnx2 *bp)
4149 {
4150         u32 val;
4151
4152         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4153         /* Disable both bits, even after read. */
4154         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4155                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4156                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4157 }
4158
4159 static int
4160 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4161 {
4162         u32 cmd;
4163         int j;
4164
4165         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4166                 /* Buffered flash, no erase needed */
4167                 return 0;
4168
4169         /* Build an erase command */
4170         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4171               BNX2_NVM_COMMAND_DOIT;
4172
4173         /* Need to clear DONE bit separately. */
4174         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4175
4176         /* Address of the NVRAM to read from. */
4177         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4178
4179         /* Issue an erase command. */
4180         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4181
4182         /* Wait for completion. */
4183         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4184                 u32 val;
4185
4186                 udelay(5);
4187
4188                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4189                 if (val & BNX2_NVM_COMMAND_DONE)
4190                         break;
4191         }
4192
4193         if (j >= NVRAM_TIMEOUT_COUNT)
4194                 return -EBUSY;
4195
4196         return 0;
4197 }
4198
4199 static int
4200 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4201 {
4202         u32 cmd;
4203         int j;
4204
4205         /* Build the command word. */
4206         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4207
4208         /* Calculate an offset of a buffered flash, not needed for 5709. */
4209         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4210                 offset = ((offset / bp->flash_info->page_size) <<
4211                            bp->flash_info->page_bits) +
4212                           (offset % bp->flash_info->page_size);
4213         }
4214
4215         /* Need to clear DONE bit separately. */
4216         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4217
4218         /* Address of the NVRAM to read from. */
4219         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4220
4221         /* Issue a read command. */
4222         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4223
4224         /* Wait for completion. */
4225         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4226                 u32 val;
4227
4228                 udelay(5);
4229
4230                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4231                 if (val & BNX2_NVM_COMMAND_DONE) {
4232                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4233                         memcpy(ret_val, &v, 4);
4234                         break;
4235                 }
4236         }
4237         if (j >= NVRAM_TIMEOUT_COUNT)
4238                 return -EBUSY;
4239
4240         return 0;
4241 }
4242
4243
4244 static int
4245 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4246 {
4247         u32 cmd;
4248         __be32 val32;
4249         int j;
4250
4251         /* Build the command word. */
4252         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4253
4254         /* Calculate an offset of a buffered flash, not needed for 5709. */
4255         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4256                 offset = ((offset / bp->flash_info->page_size) <<
4257                           bp->flash_info->page_bits) +
4258                          (offset % bp->flash_info->page_size);
4259         }
4260
4261         /* Need to clear DONE bit separately. */
4262         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4263
4264         memcpy(&val32, val, 4);
4265
4266         /* Write the data. */
4267         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4268
4269         /* Address of the NVRAM to write to. */
4270         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4271
4272         /* Issue the write command. */
4273         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4274
4275         /* Wait for completion. */
4276         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4277                 udelay(5);
4278
4279                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4280                         break;
4281         }
4282         if (j >= NVRAM_TIMEOUT_COUNT)
4283                 return -EBUSY;
4284
4285         return 0;
4286 }
4287
/* Probe and record the attached flash/EEPROM part.
 *
 * 5709: a single known part (flash_5709) is always used.
 * Older chips: the strapping bits read from NVM_CFG1 are matched
 * against flash_table[]; if the interface has not yet been
 * reconfigured, the matching entry's CFG1-3/WRITE1 values are also
 * programmed into the chip under the NVRAM lock.
 *
 * Side effects: sets bp->flash_info and bp->flash_size.
 * Returns 0 on success, -ENODEV for an unknown part, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 has a fixed flash configuration; skip probing. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		/* Match the backup strap bits against each entry's
		 * config1 value. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched either loop. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Shared-memory config may override the part's nominal size. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4370
4371 static int
4372 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4373                 int buf_size)
4374 {
4375         int rc = 0;
4376         u32 cmd_flags, offset32, len32, extra;
4377
4378         if (buf_size == 0)
4379                 return 0;
4380
4381         /* Request access to the flash interface. */
4382         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4383                 return rc;
4384
4385         /* Enable access to flash interface */
4386         bnx2_enable_nvram_access(bp);
4387
4388         len32 = buf_size;
4389         offset32 = offset;
4390         extra = 0;
4391
4392         cmd_flags = 0;
4393
4394         if (offset32 & 3) {
4395                 u8 buf[4];
4396                 u32 pre_len;
4397
4398                 offset32 &= ~3;
4399                 pre_len = 4 - (offset & 3);
4400
4401                 if (pre_len >= len32) {
4402                         pre_len = len32;
4403                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4404                                     BNX2_NVM_COMMAND_LAST;
4405                 }
4406                 else {
4407                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4408                 }
4409
4410                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4411
4412                 if (rc)
4413                         return rc;
4414
4415                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4416
4417                 offset32 += 4;
4418                 ret_buf += pre_len;
4419                 len32 -= pre_len;
4420         }
4421         if (len32 & 3) {
4422                 extra = 4 - (len32 & 3);
4423                 len32 = (len32 + 4) & ~3;
4424         }
4425
4426         if (len32 == 4) {
4427                 u8 buf[4];
4428
4429                 if (cmd_flags)
4430                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4431                 else
4432                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4433                                     BNX2_NVM_COMMAND_LAST;
4434
4435                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4436
4437                 memcpy(ret_buf, buf, 4 - extra);
4438         }
4439         else if (len32 > 0) {
4440                 u8 buf[4];
4441
4442                 /* Read the first word. */
4443                 if (cmd_flags)
4444                         cmd_flags = 0;
4445                 else
4446                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4447
4448                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4449
4450                 /* Advance to the next dword. */
4451                 offset32 += 4;
4452                 ret_buf += 4;
4453                 len32 -= 4;
4454
4455                 while (len32 > 4 && rc == 0) {
4456                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4457
4458                         /* Advance to the next dword. */
4459                         offset32 += 4;
4460                         ret_buf += 4;
4461                         len32 -= 4;
4462                 }
4463
4464                 if (rc)
4465                         return rc;
4466
4467                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4468                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4469
4470                 memcpy(ret_buf, buf, 4 - extra);
4471         }
4472
4473         /* Disable access to flash interface */
4474         bnx2_disable_nvram_access(bp);
4475
4476         bnx2_release_nvram_lock(bp);
4477
4478         return rc;
4479 }
4480
4481 static int
4482 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4483                 int buf_size)
4484 {
4485         u32 written, offset32, len32;
4486         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4487         int rc = 0;
4488         int align_start, align_end;
4489
4490         buf = data_buf;
4491         offset32 = offset;
4492         len32 = buf_size;
4493         align_start = align_end = 0;
4494
4495         if ((align_start = (offset32 & 3))) {
4496                 offset32 &= ~3;
4497                 len32 += align_start;
4498                 if (len32 < 4)
4499                         len32 = 4;
4500                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4501                         return rc;
4502         }
4503
4504         if (len32 & 3) {
4505                 align_end = 4 - (len32 & 3);
4506                 len32 += align_end;
4507                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4508                         return rc;
4509         }
4510
4511         if (align_start || align_end) {
4512                 align_buf = kmalloc(len32, GFP_KERNEL);
4513                 if (align_buf == NULL)
4514                         return -ENOMEM;
4515                 if (align_start) {
4516                         memcpy(align_buf, start, 4);
4517                 }
4518                 if (align_end) {
4519                         memcpy(align_buf + len32 - 4, end, 4);
4520                 }
4521                 memcpy(align_buf + align_start, data_buf, buf_size);
4522                 buf = align_buf;
4523         }
4524
4525         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4526                 flash_buffer = kmalloc(264, GFP_KERNEL);
4527                 if (flash_buffer == NULL) {
4528                         rc = -ENOMEM;
4529                         goto nvram_write_end;
4530                 }
4531         }
4532
4533         written = 0;
4534         while ((written < len32) && (rc == 0)) {
4535                 u32 page_start, page_end, data_start, data_end;
4536                 u32 addr, cmd_flags;
4537                 int i;
4538
4539                 /* Find the page_start addr */
4540                 page_start = offset32 + written;
4541                 page_start -= (page_start % bp->flash_info->page_size);
4542                 /* Find the page_end addr */
4543                 page_end = page_start + bp->flash_info->page_size;
4544                 /* Find the data_start addr */
4545                 data_start = (written == 0) ? offset32 : page_start;
4546                 /* Find the data_end addr */
4547                 data_end = (page_end > offset32 + len32) ?
4548                         (offset32 + len32) : page_end;
4549
4550                 /* Request access to the flash interface. */
4551                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4552                         goto nvram_write_end;
4553
4554                 /* Enable access to flash interface */
4555                 bnx2_enable_nvram_access(bp);
4556
4557                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4558                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4559                         int j;
4560
4561                         /* Read the whole page into the buffer
4562                          * (non-buffer flash only) */
4563                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4564                                 if (j == (bp->flash_info->page_size - 4)) {
4565                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4566                                 }
4567                                 rc = bnx2_nvram_read_dword(bp,
4568                                         page_start + j,
4569                                         &flash_buffer[j],
4570                                         cmd_flags);
4571
4572                                 if (rc)
4573                                         goto nvram_write_end;
4574
4575                                 cmd_flags = 0;
4576                         }
4577                 }
4578
4579                 /* Enable writes to flash interface (unlock write-protect) */
4580                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4581                         goto nvram_write_end;
4582
4583                 /* Loop to write back the buffer data from page_start to
4584                  * data_start */
4585                 i = 0;
4586                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4587                         /* Erase the page */
4588                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4589                                 goto nvram_write_end;
4590
4591                         /* Re-enable the write again for the actual write */
4592                         bnx2_enable_nvram_write(bp);
4593
4594                         for (addr = page_start; addr < data_start;
4595                                 addr += 4, i += 4) {
4596
4597                                 rc = bnx2_nvram_write_dword(bp, addr,
4598                                         &flash_buffer[i], cmd_flags);
4599
4600                                 if (rc != 0)
4601                                         goto nvram_write_end;
4602
4603                                 cmd_flags = 0;
4604                         }
4605                 }
4606
4607                 /* Loop to write the new data from data_start to data_end */
4608                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4609                         if ((addr == page_end - 4) ||
4610                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4611                                  (addr == data_end - 4))) {
4612
4613                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4614                         }
4615                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4616                                 cmd_flags);
4617
4618                         if (rc != 0)
4619                                 goto nvram_write_end;
4620
4621                         cmd_flags = 0;
4622                         buf += 4;
4623                 }
4624
4625                 /* Loop to write back the buffer data from data_end
4626                  * to page_end */
4627                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4628                         for (addr = data_end; addr < page_end;
4629                                 addr += 4, i += 4) {
4630
4631                                 if (addr == page_end-4) {
4632                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4633                                 }
4634                                 rc = bnx2_nvram_write_dword(bp, addr,
4635                                         &flash_buffer[i], cmd_flags);
4636
4637                                 if (rc != 0)
4638                                         goto nvram_write_end;
4639
4640                                 cmd_flags = 0;
4641                         }
4642                 }
4643
4644                 /* Disable writes to flash interface (lock write-protect) */
4645                 bnx2_disable_nvram_write(bp);
4646
4647                 /* Disable access to flash interface */
4648                 bnx2_disable_nvram_access(bp);
4649                 bnx2_release_nvram_lock(bp);
4650
4651                 /* Increment written */
4652                 written += data_end - data_start;
4653         }
4654
4655 nvram_write_end:
4656         kfree(flash_buffer);
4657         kfree(align_buf);
4658         return rc;
4659 }
4660
4661 static void
4662 bnx2_init_fw_cap(struct bnx2 *bp)
4663 {
4664         u32 val, sig = 0;
4665
4666         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4667         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4668
4669         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4670                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4671
4672         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4673         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4674                 return;
4675
4676         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4677                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4678                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4679         }
4680
4681         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4682             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4683                 u32 link;
4684
4685                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4686
4687                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4688                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4689                         bp->phy_port = PORT_FIBRE;
4690                 else
4691                         bp->phy_port = PORT_TP;
4692
4693                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4694                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4695         }
4696
4697         if (netif_running(bp->dev) && sig)
4698                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4699 }
4700
/* Map the on-chip MSI-X table and PBA through GRC windows 2 and 3 so
 * the host can reach them via the register BAR.  Separate-window mode
 * is selected first via BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4709
/* Reset the chip core and re-synchronize with the bootcode firmware.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value describing the reason for the
 * reset; passed to the firmware in the pre- and post-reset handshakes.
 *
 * The sequence (quiesce DMA, handshake, reset, endian check, second
 * handshake, capability refresh, chip-specific workarounds) is
 * order-sensitive; do not reorder statements.
 *
 * Context: may sleep (msleep, bnx2_fw_sync).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* 5706/5708: clear the DMA and host-coalesce enables; the
		 * read-back flushes the posted write before the delay. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* 5709: turn off the core DMA enable and poll the PCI
		 * device status until no transactions are pending
		 * (up to ~100 ms). */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the read-back
		 * flushes the posted write before the settle delay. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via CORE_RST_REQ in PCICFG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4834
4835 static int
4836 bnx2_init_chip(struct bnx2 *bp)
4837 {
4838         u32 val, mtu;
4839         int rc, i;
4840
4841         /* Make sure the interrupt is not active. */
4842         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4843
4844         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4845               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4846 #ifdef __BIG_ENDIAN
4847               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4848 #endif
4849               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4850               DMA_READ_CHANS << 12 |
4851               DMA_WRITE_CHANS << 16;
4852
4853         val |= (0x2 << 20) | (1 << 11);
4854
4855         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4856                 val |= (1 << 23);
4857
4858         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4859             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4860                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4861
4862         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4863
4864         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4865                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4866                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4867                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4868         }
4869
4870         if (bp->flags & BNX2_FLAG_PCIX) {
4871                 u16 val16;
4872
4873                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4874                                      &val16);
4875                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4876                                       val16 & ~PCI_X_CMD_ERO);
4877         }
4878
4879         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4880                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4881                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4882                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4883
4884         /* Initialize context mapping and zero out the quick contexts.  The
4885          * context block must have already been enabled. */
4886         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4887                 rc = bnx2_init_5709_context(bp);
4888                 if (rc)
4889                         return rc;
4890         } else
4891                 bnx2_init_context(bp);
4892
4893         if ((rc = bnx2_init_cpus(bp)) != 0)
4894                 return rc;
4895
4896         bnx2_init_nvram(bp);
4897
4898         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4899
4900         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4901         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4902         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4903         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4904                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4905                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4906                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4907         }
4908
4909         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4910
4911         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4912         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4913         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4914
4915         val = (BNX2_PAGE_BITS - 8) << 24;
4916         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4917
4918         /* Configure page size. */
4919         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4920         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4921         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4922         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4923
4924         val = bp->mac_addr[0] +
4925               (bp->mac_addr[1] << 8) +
4926               (bp->mac_addr[2] << 16) +
4927               bp->mac_addr[3] +
4928               (bp->mac_addr[4] << 8) +
4929               (bp->mac_addr[5] << 16);
4930         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4931
4932         /* Program the MTU.  Also include 4 bytes for CRC32. */
4933         mtu = bp->dev->mtu;
4934         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4935         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4936                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4937         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4938
4939         if (mtu < 1500)
4940                 mtu = 1500;
4941
4942         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4943         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4944         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4945
4946         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4947         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4948                 bp->bnx2_napi[i].last_status_idx = 0;
4949
4950         bp->idle_chk_status_idx = 0xffff;
4951
4952         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4953
4954         /* Set up how to generate a link change interrupt. */
4955         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4956
4957         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4958                 (u64) bp->status_blk_mapping & 0xffffffff);
4959         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4960
4961         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4962                 (u64) bp->stats_blk_mapping & 0xffffffff);
4963         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4964                 (u64) bp->stats_blk_mapping >> 32);
4965
4966         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4967                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4968
4969         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4970                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4971
4972         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4973                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4974
4975         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4976
4977         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4978
4979         BNX2_WR(bp, BNX2_HC_COM_TICKS,
4980                 (bp->com_ticks_int << 16) | bp->com_ticks);
4981
4982         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4983                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4984
4985         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4986                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4987         else
4988                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4989         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4990
4991         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4992                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4993         else {
4994                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4995                       BNX2_HC_CONFIG_COLLECT_STATS;
4996         }
4997
4998         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4999                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5000                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
5001
5002                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5003         }
5004
5005         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5006                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5007
5008         BNX2_WR(bp, BNX2_HC_CONFIG, val);
5009
5010         if (bp->rx_ticks < 25)
5011                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5012         else
5013                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5014
5015         for (i = 1; i < bp->irq_nvecs; i++) {
5016                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5017                            BNX2_HC_SB_CONFIG_1;
5018
5019                 BNX2_WR(bp, base,
5020                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5021                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5022                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5023
5024                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5025                         (bp->tx_quick_cons_trip_int << 16) |
5026                          bp->tx_quick_cons_trip);
5027
5028                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5029                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5030
5031                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5032                         (bp->rx_quick_cons_trip_int << 16) |
5033                         bp->rx_quick_cons_trip);
5034
5035                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5036                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5037         }
5038
5039         /* Clear internal stats counters. */
5040         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5041
5042         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5043
5044         /* Initialize the receive filter. */
5045         bnx2_set_rx_mode(bp->dev);
5046
5047         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5048                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5049                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5050                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5051         }
5052         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5053                           1, 0);
5054
5055         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5056         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5057
5058         udelay(20);
5059
5060         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5061
5062         return rc;
5063 }
5064
5065 static void
5066 bnx2_clear_ring_states(struct bnx2 *bp)
5067 {
5068         struct bnx2_napi *bnapi;
5069         struct bnx2_tx_ring_info *txr;
5070         struct bnx2_rx_ring_info *rxr;
5071         int i;
5072
5073         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5074                 bnapi = &bp->bnx2_napi[i];
5075                 txr = &bnapi->tx_ring;
5076                 rxr = &bnapi->rx_ring;
5077
5078                 txr->tx_cons = 0;
5079                 txr->hw_tx_cons = 0;
5080                 rxr->rx_prod_bseq = 0;
5081                 rxr->rx_prod = 0;
5082                 rxr->rx_cons = 0;
5083                 rxr->rx_pg_prod = 0;
5084                 rxr->rx_pg_cons = 0;
5085         }
5086 }
5087
/* Program the TX L2 context for one ring: context type, command type, and
 * the 64-bit host DMA address of the TX descriptor ring.  The 5709 uses a
 * different set of context offsets (the _XI variants) than older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	/* Select the per-chip context field offsets. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): the (8 << 16) field accompanying the L2 command type
	 * is undocumented here — confirm its meaning against the chip spec.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High then low 32 bits of the TX bd ring's DMA address. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5117
5118 static void
5119 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5120 {
5121         struct bnx2_tx_bd *txbd;
5122         u32 cid = TX_CID;
5123         struct bnx2_napi *bnapi;
5124         struct bnx2_tx_ring_info *txr;
5125
5126         bnapi = &bp->bnx2_napi[ring_num];
5127         txr = &bnapi->tx_ring;
5128
5129         if (ring_num == 0)
5130                 cid = TX_CID;
5131         else
5132                 cid = TX_TSS_CID + ring_num - 1;
5133
5134         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5135
5136         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5137
5138         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5139         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5140
5141         txr->tx_prod = 0;
5142         txr->tx_prod_bseq = 0;
5143
5144         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5145         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5146
5147         bnx2_init_tx_context(bp, cid, txr);
5148 }
5149
5150 static void
5151 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5152                      u32 buf_size, int num_rings)
5153 {
5154         int i;
5155         struct bnx2_rx_bd *rxbd;
5156
5157         for (i = 0; i < num_rings; i++) {
5158                 int j;
5159
5160                 rxbd = &rx_ring[i][0];
5161                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5162                         rxbd->rx_bd_len = buf_size;
5163                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5164                 }
5165                 if (i == (num_rings - 1))
5166                         j = 0;
5167                 else
5168                         j = i + 1;
5169                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5170                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5171         }
5172 }
5173
/* Set up one RX ring: initialize its bd ring pages (and the optional page
 * ring used for jumbo frames), program the RX context, pre-fill the rings
 * with buffers, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* A page-buffer size of 0 leaves the page ring disabled unless the
	 * jumbo path below reprograms it.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the page bd ring and its context. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* DMA address of the first RX bd ring page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill only logs a warning. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the main RX ring with data buffers. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox doorbell addresses used on the fast path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices/bseq to the chip. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5259
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS steering across the extra TX rings. */
	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table, packing eight 4-bit ring
		 * indices per 32-bit word and flushing each full word.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash on both IPv4 and IPv6 headers. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5306
5307 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5308 {
5309         u32 max, num_rings = 1;
5310
5311         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5312                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5313                 num_rings++;
5314         }
5315         /* round to next power of 2 */
5316         max = max_size;
5317         while ((max & num_rings) == 0)
5318                 max >>= 1;
5319
5320         if (num_rings != max)
5321                 max <<= 1;
5322
5323         return max;
5324 }
5325
/* Derive all RX buffer/ring sizing parameters from the requested ring size
 * and the current MTU, enabling the page ring when a single buffer cannot
 * hold a full frame (jumbo MTU) and the chip supports it.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total allocation needed per buffer, including alignment padding
	 * and the shared-info overhead that build_skb() requires.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame does not fit one page: split it across the page
		 * ring.  NOTE(review): the "- 40" adjustment is presumably
		 * header bytes kept in the head buffer — confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* Head buffer only holds the copy-threshold bytes. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5366
5367 static void
5368 bnx2_free_tx_skbs(struct bnx2 *bp)
5369 {
5370         int i;
5371
5372         for (i = 0; i < bp->num_tx_rings; i++) {
5373                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5374                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5375                 int j;
5376
5377                 if (txr->tx_buf_ring == NULL)
5378                         continue;
5379
5380                 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5381                         struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5382                         struct sk_buff *skb = tx_buf->skb;
5383                         int k, last;
5384
5385                         if (skb == NULL) {
5386                                 j = BNX2_NEXT_TX_BD(j);
5387                                 continue;
5388                         }
5389
5390                         dma_unmap_single(&bp->pdev->dev,
5391                                          dma_unmap_addr(tx_buf, mapping),
5392                                          skb_headlen(skb),
5393                                          PCI_DMA_TODEVICE);
5394
5395                         tx_buf->skb = NULL;
5396
5397                         last = tx_buf->nr_frags;
5398                         j = BNX2_NEXT_TX_BD(j);
5399                         for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5400                                 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5401                                 dma_unmap_page(&bp->pdev->dev,
5402                                         dma_unmap_addr(tx_buf, mapping),
5403                                         skb_frag_size(&skb_shinfo(skb)->frags[k]),
5404                                         PCI_DMA_TODEVICE);
5405                         }
5406                         dev_kfree_skb(skb);
5407                 }
5408                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5409         }
5410 }
5411
5412 static void
5413 bnx2_free_rx_skbs(struct bnx2 *bp)
5414 {
5415         int i;
5416
5417         for (i = 0; i < bp->num_rx_rings; i++) {
5418                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5419                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5420                 int j;
5421
5422                 if (rxr->rx_buf_ring == NULL)
5423                         return;
5424
5425                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5426                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5427                         u8 *data = rx_buf->data;
5428
5429                         if (data == NULL)
5430                                 continue;
5431
5432                         dma_unmap_single(&bp->pdev->dev,
5433                                          dma_unmap_addr(rx_buf, mapping),
5434                                          bp->rx_buf_use_size,
5435                                          PCI_DMA_FROMDEVICE);
5436
5437                         rx_buf->data = NULL;
5438
5439                         kfree(data);
5440                 }
5441                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5442                         bnx2_free_rx_page(bp, rxr, j);
5443         }
5444 }
5445
/* Free all TX and RX buffers across every ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5452
5453 static int
5454 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5455 {
5456         int rc;
5457
5458         rc = bnx2_reset_chip(bp, reset_code);
5459         bnx2_free_skbs(bp);
5460         if (rc)
5461                 return rc;
5462
5463         if ((rc = bnx2_init_chip(bp)) != 0)
5464                 return rc;
5465
5466         bnx2_init_all_rings(bp);
5467         return 0;
5468 }
5469
5470 static int
5471 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5472 {
5473         int rc;
5474
5475         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5476                 return rc;
5477
5478         spin_lock_bh(&bp->phy_lock);
5479         bnx2_init_phy(bp, reset_phy);
5480         bnx2_set_link(bp);
5481         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5482                 bnx2_remote_phy_event(bp);
5483         spin_unlock_bh(&bp->phy_lock);
5484         return 0;
5485 }
5486
5487 static int
5488 bnx2_shutdown_chip(struct bnx2 *bp)
5489 {
5490         u32 reset_code;
5491
5492         if (bp->flags & BNX2_FLAG_NO_WOL)
5493                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5494         else if (bp->wol)
5495                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5496         else
5497                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5498
5499         return bnx2_reset_chip(bp, reset_code);
5500 }
5501
/* Ethtool self-test: for each register in the table, verify that writing 0
 * and then all-ones only changes the read/write bits (rw_mask) and never
 * disturbs the read-only bits (ro_mask).  The original register value is
 * restored in every case.  Returns 0 on success or -ENODEV on the first
 * register that misbehaves.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff offset terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Some registers only exist on pre-5709 chips. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back set, ro unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5672
5673 static int
5674 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5675 {
5676         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5677                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5678         int i;
5679
5680         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5681                 u32 offset;
5682
5683                 for (offset = 0; offset < size; offset += 4) {
5684
5685                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5686
5687                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5688                                 test_pattern[i]) {
5689                                 return -ENODEV;
5690                         }
5691                 }
5692         }
5693         return 0;
5694 }
5695
5696 static int
5697 bnx2_test_memory(struct bnx2 *bp)
5698 {
5699         int ret = 0;
5700         int i;
5701         static struct mem_entry {
5702                 u32   offset;
5703                 u32   len;
5704         } mem_tbl_5706[] = {
5705                 { 0x60000,  0x4000 },
5706                 { 0xa0000,  0x3000 },
5707                 { 0xe0000,  0x4000 },
5708                 { 0x120000, 0x4000 },
5709                 { 0x1a0000, 0x4000 },
5710                 { 0x160000, 0x4000 },
5711                 { 0xffffffff, 0    },
5712         },
5713         mem_tbl_5709[] = {
5714                 { 0x60000,  0x4000 },
5715                 { 0xa0000,  0x3000 },
5716                 { 0xe0000,  0x4000 },
5717                 { 0x120000, 0x4000 },
5718                 { 0x1a0000, 0x4000 },
5719                 { 0xffffffff, 0    },
5720         };
5721         struct mem_entry *mem_tbl;
5722
5723         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5724                 mem_tbl = mem_tbl_5709;
5725         else
5726                 mem_tbl = mem_tbl_5706;
5727
5728         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5729                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5730                         mem_tbl[i].len)) != 0) {
5731                         return ret;
5732                 }
5733         }
5734
5735         return ret;
5736 }
5737
5738 #define BNX2_MAC_LOOPBACK       0
5739 #define BNX2_PHY_LOOPBACK       1
5740
/* Send one self-addressed frame through the chip in MAC- or PHY-level
 * loopback and verify it comes back intact.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM on skb
 * allocation failure, -EIO on DMA-mapping failure, or -ENODEV when the
 * frame is not received correctly.  Expects the device quiesced (called
 * from the ethtool self-test path).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* With a firmware-managed remote PHY we cannot loop at the
		 * PHY; report success so the test is effectively skipped.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits a single rx buffer (minus 4,
	 * presumably reserving room for the CRC — TODO confirm).
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address so the MAC accepts the frame. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	/* Deterministic payload pattern, verified byte-for-byte below. */
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status-block update (without raising an interrupt) so
	 * the rx consumer index read below is current.
	 */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build one tx descriptor covering the whole frame. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: publish the new producer index/byte count. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the frame time to loop back through the chip. */
	udelay(100);

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Transmit side must have consumed our lone descriptor ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Any receive-path error bit fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC; must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5873
5874 #define BNX2_MAC_LOOPBACK_FAILED        1
5875 #define BNX2_PHY_LOOPBACK_FAILED        2
5876 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5877                                          BNX2_PHY_LOOPBACK_FAILED)
5878
5879 static int
5880 bnx2_test_loopback(struct bnx2 *bp)
5881 {
5882         int rc = 0;
5883
5884         if (!netif_running(bp->dev))
5885                 return BNX2_LOOPBACK_FAILED;
5886
5887         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5888         spin_lock_bh(&bp->phy_lock);
5889         bnx2_init_phy(bp, 1);
5890         spin_unlock_bh(&bp->phy_lock);
5891         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5892                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5893         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5894                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5895         return rc;
5896 }
5897
5898 #define NVRAM_SIZE 0x200
5899 #define CRC32_RESIDUAL 0xdebb20e3
5900
5901 static int
5902 bnx2_test_nvram(struct bnx2 *bp)
5903 {
5904         __be32 buf[NVRAM_SIZE / 4];
5905         u8 *data = (u8 *) buf;
5906         int rc = 0;
5907         u32 magic, csum;
5908
5909         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5910                 goto test_nvram_done;
5911
5912         magic = be32_to_cpu(buf[0]);
5913         if (magic != 0x669955aa) {
5914                 rc = -ENODEV;
5915                 goto test_nvram_done;
5916         }
5917
5918         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5919                 goto test_nvram_done;
5920
5921         csum = ether_crc_le(0x100, data);
5922         if (csum != CRC32_RESIDUAL) {
5923                 rc = -ENODEV;
5924                 goto test_nvram_done;
5925         }
5926
5927         csum = ether_crc_le(0x100, data + 0x100);
5928         if (csum != CRC32_RESIDUAL) {
5929                 rc = -ENODEV;
5930         }
5931
5932 test_nvram_done:
5933         return rc;
5934 }
5935
5936 static int
5937 bnx2_test_link(struct bnx2 *bp)
5938 {
5939         u32 bmsr;
5940
5941         if (!netif_running(bp->dev))
5942                 return -ENODEV;
5943
5944         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5945                 if (bp->link_up)
5946                         return 0;
5947                 return -ENODEV;
5948         }
5949         spin_lock_bh(&bp->phy_lock);
5950         bnx2_enable_bmsr1(bp);
5951         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5952         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5953         bnx2_disable_bmsr1(bp);
5954         spin_unlock_bh(&bp->phy_lock);
5955
5956         if (bmsr & BMSR_LSTATUS) {
5957                 return 0;
5958         }
5959         return -ENODEV;
5960 }
5961
5962 static int
5963 bnx2_test_intr(struct bnx2 *bp)
5964 {
5965         int i;
5966         u16 status_idx;
5967
5968         if (!netif_running(bp->dev))
5969                 return -ENODEV;
5970
5971         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5972
5973         /* This register is not touched during run-time. */
5974         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5975         BNX2_RD(bp, BNX2_HC_COMMAND);
5976
5977         for (i = 0; i < 10; i++) {
5978                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5979                         status_idx) {
5980
5981                         break;
5982                 }
5983
5984                 msleep_interruptible(10);
5985         }
5986         if (i < 10)
5987                 return 0;
5988
5989         return -ENODEV;
5990 }
5991
/* Determining link for parallel detection.
 *
 * Probe the 5706S SerDes shadow/DSP registers to decide whether a
 * non-autonegotiating link partner is present.  Returns 1 when a
 * parallel-detect link looks usable, 0 otherwise (or when parallel
 * detection is disabled via BNX2_PHY_FLAG_NO_PARALLEL).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detect -> definitely no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* AN_DBG is read twice — presumably to discard latched status
	 * from a previous event; TODO confirm against PHY documentation.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Out of sync or invalid run-length data -> no usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern on the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6023
/* Periodic SerDes maintenance for the 5706 (runs from bnx2_timer()).
 * Handles parallel detection when autoneg finds no partner, reverts to
 * autoneg when the partner starts negotiating, and forces the link
 * down once on loss of sync.  Takes bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still pending; just count down
		 * and skip the link check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is on but link stays down: if the
			 * partner looks like a forced-speed peer, drop
			 * autoneg and force 1000/full (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect.  NOTE(review): the
		 * 0x17/0x15 accesses and bit 0x20 are undocumented here;
		 * presumably they detect that the partner now sends
		 * autoneg pages — confirm against vendor PHY docs.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner negotiates again: re-enable autoneg. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double-read AN_DBG for the current NOSYNC state (first
		 * read presumably clears latched bits — TODO confirm).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Lost sync while up: force the link down once,
			 * then let bnx2_set_link() re-evaluate on the
			 * next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6085
/* Periodic SerDes maintenance for the 5708: while autoneg fails to
 * bring the link up, alternate between forced-2.5G and autoneg so
 * either kind of link partner can be matched.  No-op with a remote
 * PHY; clears the pending count when the part is not 2.5G-capable.
 * Takes bp->phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the current autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg isn't linking; try forced 2.5G and
			 * re-check sooner.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode didn't link either; fall back to
			 * autoneg and give it two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6118
/* Driver heartbeat timer (bp->timer callback).  Sends the firmware
 * heartbeat, refreshes the firmware rx-drop counter, applies the
 * broken-stats workaround, and runs the per-chip SerDes state machine,
 * then re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* intr_sem != 0 means a reset is in progress; only re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain (non one-shot) MSI can be missed on some platforms;
	 * check for a stalled status block.
	 */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6154
6155 static int
6156 bnx2_request_irq(struct bnx2 *bp)
6157 {
6158         unsigned long flags;
6159         struct bnx2_irq *irq;
6160         int rc = 0, i;
6161
6162         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6163                 flags = 0;
6164         else
6165                 flags = IRQF_SHARED;
6166
6167         for (i = 0; i < bp->irq_nvecs; i++) {
6168                 irq = &bp->irq_tbl[i];
6169                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6170                                  &bp->bnx2_napi[i]);
6171                 if (rc)
6172                         break;
6173                 irq->requested = 1;
6174         }
6175         return rc;
6176 }
6177
6178 static void
6179 __bnx2_free_irq(struct bnx2 *bp)
6180 {
6181         struct bnx2_irq *irq;
6182         int i;
6183
6184         for (i = 0; i < bp->irq_nvecs; i++) {
6185                 irq = &bp->irq_tbl[i];
6186                 if (irq->requested)
6187                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6188                 irq->requested = 0;
6189         }
6190 }
6191
6192 static void
6193 bnx2_free_irq(struct bnx2 *bp)
6194 {
6195
6196         __bnx2_free_irq(bp);
6197         if (bp->flags & BNX2_FLAG_USING_MSI)
6198                 pci_disable_msi(bp->pdev);
6199         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6200                 pci_disable_msix(bp->pdev);
6201
6202         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6203 }
6204
6205 static void
6206 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6207 {
6208         int i, total_vecs, rc;
6209         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6210         struct net_device *dev = bp->dev;
6211         const int len = sizeof(bp->irq_tbl[0].name);
6212
6213         bnx2_setup_msix_tbl(bp);
6214         BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6215         BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6216         BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6217
6218         /*  Need to flush the previous three writes to ensure MSI-X
6219          *  is setup properly */
6220         BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6221
6222         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6223                 msix_ent[i].entry = i;
6224                 msix_ent[i].vector = 0;
6225         }
6226
6227         total_vecs = msix_vecs;
6228 #ifdef BCM_CNIC
6229         total_vecs++;
6230 #endif
6231         rc = -ENOSPC;
6232         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6233                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6234                 if (rc <= 0)
6235                         break;
6236                 if (rc > 0)
6237                         total_vecs = rc;
6238         }
6239
6240         if (rc != 0)
6241                 return;
6242
6243         msix_vecs = total_vecs;
6244 #ifdef BCM_CNIC
6245         msix_vecs--;
6246 #endif
6247         bp->irq_nvecs = msix_vecs;
6248         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6249         for (i = 0; i < total_vecs; i++) {
6250                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6251                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6252                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6253         }
6254 }
6255
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and size the
 * tx/rx ring counts to match.  dis_msi forces INTx.  Returns the
 * result of netif_set_real_num_rx_queues() (0 on success).
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Derive the desired vector count from the user-requested ring
	 * counts, defaulting to the RSS queue heuristic, capped at
	 * RX_MAX_RINGS.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Start from the single-vector INTx setup; the MSI-X/MSI paths
	 * below may override it.
	 */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to plain MSI only if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 gets the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* tx ring count is rounded down to a power of two — presumably
	 * required by the hardware queue selection; TODO confirm.
	 */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6307
/* Called with rtnl_lock */
/* ndo_open implementation: load firmware, power the chip to D0, pick
 * an interrupt mode, allocate memory, request IRQs, initialize the
 * NIC and start the heartbeat timer.  If plain MSI fails the
 * interrupt self-test, falls back to INTx and re-initializes.
 * Returns 0 on success or a negative errno after full teardown.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx and redo NIC init + IRQ request. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above before reporting failure. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6391
/* Deferred reset handler (bp->reset_task, scheduled e.g. from
 * bnx2_tx_timeout()): stop traffic, restore PCI config state if the
 * PCI block was reset, and reinitialize the NIC.  Closes the device
 * when re-initialization fails.  Runs under rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* NAPI must be enabled for dev_close() to tear it down. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* NOTE(review): intr_sem is set to 1 here — presumably
	 * bnx2_netif_start() drops it when re-enabling interrupts;
	 * confirm before relying on this ordering.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6426
6427 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6428
/* Dump the flow-through queue control registers, per-CPU state, and
 * the TBDC CAM to the kernel log for post-mortem debugging (called on
 * tx timeout).
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice — looks like an
		 * accidental duplicate; confirm against the chip's FTQ
		 * register list before changing.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* One register block per internal CPU, 0x40000 apart.  The pc
	 * (offset 0x1c) is read twice — presumably to show whether the
	 * CPU is advancing between reads; TODO confirm.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		/* Issue a CAM read for line i and poll (bounded) until
		 * the command register arbitration bit clears.
		 */
		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6495
/* Log a snapshot of PCI config space and MAC / host-coalescing status
 * registers; called from the tx-timeout path to aid debugging.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The MSI-X pending-bit array is mapped behind GRC window 3
	 * (set up in bnx2_enable_msix()).
	 */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6519
6520 static void
6521 bnx2_tx_timeout(struct net_device *dev)
6522 {
6523         struct bnx2 *bp = netdev_priv(dev);
6524
6525         bnx2_dump_ftq(bp);
6526         bnx2_dump_state(bp);
6527         bnx2_dump_mcp_state(bp);
6528
6529         /* This allows the netif to be shutdown gracefully before resetting */
6530         schedule_work(&bp->reset_task);
6531 }
6532
6533 /* Called with netif_tx_lock.
6534  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6535  * netif_wake_queue().
6536  */
6537 static netdev_tx_t
6538 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6539 {
6540         struct bnx2 *bp = netdev_priv(dev);
6541         dma_addr_t mapping;
6542         struct bnx2_tx_bd *txbd;
6543         struct bnx2_sw_tx_bd *tx_buf;
6544         u32 len, vlan_tag_flags, last_frag, mss;
6545         u16 prod, ring_prod;
6546         int i;
6547         struct bnx2_napi *bnapi;
6548         struct bnx2_tx_ring_info *txr;
6549         struct netdev_queue *txq;
6550
6551         /*  Determine which tx ring we will be placed on */
6552         i = skb_get_queue_mapping(skb);
6553         bnapi = &bp->bnx2_napi[i];
6554         txr = &bnapi->tx_ring;
6555         txq = netdev_get_tx_queue(dev, i);
6556
6557         if (unlikely(bnx2_tx_avail(bp, txr) <
6558             (skb_shinfo(skb)->nr_frags + 1))) {
6559                 netif_tx_stop_queue(txq);
6560                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6561
6562                 return NETDEV_TX_BUSY;
6563         }
6564         len = skb_headlen(skb);
6565         prod = txr->tx_prod;
6566         ring_prod = BNX2_TX_RING_IDX(prod);
6567
6568         vlan_tag_flags = 0;
6569         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6570                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6571         }
6572
6573         if (vlan_tx_tag_present(skb)) {
6574                 vlan_tag_flags |=
6575                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6576         }
6577
6578         if ((mss = skb_shinfo(skb)->gso_size)) {
6579                 u32 tcp_opt_len;
6580                 struct iphdr *iph;
6581
6582                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6583
6584                 tcp_opt_len = tcp_optlen(skb);
6585
6586                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6587                         u32 tcp_off = skb_transport_offset(skb) -
6588                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6589
6590                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6591                                           TX_BD_FLAGS_SW_FLAGS;
6592                         if (likely(tcp_off == 0))
6593                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6594                         else {
6595                                 tcp_off >>= 3;
6596                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6597                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6598                                                   ((tcp_off & 0x10) <<
6599                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6600                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6601                         }
6602                 } else {
6603                         iph = ip_hdr(skb);
6604                         if (tcp_opt_len || (iph->ihl > 5)) {
6605                                 vlan_tag_flags |= ((iph->ihl - 5) +
6606                                                    (tcp_opt_len >> 2)) << 8;
6607                         }
6608                 }
6609         } else
6610                 mss = 0;
6611
6612         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6613         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6614                 dev_kfree_skb(skb);
6615                 return NETDEV_TX_OK;
6616         }
6617
6618         tx_buf = &txr->tx_buf_ring[ring_prod];
6619         tx_buf->skb = skb;
6620         dma_unmap_addr_set(tx_buf, mapping, mapping);
6621
6622         txbd = &txr->tx_desc_ring[ring_prod];
6623
6624         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6625         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6626         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6627         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6628
6629         last_frag = skb_shinfo(skb)->nr_frags;
6630         tx_buf->nr_frags = last_frag;
6631         tx_buf->is_gso = skb_is_gso(skb);
6632
6633         for (i = 0; i < last_frag; i++) {
6634                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6635
6636                 prod = BNX2_NEXT_TX_BD(prod);
6637                 ring_prod = BNX2_TX_RING_IDX(prod);
6638                 txbd = &txr->tx_desc_ring[ring_prod];
6639
6640                 len = skb_frag_size(frag);
6641                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6642                                            DMA_TO_DEVICE);
6643                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6644                         goto dma_error;
6645                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6646                                    mapping);
6647
6648                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6649                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6650                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6651                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6652
6653         }
6654         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6655
6656         /* Sync BD data before updating TX mailbox */
6657         wmb();
6658
6659         netdev_tx_sent_queue(txq, skb->len);
6660
6661         prod = BNX2_NEXT_TX_BD(prod);
6662         txr->tx_prod_bseq += skb->len;
6663
6664         BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6665         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6666
6667         mmiowb();
6668
6669         txr->tx_prod = prod;
6670
6671         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6672                 netif_tx_stop_queue(txq);
6673
6674                 /* netif_tx_stop_queue() must be done before checking
6675                  * tx index in bnx2_tx_avail() below, because in
6676                  * bnx2_tx_int(), we update tx index before checking for
6677                  * netif_tx_queue_stopped().
6678                  */
6679                 smp_mb();
6680                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6681                         netif_tx_wake_queue(txq);
6682         }
6683
6684         return NETDEV_TX_OK;
6685 dma_error:
6686         /* save value of frag that failed */
6687         last_frag = i;
6688
6689         /* start back at beginning and unmap skb */
6690         prod = txr->tx_prod;
6691         ring_prod = BNX2_TX_RING_IDX(prod);
6692         tx_buf = &txr->tx_buf_ring[ring_prod];
6693         tx_buf->skb = NULL;
6694         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6695                          skb_headlen(skb), PCI_DMA_TODEVICE);
6696
6697         /* unmap remaining mapped pages */
6698         for (i = 0; i < last_frag; i++) {
6699                 prod = BNX2_NEXT_TX_BD(prod);
6700                 ring_prod = BNX2_TX_RING_IDX(prod);
6701                 tx_buf = &txr->tx_buf_ring[ring_prod];
6702                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6703                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6704                                PCI_DMA_TODEVICE);
6705         }
6706
6707         dev_kfree_skb(skb);
6708         return NETDEV_TX_OK;
6709 }
6710
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device first: mask/sync interrupts and stop NAPI
	 * polling before disabling the TX queues and the periodic timer.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	/* Reset the chip, then release IRQs, SKBs, DMA memory and NAPI. */
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Put the device into a low-power state until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6731
6732 static void
6733 bnx2_save_stats(struct bnx2 *bp)
6734 {
6735         u32 *hw_stats = (u32 *) bp->stats_blk;
6736         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6737         int i;
6738
6739         /* The 1st 10 counters are 64-bit counters */
6740         for (i = 0; i < 20; i += 2) {
6741                 u32 hi;
6742                 u64 lo;
6743
6744                 hi = temp_stats[i] + hw_stats[i];
6745                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6746                 if (lo > 0xffffffff)
6747                         hi++;
6748                 temp_stats[i] = hi;
6749                 temp_stats[i + 1] = lo & 0xffffffff;
6750         }
6751
6752         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6753                 temp_stats[i] += hw_stats[i];
6754 }
6755
/* Combine a hi/lo 32-bit hardware counter pair into one 64-bit value. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter: live hardware value plus the value saved across chip
 * resets in temp_stats_blk.  Expansion is parenthesized so the macro is
 * safe inside any surrounding expression.
 */
#define GET_64BIT_NET_STATS(ctr)				\
	(GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr))

/* 32-bit counter: live hardware value plus the saved value. */
#define GET_32BIT_NET_STATS(ctr)				\
	((unsigned long) (bp->stats_blk->ctr +			\
			  bp->temp_stats_blk->ctr))
6766
/* ndo_get_stats64: fill *net_stats from the hardware statistics block.
 * Each GET_*_NET_STATS() macro adds the live hardware counter to the
 * value saved in temp_stats_blk across chip resets.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* No stats block means the chip was never initialized; return the
	 * structure unchanged.
	 */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is derived from the error categories computed above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 report zero carrier sense errors, presumably
	 * because the counter is unreliable on those chips — NOTE(review):
	 * confirm against chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6839
6840 /* All ethtool functions called with rtnl_lock */
6841
/* ethtool get_settings: report supported media, advertised modes, and the
 * current link speed/duplex.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY-capable device can use either media; otherwise the
	 * supported media follows the fixed PHY port type.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock serializes access to the link settings read below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Report speed/duplex only while the link is up; -1 means
	 * "unknown" to ethtool.
	 */
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, bp->line_speed);
		cmd->duplex = bp->duplex;
	}
	else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6899
/* ethtool set_settings: validate and apply the requested autoneg/speed/
 * duplex/port configuration.  Returns 0 on success, -EINVAL on an
 * invalid combination, or the error from bnx2_setup_phy().
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed until all of the
	 * requested settings have been validated.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertised modes to what the selected media
		 * supports, defaulting to all of them if none were given.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		/* Forced speed: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper rejects 1G/2.5G.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed; commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6977
6978 static void
6979 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6980 {
6981         struct bnx2 *bp = netdev_priv(dev);
6982
6983         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6984         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6985         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6986         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6987 }
6988
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: fixed-size register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6996
/* ethtool get_regs: dump readable register ranges into *_p.  Unreadable
 * holes between ranges are left zero-filled.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Consecutive pairs of [start, end) byte offsets of register
	 * ranges that are safe to read; the final 0x8000 terminates.
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	/* Zero the whole dump first so the skipped holes read as zero. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;	/* reg_boundaries[0] is 0, so this is a no-op */
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* End of this range: jump to the next range and position
		 * the output pointer at the matching byte offset.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7048
7049 static void
7050 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7051 {
7052         struct bnx2 *bp = netdev_priv(dev);
7053
7054         if (bp->flags & BNX2_FLAG_NO_WOL) {
7055                 wol->supported = 0;
7056                 wol->wolopts = 0;
7057         }
7058         else {
7059                 wol->supported = WAKE_MAGIC;
7060                 if (bp->wol)
7061                         wol->wolopts = WAKE_MAGIC;
7062                 else
7063                         wol->wolopts = 0;
7064         }
7065         memset(&wol->sopass, 0, sizeof(wol->sopass));
7066 }
7067
7068 static int
7069 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7070 {
7071         struct bnx2 *bp = netdev_priv(dev);
7072
7073         if (wol->wolopts & ~WAKE_MAGIC)
7074                 return -EINVAL;
7075
7076         if (wol->wolopts & WAKE_MAGIC) {
7077                 if (bp->flags & BNX2_FLAG_NO_WOL)
7078                         return -EINVAL;
7079
7080                 bp->wol = 1;
7081         }
7082         else {
7083                 bp->wol = 0;
7084         }
7085         return 0;
7086 }
7087
/* ethtool nway_reset: restart autonegotiation.  Requires the interface
 * to be up and autoneg to be enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg only makes sense when autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: ask the firmware-managed PHY to renegotiate. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; msleep() cannot be
		 * called with a BH-disabling spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7133
7134 static u32
7135 bnx2_get_link(struct net_device *dev)
7136 {
7137         struct bnx2 *bp = netdev_priv(dev);
7138
7139         return bp->link_up;
7140 }
7141
7142 static int
7143 bnx2_get_eeprom_len(struct net_device *dev)
7144 {
7145         struct bnx2 *bp = netdev_priv(dev);
7146
7147         if (bp->flash_info == NULL)
7148                 return 0;
7149
7150         return (int) bp->flash_size;
7151 }
7152
7153 static int
7154 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7155                 u8 *eebuf)
7156 {
7157         struct bnx2 *bp = netdev_priv(dev);
7158         int rc;
7159
7160         if (!netif_running(dev))
7161                 return -EAGAIN;
7162
7163         /* parameters already validated in ethtool_get_eeprom */
7164
7165         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7166
7167         return rc;
7168 }
7169
7170 static int
7171 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7172                 u8 *eebuf)
7173 {
7174         struct bnx2 *bp = netdev_priv(dev);
7175         int rc;
7176
7177         if (!netif_running(dev))
7178                 return -EAGAIN;
7179
7180         /* parameters already validated in ethtool_set_eeprom */
7181
7182         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7183
7184         return rc;
7185 }
7186
7187 static int
7188 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7189 {
7190         struct bnx2 *bp = netdev_priv(dev);
7191
7192         memset(coal, 0, sizeof(struct ethtool_coalesce));
7193
7194         coal->rx_coalesce_usecs = bp->rx_ticks;
7195         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7196         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7197         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7198
7199         coal->tx_coalesce_usecs = bp->tx_ticks;
7200         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7201         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7202         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7203
7204         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7205
7206         return 0;
7207 }
7208
7209 static int
7210 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7211 {
7212         struct bnx2 *bp = netdev_priv(dev);
7213
7214         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7215         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7216
7217         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7218         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7219
7220         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7221         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7222
7223         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7224         if (bp->rx_quick_cons_trip_int > 0xff)
7225                 bp->rx_quick_cons_trip_int = 0xff;
7226
7227         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7228         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7229
7230         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7231         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7232
7233         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7234         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7235
7236         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7237         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7238                 0xff;
7239
7240         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7241         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7242                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7243                         bp->stats_ticks = USEC_PER_SEC;
7244         }
7245         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7246                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7247         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7248
7249         if (netif_running(bp->dev)) {
7250                 bnx2_netif_stop(bp, true);
7251                 bnx2_init_nic(bp, 0);
7252                 bnx2_netif_start(bp, true);
7253         }
7254
7255         return 0;
7256 }
7257
7258 static void
7259 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7260 {
7261         struct bnx2 *bp = netdev_priv(dev);
7262
7263         ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7264         ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7265
7266         ering->rx_pending = bp->rx_ring_size;
7267         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7268
7269         ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7270         ering->tx_pending = bp->tx_ring_size;
7271 }
7272
/* Resize the RX/TX rings.  If the device is up, tear down the data path,
 * apply the new sizes, and bring everything back up; @reset_irq also
 * renegotiates the interrupt mode (MSI-X/MSI/INTx) and NAPI contexts.
 * On re-init failure the device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		/* Quiesce and tear down the active data path. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new ring sizes; they take effect on (re)init below
	 * or on the next open if the device is down.
	 */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		/* Bring-up failed: re-enable NAPI so dev_close() can run
		 * its teardown, then close the device.
		 */
		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7328
7329 static int
7330 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7331 {
7332         struct bnx2 *bp = netdev_priv(dev);
7333         int rc;
7334
7335         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7336                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7337                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7338
7339                 return -EINVAL;
7340         }
7341         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7342                                    false);
7343         return rc;
7344 }
7345
7346 static void
7347 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7348 {
7349         struct bnx2 *bp = netdev_priv(dev);
7350
7351         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7352         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7353         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7354 }
7355
7356 static int
7357 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7358 {
7359         struct bnx2 *bp = netdev_priv(dev);
7360
7361         bp->req_flow_ctrl = 0;
7362         if (epause->rx_pause)
7363                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7364         if (epause->tx_pause)
7365                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7366
7367         if (epause->autoneg) {
7368                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7369         }
7370         else {
7371                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7372         }
7373
7374         if (netif_running(dev)) {
7375                 spin_lock_bh(&bp->phy_lock);
7376                 bnx2_setup_phy(bp, bp->phy_port);
7377                 spin_unlock_bh(&bp->phy_lock);
7378         }
7379
7380         return 0;
7381 }
7382
/* Names of the ethtool statistics.  The order of these entries must stay
 * in sync with bnx2_stats_offset_arr[], which is indexed in parallel.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7434
/* Number of entries in the ethtool statistics string table above. */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset (in 32-bit words) of a field within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7438
/* 32-bit word offsets into struct statistics_block for each counter
 * reported by ethtool -S.  Entry order must match bnx2_stats_str_arr
 * exactly.  64-bit counters point at their _hi word; the _lo word is
 * assumed to immediately follow it in the statistics block.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7488
/* Per-counter width in bytes (indexed like bnx2_stats_offset_arr) for
 * 5706 A0-A2 and 5708 A0.  stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped (width 0) because of
 * errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7499
/* Per-counter width in bytes for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) remains skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7507
7508 #define BNX2_NUM_TESTS 6
7509
/* Names of the ethtool self-test results; the slot order must match
 * the buf[] indices filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7520
7521 static int
7522 bnx2_get_sset_count(struct net_device *dev, int sset)
7523 {
7524         switch (sset) {
7525         case ETH_SS_TEST:
7526                 return BNX2_NUM_TESTS;
7527         case ETH_SS_STATS:
7528                 return BNX2_NUM_STATS;
7529         default:
7530                 return -EOPNOTSUPP;
7531         }
7532 }
7533
/* ethtool self-test handler.  buf[0..5] line up with
 * bnx2_tests_str_arr; 0 = pass, non-zero = fail.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The chip may be in D3hot if the interface is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the chip: stop
		 * traffic, reset into diagnostic mode and release all
		 * posted buffers.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback returns a non-zero code identifying which
		 * loopback mode(s) failed; report it verbatim.
		 */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			/* Restore normal operation after the diag reset. */
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait up to 7 seconds for link up so the link test
		 * below does not report a false failure.
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface is not up. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7592
7593 static void
7594 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7595 {
7596         switch (stringset) {
7597         case ETH_SS_STATS:
7598                 memcpy(buf, bnx2_stats_str_arr,
7599                         sizeof(bnx2_stats_str_arr));
7600                 break;
7601         case ETH_SS_TEST:
7602                 memcpy(buf, bnx2_tests_str_arr,
7603                         sizeof(bnx2_tests_str_arr));
7604                 break;
7605         }
7606 }
7607
7608 static void
7609 bnx2_get_ethtool_stats(struct net_device *dev,
7610                 struct ethtool_stats *stats, u64 *buf)
7611 {
7612         struct bnx2 *bp = netdev_priv(dev);
7613         int i;
7614         u32 *hw_stats = (u32 *) bp->stats_blk;
7615         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7616         u8 *stats_len_arr = NULL;
7617
7618         if (hw_stats == NULL) {
7619                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7620                 return;
7621         }
7622
7623         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7624             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7625             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7626             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7627                 stats_len_arr = bnx2_5706_stats_len_arr;
7628         else
7629                 stats_len_arr = bnx2_5708_stats_len_arr;
7630
7631         for (i = 0; i < BNX2_NUM_STATS; i++) {
7632                 unsigned long offset;
7633
7634                 if (stats_len_arr[i] == 0) {
7635                         /* skip this counter */
7636                         buf[i] = 0;
7637                         continue;
7638                 }
7639
7640                 offset = bnx2_stats_offset_arr[i];
7641                 if (stats_len_arr[i] == 4) {
7642                         /* 4-byte counter */
7643                         buf[i] = (u64) *(hw_stats + offset) +
7644                                  *(temp_stats + offset);
7645                         continue;
7646                 }
7647                 /* 8-byte counter */
7648                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7649                          *(hw_stats + offset + 1) +
7650                          (((u64) *(temp_stats + offset)) << 32) +
7651                          *(temp_stats + offset + 1);
7652         }
7653 }
7654
/* ethtool port-identify (blink) handler.  ACTIVE saves the current LED
 * configuration and switches the LEDs to MAC control; ON/OFF toggle the
 * LEDs via override bits; INACTIVE restores the saved configuration.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Chip may be powered down if the interface is closed. */
		bnx2_set_power_state(bp, PCI_D0);

		/* Save LED mode so INACTIVE can restore it. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force all speed and traffic LEDs on. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with no LED bits set == all off. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		/* Drop back to low power if the interface is down. */
		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
7692
7693 static netdev_features_t
7694 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7695 {
7696         struct bnx2 *bp = netdev_priv(dev);
7697
7698         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7699                 features |= NETIF_F_HW_VLAN_RX;
7700
7701         return features;
7702 }
7703
/* Apply a new feature set to the device. */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* If rx VLAN stripping is being toggled while the device is up,
	 * reprogram the rx mode and notify the firmware of the new
	 * keep-VLAN setting.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		/* dev->features was already updated above; return non-zero
		 * so the core does not assign it again.
		 */
		return 1;
	}

	return 0;
}
7728
7729 static void bnx2_get_channels(struct net_device *dev,
7730                               struct ethtool_channels *channels)
7731 {
7732         struct bnx2 *bp = netdev_priv(dev);
7733         u32 max_rx_rings = 1;
7734         u32 max_tx_rings = 1;
7735
7736         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7737                 max_rx_rings = RX_MAX_RINGS;
7738                 max_tx_rings = TX_MAX_RINGS;
7739         }
7740
7741         channels->max_rx = max_rx_rings;
7742         channels->max_tx = max_tx_rings;
7743         channels->max_other = 0;
7744         channels->max_combined = 0;
7745         channels->rx_count = bp->num_rx_rings;
7746         channels->tx_count = bp->num_tx_rings;
7747         channels->other_count = 0;
7748         channels->combined_count = 0;
7749 }
7750
7751 static int bnx2_set_channels(struct net_device *dev,
7752                               struct ethtool_channels *channels)
7753 {
7754         struct bnx2 *bp = netdev_priv(dev);
7755         u32 max_rx_rings = 1;
7756         u32 max_tx_rings = 1;
7757         int rc = 0;
7758
7759         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7760                 max_rx_rings = RX_MAX_RINGS;
7761                 max_tx_rings = TX_MAX_RINGS;
7762         }
7763         if (channels->rx_count > max_rx_rings ||
7764             channels->tx_count > max_tx_rings)
7765                 return -EINVAL;
7766
7767         bp->num_req_rx_rings = channels->rx_count;
7768         bp->num_req_tx_rings = channels->tx_count;
7769
7770         if (netif_running(dev))
7771                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7772                                            bp->tx_ring_size, true);
7773
7774         return rc;
7775 }
7776
/* ethtool operations table; all handlers are defined earlier in this
 * file.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
7804
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru: also read the register in data->reg_num */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Direct MII access is not possible when the PHY is
		 * managed remotely by the firmware.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7855
7856 /* Called with rtnl_lock */
7857 static int
7858 bnx2_change_mac_addr(struct net_device *dev, void *p)
7859 {
7860         struct sockaddr *addr = p;
7861         struct bnx2 *bp = netdev_priv(dev);
7862
7863         if (!is_valid_ether_addr(addr->sa_data))
7864                 return -EADDRNOTAVAIL;
7865
7866         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7867         if (netif_running(dev))
7868                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7869
7870         return 0;
7871 }
7872
7873 /* Called with rtnl_lock */
7874 static int
7875 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7876 {
7877         struct bnx2 *bp = netdev_priv(dev);
7878
7879         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7880                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7881                 return -EINVAL;
7882
7883         dev->mtu = new_mtu;
7884         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7885                                      false);
7886 }
7887
7888 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: with normal interrupt delivery unavailable,
 * invoke each vector's interrupt handler directly, masking the vector
 * around the call so it cannot fire concurrently.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
7903 #endif
7904
/* Determine whether a 5709 port uses copper or SerDes media.  The
 * package bonding ID identifies single-media parts directly; dual-media
 * parts are decoded from strap settings, which map differently for
 * function 0 and function 1.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;		/* copper-only package */
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		/* SerDes-only package */
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual media: a software override takes precedence over the
	 * hardware pin strap.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* Strap values selecting SerDes (per chip documentation). */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7942
/* Detect the bus type (PCI vs PCI-X), width and clock speed from the
 * chip's status registers, recording the results in bp->flags and
 * bp->bus_speed_mhz.  Used on non-PCIE devices.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* Map the detected clock range to a nominal bus speed. */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: only 33 vs 66 MHz is distinguishable. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7994
/* Read the VPD area from NVRAM and, if the board is a Dell OEM part
 * (manufacturer ID "1028"), append the vendor version string to
 * bp->fw_version.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* Second half of the buffer receives the raw NVRAM image; the
	 * first half gets the byte-swapped copy that is then parsed.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD with the bytes reversed within each
	 * 32-bit word; undo that swap here.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* "1028" is Dell's PCI vendor ID in ASCII. */
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* NOTE(review): relies on bp->fw_version having been zeroed at
	 * allocation so the string remains NUL-terminated after the
	 * trailing space (len <= 30, buffer larger) — confirm at caller.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8062
8063 static int
8064 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8065 {
8066         struct bnx2 *bp;
8067         int rc, i, j;
8068         u32 reg;
8069         u64 dma_mask, persist_dma_mask;
8070         int err;
8071
8072         SET_NETDEV_DEV(dev, &pdev->dev);
8073         bp = netdev_priv(dev);
8074
8075         bp->flags = 0;
8076         bp->phy_flags = 0;
8077
8078         bp->temp_stats_blk =
8079                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8080
8081         if (bp->temp_stats_blk == NULL) {
8082                 rc = -ENOMEM;
8083                 goto err_out;
8084         }
8085
8086         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8087         rc = pci_enable_device(pdev);
8088         if (rc) {
8089                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8090                 goto err_out;
8091         }
8092
8093         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8094                 dev_err(&pdev->dev,
8095                         "Cannot find PCI device base address, aborting\n");
8096                 rc = -ENODEV;
8097                 goto err_out_disable;
8098         }
8099
8100         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8101         if (rc) {
8102                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8103                 goto err_out_disable;
8104         }
8105
8106         pci_set_master(pdev);
8107
8108         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8109         if (bp->pm_cap == 0) {
8110                 dev_err(&pdev->dev,
8111                         "Cannot find power management capability, aborting\n");
8112                 rc = -EIO;
8113                 goto err_out_release;
8114         }
8115
8116         bp->dev = dev;
8117         bp->pdev = pdev;
8118
8119         spin_lock_init(&bp->phy_lock);
8120         spin_lock_init(&bp->indirect_lock);
8121 #ifdef BCM_CNIC
8122         mutex_init(&bp->cnic_lock);
8123 #endif
8124         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8125
8126         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8127                                                          TX_MAX_TSS_RINGS + 1));
8128         if (!bp->regview) {
8129                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8130                 rc = -ENOMEM;
8131                 goto err_out_release;
8132         }
8133
8134         bnx2_set_power_state(bp, PCI_D0);
8135
8136         /* Configure byte swap and enable write to the reg_window registers.
8137          * Rely on CPU to do target byte swapping on big endian systems
8138          * The chip's target access swapping will not swap all accesses
8139          */
8140         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8141                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8142                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8143
8144         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8145
8146         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8147                 if (!pci_is_pcie(pdev)) {
8148                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8149                         rc = -EIO;
8150                         goto err_out_unmap;
8151                 }
8152                 bp->flags |= BNX2_FLAG_PCIE;
8153                 if (CHIP_REV(bp) == CHIP_REV_Ax)
8154                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8155
8156                 /* AER (Advanced Error Reporting) hooks */
8157                 err = pci_enable_pcie_error_reporting(pdev);
8158                 if (!err)
8159                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8160
8161         } else {
8162                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8163                 if (bp->pcix_cap == 0) {
8164                         dev_err(&pdev->dev,
8165                                 "Cannot find PCIX capability, aborting\n");
8166                         rc = -EIO;
8167                         goto err_out_unmap;
8168                 }
8169                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8170         }
8171
8172         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8173                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8174                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8175         }
8176
8177         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8178                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8179                         bp->flags |= BNX2_FLAG_MSI_CAP;
8180         }
8181
8182         /* 5708 cannot support DMA addresses > 40-bit.  */
8183         if (CHIP_NUM(bp) == CHIP_NUM_5708)
8184                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8185         else
8186                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8187
8188         /* Configure DMA attributes. */
8189         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8190                 dev->features |= NETIF_F_HIGHDMA;
8191                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8192                 if (rc) {
8193                         dev_err(&pdev->dev,
8194                                 "pci_set_consistent_dma_mask failed, aborting\n");
8195                         goto err_out_unmap;
8196                 }
8197         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8198                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8199                 goto err_out_unmap;
8200         }
8201
8202         if (!(bp->flags & BNX2_FLAG_PCIE))
8203                 bnx2_get_pci_speed(bp);
8204
8205         /* 5706A0 may falsely detect SERR and PERR. */
8206         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8207                 reg = BNX2_RD(bp, PCI_COMMAND);
8208                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8209                 BNX2_WR(bp, PCI_COMMAND, reg);
8210         }
8211         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8212                 !(bp->flags & BNX2_FLAG_PCIX)) {
8213
8214                 dev_err(&pdev->dev,
8215                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8216                 goto err_out_unmap;
8217         }
8218
8219         bnx2_init_nvram(bp);
8220
8221         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8222
8223         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8224                 bp->func = 1;
8225
8226         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8227             BNX2_SHM_HDR_SIGNATURE_SIG) {
8228                 u32 off = bp->func << 2;
8229
8230                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8231         } else
8232                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8233
8234         /* Get the permanent MAC address.  First we need to make sure the
8235          * firmware is actually running.
8236          */
8237         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8238
8239         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8240             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8241                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8242                 rc = -ENODEV;
8243                 goto err_out_unmap;
8244         }
8245
8246         bnx2_read_vpd_fw_ver(bp);
8247
8248         j = strlen(bp->fw_version);
8249         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8250         for (i = 0; i < 3 && j < 24; i++) {
8251                 u8 num, k, skip0;
8252
8253                 if (i == 0) {
8254                         bp->fw_version[j++] = 'b';
8255                         bp->fw_version[j++] = 'c';
8256                         bp->fw_version[j++] = ' ';
8257                 }
8258                 num = (u8) (reg >> (24 - (i * 8)));
8259                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8260                         if (num >= k || !skip0 || k == 1) {
8261                                 bp->fw_version[j++] = (num / k) + '0';
8262                                 skip0 = 0;
8263                         }
8264                 }
8265                 if (i != 2)
8266                         bp->fw_version[j++] = '.';
8267         }
8268         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8269         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8270                 bp->wol = 1;
8271
8272         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8273                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8274
8275                 for (i = 0; i < 30; i++) {
8276                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8277                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8278                                 break;
8279                         msleep(10);
8280                 }
8281         }
8282         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8283         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8284         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8285             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8286                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8287
8288                 if (j < 32)
8289                         bp->fw_version[j++] = ' ';
8290                 for (i = 0; i < 3 && j < 28; i++) {
8291                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8292                         reg = be32_to_cpu(reg);
8293                         memcpy(&bp->fw_version[j], &reg, 4);
8294                         j += 4;
8295                 }
8296         }
8297
8298         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8299         bp->mac_addr[0] = (u8) (reg >> 8);
8300         bp->mac_addr[1] = (u8) reg;
8301
8302         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8303         bp->mac_addr[2] = (u8) (reg >> 24);
8304         bp->mac_addr[3] = (u8) (reg >> 16);
8305         bp->mac_addr[4] = (u8) (reg >> 8);
8306         bp->mac_addr[5] = (u8) reg;
8307
8308         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8309         bnx2_set_rx_ring_size(bp, 255);
8310
8311         bp->tx_quick_cons_trip_int = 2;
8312         bp->tx_quick_cons_trip = 20;
8313         bp->tx_ticks_int = 18;
8314         bp->tx_ticks = 80;
8315
8316         bp->rx_quick_cons_trip_int = 2;
8317         bp->rx_quick_cons_trip = 12;
8318         bp->rx_ticks_int = 18;
8319         bp->rx_ticks = 18;
8320
8321         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8322
8323         bp->current_interval = BNX2_TIMER_INTERVAL;
8324
8325         bp->phy_addr = 1;
8326
8327         /* Disable WOL support if we are running on a SERDES chip. */
8328         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8329                 bnx2_get_5709_media(bp);
8330         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8331                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8332
8333         bp->phy_port = PORT_TP;
8334         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8335                 bp->phy_port = PORT_FIBRE;
8336                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8337                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8338                         bp->flags |= BNX2_FLAG_NO_WOL;
8339                         bp->wol = 0;
8340                 }
8341                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8342                         /* Don't do parallel detect on this board because of
8343                          * some board problems.  The link will not go down
8344                          * if we do parallel detect.
8345                          */
8346                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8347                             pdev->subsystem_device == 0x310c)
8348                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8349                 } else {
8350                         bp->phy_addr = 2;
8351                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8352                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8353                 }
8354         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8355                    CHIP_NUM(bp) == CHIP_NUM_5708)
8356                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8357         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8358                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8359                   CHIP_REV(bp) == CHIP_REV_Bx))
8360                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8361
8362         bnx2_init_fw_cap(bp);
8363
8364         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8365             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8366             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8367             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8368                 bp->flags |= BNX2_FLAG_NO_WOL;
8369                 bp->wol = 0;
8370         }
8371
8372         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8373                 bp->tx_quick_cons_trip_int =
8374                         bp->tx_quick_cons_trip;
8375                 bp->tx_ticks_int = bp->tx_ticks;
8376                 bp->rx_quick_cons_trip_int =
8377                         bp->rx_quick_cons_trip;
8378                 bp->rx_ticks_int = bp->rx_ticks;
8379                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8380                 bp->com_ticks_int = bp->com_ticks;
8381                 bp->cmd_ticks_int = bp->cmd_ticks;
8382         }
8383
8384         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8385          *
8386          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8387          * with byte enables disabled on the unused 32-bit word.  This is legal
8388          * but causes problems on the AMD 8132 which will eventually stop
8389          * responding after a while.
8390          *
8391          * AMD believes this incompatibility is unique to the 5706, and
8392          * prefers to locally disable MSI rather than globally disabling it.
8393          */
8394         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8395                 struct pci_dev *amd_8132 = NULL;
8396
8397                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8398                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8399                                                   amd_8132))) {
8400
8401                         if (amd_8132->revision >= 0x10 &&
8402                             amd_8132->revision <= 0x13) {
8403                                 disable_msi = 1;
8404                                 pci_dev_put(amd_8132);
8405                                 break;
8406                         }
8407                 }
8408         }
8409
8410         bnx2_set_default_link(bp);
8411         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8412
8413         init_timer(&bp->timer);
8414         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8415         bp->timer.data = (unsigned long) bp;
8416         bp->timer.function = bnx2_timer;
8417
8418 #ifdef BCM_CNIC
8419         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8420                 bp->cnic_eth_dev.max_iscsi_conn =
8421                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8422                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8423 #endif
8424         pci_save_state(pdev);
8425
8426         return 0;
8427
8428 err_out_unmap:
8429         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8430                 pci_disable_pcie_error_reporting(pdev);
8431                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8432         }
8433
8434         pci_iounmap(pdev, bp->regview);
8435         bp->regview = NULL;
8436
8437 err_out_release:
8438         pci_release_regions(pdev);
8439
8440 err_out_disable:
8441         pci_disable_device(pdev);
8442         pci_set_drvdata(pdev, NULL);
8443
8444 err_out:
8445         return rc;
8446 }
8447
8448 static char *
8449 bnx2_bus_string(struct bnx2 *bp, char *str)
8450 {
8451         char *s = str;
8452
8453         if (bp->flags & BNX2_FLAG_PCIE) {
8454                 s += sprintf(s, "PCI Express");
8455         } else {
8456                 s += sprintf(s, "PCI");
8457                 if (bp->flags & BNX2_FLAG_PCIX)
8458                         s += sprintf(s, "-X");
8459                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8460                         s += sprintf(s, " 32-bit");
8461                 else
8462                         s += sprintf(s, " 64-bit");
8463                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8464         }
8465         return str;
8466 }
8467
8468 static void
8469 bnx2_del_napi(struct bnx2 *bp)
8470 {
8471         int i;
8472
8473         for (i = 0; i < bp->irq_nvecs; i++)
8474                 netif_napi_del(&bp->bnx2_napi[i].napi);
8475 }
8476
8477 static void
8478 bnx2_init_napi(struct bnx2 *bp)
8479 {
8480         int i;
8481
8482         for (i = 0; i < bp->irq_nvecs; i++) {
8483                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8484                 int (*poll)(struct napi_struct *, int);
8485
8486                 if (i == 0)
8487                         poll = bnx2_poll;
8488                 else
8489                         poll = bnx2_poll_msix;
8490
8491                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8492                 bnapi->bp = bp;
8493         }
8494 }
8495
/* net_device callbacks wired into the stack at probe time.  NAPI poll
 * handlers are registered separately in bnx2_init_napi().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8513
8514 static int
8515 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8516 {
8517         static int version_printed = 0;
8518         struct net_device *dev;
8519         struct bnx2 *bp;
8520         int rc;
8521         char str[40];
8522
8523         if (version_printed++ == 0)
8524                 pr_info("%s", version);
8525
8526         /* dev zeroed in init_etherdev */
8527         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8528         if (!dev)
8529                 return -ENOMEM;
8530
8531         rc = bnx2_init_board(pdev, dev);
8532         if (rc < 0)
8533                 goto err_free;
8534
8535         dev->netdev_ops = &bnx2_netdev_ops;
8536         dev->watchdog_timeo = TX_TIMEOUT;
8537         dev->ethtool_ops = &bnx2_ethtool_ops;
8538
8539         bp = netdev_priv(dev);
8540
8541         pci_set_drvdata(pdev, dev);
8542
8543         memcpy(dev->dev_addr, bp->mac_addr, 6);
8544         memcpy(dev->perm_addr, bp->mac_addr, 6);
8545
8546         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8547                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8548                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8549
8550         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8551                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8552
8553         dev->vlan_features = dev->hw_features;
8554         dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8555         dev->features |= dev->hw_features;
8556         dev->priv_flags |= IFF_UNICAST_FLT;
8557
8558         if ((rc = register_netdev(dev))) {
8559                 dev_err(&pdev->dev, "Cannot register net device\n");
8560                 goto error;
8561         }
8562
8563         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8564                     "node addr %pM\n", board_info[ent->driver_data].name,
8565                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8566                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8567                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8568                     pdev->irq, dev->dev_addr);
8569
8570         return 0;
8571
8572 error:
8573         pci_iounmap(pdev, bp->regview);
8574         pci_release_regions(pdev);
8575         pci_disable_device(pdev);
8576         pci_set_drvdata(pdev, NULL);
8577 err_free:
8578         free_netdev(dev);
8579         return rc;
8580 }
8581
/* PCI remove: unregister the netdev, then release everything acquired
 * during probe, in reverse order of acquisition.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* No timer callback or deferred reset work may run past this point. */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8610
/* Legacy PCI PM suspend hook: save config space, quiesce the interface
 * if it is running, and drop the chip into the power state chosen for
 * @state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before shutting down the chip. */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8634
/* Legacy PCI PM resume hook: restore config space and, if the interface
 * was running at suspend time, power the chip back to D0 and restart it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8651
8652 /**
8653  * bnx2_io_error_detected - called when PCI error is detected
8654  * @pdev: Pointer to PCI device
8655  * @state: The current pci connection state
8656  *
8657  * This function is called after a PCI bus error affecting
8658  * this device has been detected.
8659  */
8660 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8661                                                pci_channel_state_t state)
8662 {
8663         struct net_device *dev = pci_get_drvdata(pdev);
8664         struct bnx2 *bp = netdev_priv(dev);
8665
8666         rtnl_lock();
8667         netif_device_detach(dev);
8668
8669         if (state == pci_channel_io_perm_failure) {
8670                 rtnl_unlock();
8671                 return PCI_ERS_RESULT_DISCONNECT;
8672         }
8673
8674         if (netif_running(dev)) {
8675                 bnx2_netif_stop(bp, true);
8676                 del_timer_sync(&bp->timer);
8677                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8678         }
8679
8680         pci_disable_device(pdev);
8681         rtnl_unlock();
8682
8683         /* Request a slot slot reset. */
8684         return PCI_ERS_RESULT_NEED_RESET;
8685 }
8686
8687 /**
8688  * bnx2_io_slot_reset - called after the pci bus has been reset.
8689  * @pdev: Pointer to PCI device
8690  *
8691  * Restart the card from scratch, as if from a cold-boot.
8692  */
8693 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8694 {
8695         struct net_device *dev = pci_get_drvdata(pdev);
8696         struct bnx2 *bp = netdev_priv(dev);
8697         pci_ers_result_t result;
8698         int err;
8699
8700         rtnl_lock();
8701         if (pci_enable_device(pdev)) {
8702                 dev_err(&pdev->dev,
8703                         "Cannot re-enable PCI device after reset\n");
8704                 result = PCI_ERS_RESULT_DISCONNECT;
8705         } else {
8706                 pci_set_master(pdev);
8707                 pci_restore_state(pdev);
8708                 pci_save_state(pdev);
8709
8710                 if (netif_running(dev)) {
8711                         bnx2_set_power_state(bp, PCI_D0);
8712                         bnx2_init_nic(bp, 1);
8713                 }
8714                 result = PCI_ERS_RESULT_RECOVERED;
8715         }
8716         rtnl_unlock();
8717
8718         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8719                 return result;
8720
8721         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8722         if (err) {
8723                 dev_err(&pdev->dev,
8724                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8725                          err); /* non-fatal, continue */
8726         }
8727
8728         return result;
8729 }
8730
8731 /**
8732  * bnx2_io_resume - called when traffic can start flowing again.
8733  * @pdev: Pointer to PCI device
8734  *
8735  * This callback is called when the error recovery driver tells us that
8736  * its OK to resume normal operation.
8737  */
8738 static void bnx2_io_resume(struct pci_dev *pdev)
8739 {
8740         struct net_device *dev = pci_get_drvdata(pdev);
8741         struct bnx2 *bp = netdev_priv(dev);
8742
8743         rtnl_lock();
8744         if (netif_running(dev))
8745                 bnx2_netif_start(bp, true);
8746
8747         netif_device_attach(dev);
8748         rtnl_unlock();
8749 }
8750
/* PCI AER recovery callbacks (detect -> slot reset -> resume). */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8756
/* PCI driver descriptor binding the bnx2 device IDs to the driver's
 * probe/remove, legacy power-management, and error-recovery callbacks.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8766
/* Module load: register the PCI driver; probing happens per-device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

/* Module unload: unregister the PCI driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
8779
8780
8781