/*
 * drivers/net/ethernet/broadcom/bnx2.c
 * (retrieved from git blob 3fcabd9e4b7e1a41397ff8dcee1a690c79a4b509)
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
/* Driver identity strings and the firmware images requested at probe time
 * (declared below via MODULE_FIRMWARE so userspace tooling can bundle them).
 */
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.2.4"
#define DRV_MODULE_RELDATE      "Aug 05, 2013"
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board index; used as the driver_data value in bnx2_pci_tbl and as the
 * index into board_info[] below, so the two must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* indexed by board_t, above -- keep entries in the same order as the enum */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142         { PCI_VENDOR_ID_BROADCOM, 0x163b,
143           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144         { PCI_VENDOR_ID_BROADCOM, 0x163c,
145           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146         { 0, }
147 };
148
/* NVRAM device table.  Each entry pairs a hardware strapping value (first
 * word) with the controller setup words for that flash part (the per-entry
 * comments note which of strap/cfg1/write1 still need updates), followed by
 * access flags, page geometry, byte-address mask, total size, and a name
 * (field meanings per the designated initializers of flash_5709 below).
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* NVRAM spec for 5709-family parts; presumably selected by chip id rather
 * than by the strap-based flash_table lookup above -- confirm against the
 * NVRAM init code.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declarations for NAPI setup/teardown helpers defined later. */
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);
251
/* Return the number of free TX descriptors in @txr.
 *
 * tx_prod and tx_cons are 16-bit ring indices (hence the 0xffff mask)
 * updated concurrently by the xmit and completion paths, so they must be
 * re-read from memory on every call.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}
270
/* Indirect register read: program the PCI config window address, then read
 * the data register.  indirect_lock serializes use of the shared window
 * register pair against other indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
282
/* Indirect register write counterpart of bnx2_reg_rd_ind(): program the
 * window address, then write the data, under indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
291
/* Write @val into the firmware shared-memory region at @offset. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
297
/* Read a word from the firmware shared-memory region at @offset. */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
303
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * On the 5709 the write goes through a data/control register pair and
 * completes asynchronously, so poll (up to 5 x 5us) for the WRITE_REQ bit
 * to clear; a timeout is silently ignored.  Older chips use a simple
 * address/data window.  indirect_lock serializes the shared registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
327
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332         struct bnx2 *bp = netdev_priv(dev);
333         struct drv_ctl_io *io = &info->data.io;
334
335         switch (info->cmd) {
336         case DRV_CTL_IO_WR_CMD:
337                 bnx2_reg_wr_ind(bp, io->offset, io->data);
338                 break;
339         case DRV_CTL_IO_RD_CMD:
340                 io->data = bnx2_reg_rd_ind(bp, io->offset);
341                 break;
342         case DRV_CTL_CTX_WR_CMD:
343                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344                 break;
345         default:
346                 return -EINVAL;
347         }
348         return 0;
349 }
350
/* Populate the cnic_eth_dev IRQ info handed to the cnic driver.
 *
 * With MSI-X, cnic gets its own vector (index bp->irq_nvecs) and its own
 * MSI-X status block; otherwise it shares vector 0 and the base status
 * block, and cnic_present/cnic_tag let the NAPI poll loop service it.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
377
/* Register the cnic driver's ops with this device.
 *
 * Fails with -EBUSY if already registered and -ENODEV if the firmware
 * reports no iSCSI connection support.  cnic_ops is published with
 * rcu_assign_pointer so readers see cnic_data set first.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
403
/* Unregister the cnic driver: clear state under cnic_lock, NULL the ops
 * pointer, then synchronize_rcu() so no RCU reader can still be using the
 * old ops when we return.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
418
419 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421         struct bnx2 *bp = netdev_priv(dev);
422         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423
424         if (!cp->max_iscsi_conn)
425                 return NULL;
426
427         cp->drv_owner = THIS_MODULE;
428         cp->chip_id = bp->chip_id;
429         cp->pdev = bp->pdev;
430         cp->io_base = bp->regview;
431         cp->drv_ctl = bnx2_drv_ctl;
432         cp->drv_register_cnic = bnx2_register_cnic;
433         cp->drv_unregister_cnic = bnx2_unregister_cnic;
434
435         return cp;
436 }
437
/* Notify a registered cnic driver that this device is stopping.
 * cnic_lock both protects the ops pointer (rcu_dereference_protected)
 * and keeps it alive for the duration of the callback.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
453
/* Notify a registered cnic driver that this device has (re)started.
 * In shared-vector (non-MSI-X) mode, resync cnic_tag with the current
 * status-block index before delivering the START command.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
474
475 #else
476
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
481
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
486
487 #endif
488
489 static int
490 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491 {
492         u32 val1;
493         int i, ret;
494
495         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
496                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
497                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
498
499                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
500                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
501
502                 udelay(40);
503         }
504
505         val1 = (bp->phy_addr << 21) | (reg << 16) |
506                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507                 BNX2_EMAC_MDIO_COMM_START_BUSY;
508         BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
509
510         for (i = 0; i < 50; i++) {
511                 udelay(10);
512
513                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
514                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
515                         udelay(5);
516
517                         val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
518                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
519
520                         break;
521                 }
522         }
523
524         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525                 *val = 0x0;
526                 ret = -EBUSY;
527         }
528         else {
529                 *val = val1;
530                 ret = 0;
531         }
532
533         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
534                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
535                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
536
537                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
539
540                 udelay(40);
541         }
542
543         return ret;
544 }
545
546 static int
547 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548 {
549         u32 val1;
550         int i, ret;
551
552         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
553                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
554                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
555
556                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
558
559                 udelay(40);
560         }
561
562         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
565         BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
566
567         for (i = 0; i < 50; i++) {
568                 udelay(10);
569
570                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
571                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
572                         udelay(5);
573                         break;
574                 }
575         }
576
577         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
578                 ret = -EBUSY;
579         else
580                 ret = 0;
581
582         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
583                 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
584                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
585
586                 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587                 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
588
589                 udelay(40);
590         }
591
592         return ret;
593 }
594
595 static void
596 bnx2_disable_int(struct bnx2 *bp)
597 {
598         int i;
599         struct bnx2_napi *bnapi;
600
601         for (i = 0; i < bp->irq_nvecs; i++) {
602                 bnapi = &bp->bnx2_napi[i];
603                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605         }
606         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607 }
608
609 static void
610 bnx2_enable_int(struct bnx2 *bp)
611 {
612         int i;
613         struct bnx2_napi *bnapi;
614
615         for (i = 0; i < bp->irq_nvecs; i++) {
616                 bnapi = &bp->bnx2_napi[i];
617
618                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620                         BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621                         bnapi->last_status_idx);
622
623                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625                         bnapi->last_status_idx);
626         }
627         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
628 }
629
630 static void
631 bnx2_disable_int_sync(struct bnx2 *bp)
632 {
633         int i;
634
635         atomic_inc(&bp->intr_sem);
636         if (!netif_running(bp->dev))
637                 return;
638
639         bnx2_disable_int(bp);
640         for (i = 0; i < bp->irq_nvecs; i++)
641                 synchronize_irq(bp->irq_tbl[i].vector);
642 }
643
644 static void
645 bnx2_napi_disable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_disable(&bp->bnx2_napi[i].napi);
651 }
652
653 static void
654 bnx2_napi_enable(struct bnx2 *bp)
655 {
656         int i;
657
658         for (i = 0; i < bp->irq_nvecs; i++)
659                 napi_enable(&bp->bnx2_napi[i].napi);
660 }
661
662 static void
663 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
664 {
665         if (stop_cnic)
666                 bnx2_cnic_stop(bp);
667         if (netif_running(bp->dev)) {
668                 bnx2_napi_disable(bp);
669                 netif_tx_disable(bp->dev);
670         }
671         bnx2_disable_int_sync(bp);
672         netif_carrier_off(bp->dev);     /* prevent tx timeout */
673 }
674
675 static void
676 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
677 {
678         if (atomic_dec_and_test(&bp->intr_sem)) {
679                 if (netif_running(bp->dev)) {
680                         netif_tx_wake_all_queues(bp->dev);
681                         spin_lock_bh(&bp->phy_lock);
682                         if (bp->link_up)
683                                 netif_carrier_on(bp->dev);
684                         spin_unlock_bh(&bp->phy_lock);
685                         bnx2_napi_enable(bp);
686                         bnx2_enable_int(bp);
687                         if (start_cnic)
688                                 bnx2_cnic_start(bp);
689                 }
690         }
691 }
692
693 static void
694 bnx2_free_tx_mem(struct bnx2 *bp)
695 {
696         int i;
697
698         for (i = 0; i < bp->num_tx_rings; i++) {
699                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702                 if (txr->tx_desc_ring) {
703                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704                                           txr->tx_desc_ring,
705                                           txr->tx_desc_mapping);
706                         txr->tx_desc_ring = NULL;
707                 }
708                 kfree(txr->tx_buf_ring);
709                 txr->tx_buf_ring = NULL;
710         }
711 }
712
713 static void
714 bnx2_free_rx_mem(struct bnx2 *bp)
715 {
716         int i;
717
718         for (i = 0; i < bp->num_rx_rings; i++) {
719                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721                 int j;
722
723                 for (j = 0; j < bp->rx_max_ring; j++) {
724                         if (rxr->rx_desc_ring[j])
725                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726                                                   rxr->rx_desc_ring[j],
727                                                   rxr->rx_desc_mapping[j]);
728                         rxr->rx_desc_ring[j] = NULL;
729                 }
730                 vfree(rxr->rx_buf_ring);
731                 rxr->rx_buf_ring = NULL;
732
733                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
734                         if (rxr->rx_pg_desc_ring[j])
735                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736                                                   rxr->rx_pg_desc_ring[j],
737                                                   rxr->rx_pg_desc_mapping[j]);
738                         rxr->rx_pg_desc_ring[j] = NULL;
739                 }
740                 vfree(rxr->rx_pg_ring);
741                 rxr->rx_pg_ring = NULL;
742         }
743 }
744
745 static int
746 bnx2_alloc_tx_mem(struct bnx2 *bp)
747 {
748         int i;
749
750         for (i = 0; i < bp->num_tx_rings; i++) {
751                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755                 if (txr->tx_buf_ring == NULL)
756                         return -ENOMEM;
757
758                 txr->tx_desc_ring =
759                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760                                            &txr->tx_desc_mapping, GFP_KERNEL);
761                 if (txr->tx_desc_ring == NULL)
762                         return -ENOMEM;
763         }
764         return 0;
765 }
766
767 static int
768 bnx2_alloc_rx_mem(struct bnx2 *bp)
769 {
770         int i;
771
772         for (i = 0; i < bp->num_rx_rings; i++) {
773                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775                 int j;
776
777                 rxr->rx_buf_ring =
778                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
779                 if (rxr->rx_buf_ring == NULL)
780                         return -ENOMEM;
781
782                 for (j = 0; j < bp->rx_max_ring; j++) {
783                         rxr->rx_desc_ring[j] =
784                                 dma_alloc_coherent(&bp->pdev->dev,
785                                                    RXBD_RING_SIZE,
786                                                    &rxr->rx_desc_mapping[j],
787                                                    GFP_KERNEL);
788                         if (rxr->rx_desc_ring[j] == NULL)
789                                 return -ENOMEM;
790
791                 }
792
793                 if (bp->rx_pg_ring_size) {
794                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
795                                                   bp->rx_max_pg_ring);
796                         if (rxr->rx_pg_ring == NULL)
797                                 return -ENOMEM;
798
799                 }
800
801                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
802                         rxr->rx_pg_desc_ring[j] =
803                                 dma_alloc_coherent(&bp->pdev->dev,
804                                                    RXBD_RING_SIZE,
805                                                    &rxr->rx_pg_desc_mapping[j],
806                                                    GFP_KERNEL);
807                         if (rxr->rx_pg_desc_ring[j] == NULL)
808                                 return -ENOMEM;
809
810                 }
811         }
812         return 0;
813 }
814
815 static void
816 bnx2_free_mem(struct bnx2 *bp)
817 {
818         int i;
819         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
820
821         bnx2_free_tx_mem(bp);
822         bnx2_free_rx_mem(bp);
823
824         for (i = 0; i < bp->ctx_pages; i++) {
825                 if (bp->ctx_blk[i]) {
826                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
827                                           bp->ctx_blk[i],
828                                           bp->ctx_blk_mapping[i]);
829                         bp->ctx_blk[i] = NULL;
830                 }
831         }
832         if (bnapi->status_blk.msi) {
833                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834                                   bnapi->status_blk.msi,
835                                   bp->status_blk_mapping);
836                 bnapi->status_blk.msi = NULL;
837                 bp->stats_blk = NULL;
838         }
839 }
840
/* Allocate all coherent DMA memory the device needs: the combined
 * status + statistics block, the 5709 context pages, and the RX/TX
 * ring memory.  On any failure everything already allocated is
 * released via bnx2_free_mem() and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* With MSI-X each HW vector gets its own aligned
                 * status block slot inside the same allocation.
                 */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                         &bp->status_blk_mapping, GFP_KERNEL);
        if (status_blk == NULL)
                goto alloc_mem_err;

        /* Vector 0 always uses the base (MSI-style) status block. */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Point vectors 1..n-1 at their per-vector slots. */
                for (i = 1; i < bp->irq_nvecs; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* Vector number in bits 31:24 — presumably the
                         * HW interrupt-number field; confirm against
                         * the register layout in bnx2.h.
                         */
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block follows the status block(s) in the same
         * allocation; derive its CPU and DMA addresses by offset.
         */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                /* 5709 needs 0x2000 bytes of host context memory,
                 * split into page-sized chunks (at least one page).
                 */
                bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                                BNX2_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i],
                                                GFP_KERNEL);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        /* bnx2_free_mem() tolerates partial allocation. */
        bnx2_free_mem(bp);
        return -ENOMEM;
}
915
/* Report the current link state (speed/duplex/autoneg status) to the
 * bootcode firmware through shared memory, so management firmware
 * stays in sync with the driver's view of the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        /* With a remote PHY the firmware owns the link; nothing to report. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is latched; read twice to get the
                         * current (not latched) link status.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
974
975 static char *
976 bnx2_xceiver_str(struct bnx2 *bp)
977 {
978         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
979                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
980                  "Copper");
981 }
982
/* Log the link state to the kernel log and update the carrier state,
 * then propagate the link state to the firmware.  The "Link is Up"
 * message is built incrementally with pr_cont(), so the statement
 * order below determines the message text.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                /* No trailing newline: flow-control details are
                 * appended below via pr_cont().
                 */
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        }
                        else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
1013
/* Resolve the effective RX/TX pause (flow control) configuration into
 * bp->flow_ctrl, either from the forced settings or from the
 * advertisement exchanged during autonegotiation.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* If either speed or flow control is not autonegotiated, use
         * the administratively requested setting (full duplex only).
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes PHY reports the already-resolved pause
         * result directly in its 1000X status register.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* For SerDes, translate the 1000BASE-X pause bits into the
         * copper ADVERTISE_PAUSE_* encoding so the resolution logic
         * below can be shared.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                /* Partner can send but not receive
                                 * pause: we only enable RX pause.
                                 */
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1089
1090 static int
1091 bnx2_5709s_linkup(struct bnx2 *bp)
1092 {
1093         u32 val, speed;
1094
1095         bp->link_up = 1;
1096
1097         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1098         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1099         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1100
1101         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1102                 bp->line_speed = bp->req_line_speed;
1103                 bp->duplex = bp->req_duplex;
1104                 return 0;
1105         }
1106         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1107         switch (speed) {
1108                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1109                         bp->line_speed = SPEED_10;
1110                         break;
1111                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1112                         bp->line_speed = SPEED_100;
1113                         break;
1114                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1115                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1116                         bp->line_speed = SPEED_1000;
1117                         break;
1118                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1119                         bp->line_speed = SPEED_2500;
1120                         break;
1121         }
1122         if (val & MII_BNX2_GP_TOP_AN_FD)
1123                 bp->duplex = DUPLEX_FULL;
1124         else
1125                 bp->duplex = DUPLEX_HALF;
1126         return 0;
1127 }
1128
1129 static int
1130 bnx2_5708s_linkup(struct bnx2 *bp)
1131 {
1132         u32 val;
1133
1134         bp->link_up = 1;
1135         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1136         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1137                 case BCM5708S_1000X_STAT1_SPEED_10:
1138                         bp->line_speed = SPEED_10;
1139                         break;
1140                 case BCM5708S_1000X_STAT1_SPEED_100:
1141                         bp->line_speed = SPEED_100;
1142                         break;
1143                 case BCM5708S_1000X_STAT1_SPEED_1G:
1144                         bp->line_speed = SPEED_1000;
1145                         break;
1146                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1147                         bp->line_speed = SPEED_2500;
1148                         break;
1149         }
1150         if (val & BCM5708S_1000X_STAT1_FD)
1151                 bp->duplex = DUPLEX_FULL;
1152         else
1153                 bp->duplex = DUPLEX_HALF;
1154
1155         return 0;
1156 }
1157
1158 static int
1159 bnx2_5706s_linkup(struct bnx2 *bp)
1160 {
1161         u32 bmcr, local_adv, remote_adv, common;
1162
1163         bp->link_up = 1;
1164         bp->line_speed = SPEED_1000;
1165
1166         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1167         if (bmcr & BMCR_FULLDPLX) {
1168                 bp->duplex = DUPLEX_FULL;
1169         }
1170         else {
1171                 bp->duplex = DUPLEX_HALF;
1172         }
1173
1174         if (!(bmcr & BMCR_ANENABLE)) {
1175                 return 0;
1176         }
1177
1178         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1179         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1180
1181         common = local_adv & remote_adv;
1182         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1183
1184                 if (common & ADVERTISE_1000XFULL) {
1185                         bp->duplex = DUPLEX_FULL;
1186                 }
1187                 else {
1188                         bp->duplex = DUPLEX_HALF;
1189                 }
1190         }
1191
1192         return 0;
1193 }
1194
/* Resolve speed/duplex for a copper PHY after link-up, from the
 * autonegotiation results (1000BASE-T first, then 10/100), or from
 * BMCR when autoneg is disabled.  Also latches the MDI-X status into
 * bp->phy_flags.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                /* 1000BASE-T abilities: ours in CTRL1000, partner's
                 * in STAT1000.
                 */
                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* LPA_1000FULL/HALF sit two bits above the
                 * ADVERTISE_1000* bits, hence the >> 2.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 10/100
                         * abilities, highest common denominator first.
                         */
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* Nothing in common: treat as no link. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg disabled: speed/duplex come straight from BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        if (bp->link_up) {
                u32 ext_status;

                /* Record whether the PHY crossed over (MDI-X). */
                bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
                if (ext_status & EXT_STATUS_MDIX)
                        bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
        }

        return 0;
}
1270
1271 static void
1272 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1273 {
1274         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1275
1276         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1277         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1278         val |= 0x02 << 8;
1279
1280         if (bp->flow_ctrl & FLOW_CTRL_TX)
1281                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1282
1283         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1284 }
1285
1286 static void
1287 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1288 {
1289         int i;
1290         u32 cid;
1291
1292         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1293                 if (i == 1)
1294                         cid = RX_RSS_CID;
1295                 bnx2_init_rx_context(bp, cid);
1296         }
1297 }
1298
/* Program the EMAC to match the resolved link parameters: port mode
 * (MII/GMII/2.5G), duplex, inter-frame gap, and RX/TX pause enables.
 * Finishes by acking the link-change interrupt and re-initializing
 * the RX contexts (flow control lives in the context too).
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Default TX inter-packet gap; 1000HD needs a larger value
         * (magic constants per hardware programming requirements —
         * see BNX2_EMAC_TX_LENGTHS in bnx2.h).
         */
        BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = BNX2_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no 10M port mode; it uses
                                 * plain MII for 10 Mbps too.
                                 */
                                if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII with the 25G mode bit set. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* No link: park the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        BNX2_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        bnx2_init_all_rx_contexts(bp);
}
1365
1366 static void
1367 bnx2_enable_bmsr1(struct bnx2 *bp)
1368 {
1369         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1370             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1371                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1372                                MII_BNX2_BLK_ADDR_GP_STATUS);
1373 }
1374
1375 static void
1376 bnx2_disable_bmsr1(struct bnx2 *bp)
1377 {
1378         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1379             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1380                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1381                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1382 }
1383
1384 static int
1385 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1386 {
1387         u32 up1;
1388         int ret = 1;
1389
1390         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1391                 return 0;
1392
1393         if (bp->autoneg & AUTONEG_SPEED)
1394                 bp->advertising |= ADVERTISED_2500baseX_Full;
1395
1396         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1397                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1398
1399         bnx2_read_phy(bp, bp->mii_up1, &up1);
1400         if (!(up1 & BCM5708S_UP1_2G5)) {
1401                 up1 |= BCM5708S_UP1_2G5;
1402                 bnx2_write_phy(bp, bp->mii_up1, up1);
1403                 ret = 0;
1404         }
1405
1406         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1407                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1408                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1409
1410         return ret;
1411 }
1412
1413 static int
1414 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1415 {
1416         u32 up1;
1417         int ret = 0;
1418
1419         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1420                 return 0;
1421
1422         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1423                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1424
1425         bnx2_read_phy(bp, bp->mii_up1, &up1);
1426         if (up1 & BCM5708S_UP1_2G5) {
1427                 up1 &= ~BCM5708S_UP1_2G5;
1428                 bnx2_write_phy(bp, bp->mii_up1, up1);
1429                 ret = 1;
1430         }
1431
1432         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1433                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1434                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1435
1436         return ret;
1437 }
1438
/* Force the SerDes PHY to a fixed 2.5 Gbps link (chip-specific
 * register sequence for 5709 and 5708).  A no-op for PHYs that are
 * not 2.5G capable or for other chips.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
        /* bmcr is only read when err == 0; uninitialized_var()
         * silences the compiler's false-positive warning.
         */
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                u32 val;

                /* 5709: set the force-2.5G bits in the SERDES_DIG
                 * MISC1 register, then restore the default block.
                 */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
                        val |= MII_BNX2_SD_MISC1_FORCE |
                                MII_BNX2_SD_MISC1_FORCE_2_5G;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
                /* 5708: a dedicated BMCR bit forces 2.5G. */
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr |= BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        /* Forcing a speed is incompatible with autoneg; disable it
         * and carry over the requested duplex.
         */
        if (bp->autoneg & AUTONEG_SPEED) {
                bmcr &= ~BMCR_ANENABLE;
                if (bp->req_duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;
        }
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1482
/* Undo bnx2_enable_forced_2g5(): clear the forced-2.5G setting and,
 * if autoneg is configured, restart autonegotiation at 1000 Mbps.
 * A no-op for PHYs that are not 2.5G capable or for other chips.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
        /* bmcr is only read when err == 0; uninitialized_var()
         * silences the compiler's false-positive warning.
         */
        u32 uninitialized_var(bmcr);
        int err;

        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                return;

        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
                u32 val;

                /* 5709: clear the force bit in SERDES_DIG MISC1, then
                 * restore the default register block.
                 */
                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_SERDES_DIG);
                if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
                        val &= ~MII_BNX2_SD_MISC1_FORCE;
                        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
                }

                bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                               MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
                err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (!err)
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
        } else {
                return;
        }

        if (err)
                return;

        /* Re-enable and restart autoneg at gigabit. */
        if (bp->autoneg & AUTONEG_SPEED)
                bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1521
1522 static void
1523 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1524 {
1525         u32 val;
1526
1527         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1528         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1529         if (start)
1530                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1531         else
1532                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1533 }
1534
/* Re-evaluate the physical link state, update bp->link_up and the
 * derived speed/duplex/flow-control settings, log any state change,
 * and reprogram the MAC accordingly.  Always returns 0.
 * Caller context: runs under bp->phy_lock (per the driver's locking
 * convention for PHY access — confirm at call sites).
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback modes the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Remote-PHY systems: firmware manages the link. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        /* Remember the previous state to detect transitions. */
        link_up = bp->link_up;

        /* BMSR1 may live in an alternate register block (5709
         * SerDes); reads are doubled because link status is latched.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
                u32 val, an_dbg;

                /* 5706 SerDes workaround: derive link status from the
                 * EMAC status and the AN debug shadow register rather
                 * than trusting BMSR alone.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = BNX2_RD(bp, BNX2_EMAC_STATUS);

                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex with the chip-specific handler. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
                                bnx2_5706s_linkup(bp);
                        else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
                                bnx2_5708s_linkup(bp);
                        else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link is down: back out any forced-2.5G setting and
                 * fall back from parallel detection to full autoneg.
                 */
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log when the state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1618
1619 static int
1620 bnx2_reset_phy(struct bnx2 *bp)
1621 {
1622         int i;
1623         u32 reg;
1624
1625         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1626
1627 #define PHY_RESET_MAX_WAIT 100
1628         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1629                 udelay(10);
1630
1631                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1632                 if (!(reg & BMCR_RESET)) {
1633                         udelay(20);
1634                         break;
1635                 }
1636         }
1637         if (i == PHY_RESET_MAX_WAIT) {
1638                 return -EBUSY;
1639         }
1640         return 0;
1641 }
1642
1643 static u32
1644 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1645 {
1646         u32 adv = 0;
1647
1648         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1649                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1650
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPAUSE;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_CAP;
1656                 }
1657         }
1658         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1659                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660                         adv = ADVERTISE_1000XPSE_ASYM;
1661                 }
1662                 else {
1663                         adv = ADVERTISE_PAUSE_ASYM;
1664                 }
1665         }
1666         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1667                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1669                 }
1670                 else {
1671                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1672                 }
1673         }
1674         return adv;
1675 }
1676
1677 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1678
/* Ask the firmware-managed (remote) PHY to configure the link:
 * encode speed/duplex/pause settings into the netlink-style argument
 * word and issue a SET_LINK firmware command.  Drops and reacquires
 * bp->phy_lock around the (sleeping) firmware handshake, as the
 * sparse annotations declare.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: advertise every enabled ability. */
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced mode: request exactly one speed/duplex
                 * (>=1G is always full duplex).
                 */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        /* Map the pause advertisement into the firmware's flags. */
        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        /* bnx2_fw_sync() can sleep waiting for the firmware, so the
         * spinlock must be dropped across the call.
         */
        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1737
/* Configure a SerDes PHY with the requested link settings.
 *
 * Remote-PHY capable devices delegate to bnx2_setup_remote_phy().
 * Otherwise, the forced-speed path programs the BMCR/ADV registers
 * directly (with chip-specific 2.5G handling), and the autoneg path
 * advertises 1000X full (plus pause bits) and restarts autonegotiation
 * only when the advertisement actually changed.
 *
 * Called with bp->phy_lock held; the lock is dropped around msleep()
 * when the code forces a link-down that the partner must observe.
 * Always returns 0 (or bnx2_setup_remote_phy()'s return).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability may require a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific control of the forced 2.5G mode. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1854
1855 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1856         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1857                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1858                 (ADVERTISED_1000baseT_Full)
1859
1860 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1861         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1862         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1863         ADVERTISED_1000baseT_Full)
1864
1865 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1866         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1867
1868 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1869
1870 static void
1871 bnx2_set_default_remote_link(struct bnx2 *bp)
1872 {
1873         u32 link;
1874
1875         if (bp->phy_port == PORT_TP)
1876                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1877         else
1878                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1879
1880         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1881                 bp->req_line_speed = 0;
1882                 bp->autoneg |= AUTONEG_SPEED;
1883                 bp->advertising = ADVERTISED_Autoneg;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1885                         bp->advertising |= ADVERTISED_10baseT_Half;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1887                         bp->advertising |= ADVERTISED_10baseT_Full;
1888                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1889                         bp->advertising |= ADVERTISED_100baseT_Half;
1890                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1891                         bp->advertising |= ADVERTISED_100baseT_Full;
1892                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1893                         bp->advertising |= ADVERTISED_1000baseT_Full;
1894                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1895                         bp->advertising |= ADVERTISED_2500baseX_Full;
1896         } else {
1897                 bp->autoneg = 0;
1898                 bp->advertising = 0;
1899                 bp->req_duplex = DUPLEX_FULL;
1900                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1901                         bp->req_line_speed = SPEED_10;
1902                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1903                                 bp->req_duplex = DUPLEX_HALF;
1904                 }
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1906                         bp->req_line_speed = SPEED_100;
1907                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1908                                 bp->req_duplex = DUPLEX_HALF;
1909                 }
1910                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1911                         bp->req_line_speed = SPEED_1000;
1912                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1913                         bp->req_line_speed = SPEED_2500;
1914         }
1915 }
1916
1917 static void
1918 bnx2_set_default_link(struct bnx2 *bp)
1919 {
1920         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1921                 bnx2_set_default_remote_link(bp);
1922                 return;
1923         }
1924
1925         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1926         bp->req_line_speed = 0;
1927         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1928                 u32 reg;
1929
1930                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1931
1932                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1933                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1934                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1935                         bp->autoneg = 0;
1936                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1937                         bp->req_duplex = DUPLEX_FULL;
1938                 }
1939         } else
1940                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1941 }
1942
/* Bump the driver pulse sequence number and write it to the shared
 * memory pulse mailbox so the bootcode knows the driver is alive.
 *
 * Writes through the PCICFG register window directly (rather than via
 * bnx2_shmem_wr()) under indirect_lock, serializing with other users
 * of the indirect register window.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1956
/* Handle a link event reported by the firmware-managed (remote) PHY.
 *
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * line_speed, duplex, flow_ctrl and phy_port, reports a link change if
 * one occurred, and reprograms the MAC to match.  Also answers a
 * heart-beat request if the firmware flagged one.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect changes */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case sets bp->duplex and falls through
		 * to the matching full-duplex case to pick up the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: use the forced setting unless both speed
		 * and flow control were autonegotiated, in which case
		 * take the negotiated result from the status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port type change requires re-loading the defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2037
2038 static int
2039 bnx2_set_remote_link(struct bnx2 *bp)
2040 {
2041         u32 evt_code;
2042
2043         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2044         switch (evt_code) {
2045                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2046                         bnx2_remote_phy_event(bp);
2047                         break;
2048                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2049                 default:
2050                         bnx2_send_heart_beat(bp);
2051                         break;
2052         }
2053         return 0;
2054 }
2055
/* Configure the copper PHY with the requested link settings.
 *
 * Autoneg mode: the advertisement registers are reprogrammed and
 * autonegotiation restarted only when something actually changed.
 * Forced mode: BMCR is programmed directly; if the link stays up
 * despite the change, speed/duplex/flow-control are resolved here.
 *
 * Called with bp->phy_lock held; the lock is dropped around msleep()
 * while a forced link-down settles.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Restart autoneg only if the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* The BMSR link bit is latched; read twice so the second
		 * read reflects the current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2147
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153         if (bp->loopback == MAC_LOOPBACK)
2154                 return 0;
2155
2156         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157                 return bnx2_setup_serdes_phy(bp, port);
2158         }
2159         else {
2160                 return bnx2_setup_copper_phy(bp);
2161         }
2162 }
2163
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709 SerDes exposes its IEEE-equivalent registers at an offset
 * of 0x10 inside the autoneg MMD, so the cached MII register offsets
 * are redirected first.  The remaining writes select register blocks
 * via BLK_ADDR and program fiber mode, optional 2.5G advertisement,
 * and the BAM/CL73 next-page autoneg features.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Redirect the generic MII accessors to the 5709 offsets. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the device is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg (BAM) next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected for normal operation. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2213
/* Initialize the 5708 SerDes PHY.
 *
 * Enables fiber mode with auto-detection and PLL early link detect,
 * advertises 2.5G when the device is capable, applies a TX-amplitude
 * workaround on early 5708 revisions (A0/B0/B1), and programs the
 * TXCTL3 value from the shared-memory hardware config on backplane
 * designs.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* PLL early link detect. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a configured TXCTL3 value, but only on backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2271
/* Initialize the 5706 SerDes PHY.
 *
 * Programs the PHY's shadow registers (0x18 aux control, 0x1c shadow
 * access) to enable or disable extended packet length depending on the
 * current MTU.  NOTE(review): the magic register values written here
 * are vendor-specific and not otherwise documented in this file —
 * presumably taken from Broadcom reference code.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2309
/* Initialize the copper PHY.
 *
 * Applies vendor workarounds that are flagged in bp->phy_flags (a CRC
 * fix sequence and an early-DAC disable), configures extended packet
 * length according to the MTU, and enables ethernet@wirespeed (plus
 * auto-MDIX on the 5709) via the aux control register.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor CRC workaround: fixed sequence of shadow
		 * register writes; the order must not be changed.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear the early-DAC bit in DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2367
2368
2369 static int
2370 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2371 __releases(&bp->phy_lock)
2372 __acquires(&bp->phy_lock)
2373 {
2374         u32 val;
2375         int rc = 0;
2376
2377         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2378         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2379
2380         bp->mii_bmcr = MII_BMCR;
2381         bp->mii_bmsr = MII_BMSR;
2382         bp->mii_bmsr1 = MII_BMSR;
2383         bp->mii_adv = MII_ADVERTISE;
2384         bp->mii_lpa = MII_LPA;
2385
2386         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2387
2388         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2389                 goto setup_phy;
2390
2391         bnx2_read_phy(bp, MII_PHYSID1, &val);
2392         bp->phy_id = val << 16;
2393         bnx2_read_phy(bp, MII_PHYSID2, &val);
2394         bp->phy_id |= val & 0xffff;
2395
2396         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2397                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2398                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2399                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2400                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2401                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2402                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2403         }
2404         else {
2405                 rc = bnx2_init_copper_phy(bp, reset_phy);
2406         }
2407
2408 setup_phy:
2409         if (!rc)
2410                 rc = bnx2_setup_phy(bp, bp->phy_port);
2411
2412         return rc;
2413 }
2414
2415 static int
2416 bnx2_set_mac_loopback(struct bnx2 *bp)
2417 {
2418         u32 mac_mode;
2419
2420         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2421         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2422         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2423         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2424         bp->link_up = 1;
2425         return 0;
2426 }
2427
2428 static int bnx2_test_link(struct bnx2 *);
2429
2430 static int
2431 bnx2_set_phy_loopback(struct bnx2 *bp)
2432 {
2433         u32 mac_mode;
2434         int rc, i;
2435
2436         spin_lock_bh(&bp->phy_lock);
2437         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2438                             BMCR_SPEED1000);
2439         spin_unlock_bh(&bp->phy_lock);
2440         if (rc)
2441                 return rc;
2442
2443         for (i = 0; i < 10; i++) {
2444                 if (bnx2_test_link(bp) == 0)
2445                         break;
2446                 msleep(100);
2447         }
2448
2449         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2450         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2451                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2452                       BNX2_EMAC_MODE_25G_MODE);
2453
2454         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2455         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2456         bp->link_up = 1;
2457         return 0;
2458 }
2459
/* Dump the management CPU (MCP) and shared-memory state to the kernel
 * log.  Used as a diagnostic when the bootcode stops responding (e.g.
 * a bnx2_fw_sync() timeout).  All reads go through the indirect
 * register interface; register offsets differ on the 5709.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice on purpose — presumably two
	 * samples show whether the MCP is still advancing or is hung.
	 * NOTE(review): confirm intent; not documented here.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2502
/* Post a message to the bootcode firmware via the driver mailbox and
 * optionally wait for its acknowledgement.
 *
 * @msg_data: message code; the driver sequence number is OR'ed in here.
 * @ack: when zero, post the message and return 0 without waiting.
 * @silent: when set, suppress the timeout log and MCP state dump.
 *
 * Returns 0 on success (or when no ack was requested), -EBUSY if the
 * firmware did not ack within BNX2_FW_ACK_TIME_OUT_MS, or -EIO if it
 * acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Each message carries a fresh sequence number so the ack read
	 * back from FW_MB can be matched to this particular request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are best-effort: never report a timeout */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2548
/* Initialize the 5709 context memory.
 *
 * Starts the hardware context-memory init and waits for it to finish,
 * then loads the host page table with the DMA address of each
 * pre-allocated context block, polling every write for completion.
 *
 * Returns 0 on success, -EBUSY on a hardware poll timeout, or -ENOMEM
 * if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* (1 << 12) has no named constant in bnx2.h; it is preserved
	 * from the original programming — do not drop it without
	 * hardware documentation.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;	/* page-size derived field */
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the hardware init completes */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the 64-bit DMA address of context block i,
		 * low half (with VALID) then high half, then issue the
		 * page-table write request for entry i.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears when the entry has been consumed */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2596
/* Zero-initialize all 96 connection contexts (non-5709 chips).
 *
 * For each context, the virtual CID address and its backing physical
 * context location are programmed into the context window, then the
 * whole PHY_CTX_SIZE region is cleared one 32-bit word at a time.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0-only workaround: CIDs with bit 3 set
			 * map to a different physical context location.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2639
2640 static int
2641 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2642 {
2643         u16 *good_mbuf;
2644         u32 good_mbuf_cnt;
2645         u32 val;
2646
2647         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2648         if (good_mbuf == NULL)
2649                 return -ENOMEM;
2650
2651         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2652                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2653
2654         good_mbuf_cnt = 0;
2655
2656         /* Allocate a bunch of mbufs and save the good ones in an array. */
2657         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2658         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2659                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2660                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2661
2662                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2663
2664                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2665
2666                 /* The addresses with Bit 9 set are bad memory blocks. */
2667                 if (!(val & (1 << 9))) {
2668                         good_mbuf[good_mbuf_cnt] = (u16) val;
2669                         good_mbuf_cnt++;
2670                 }
2671
2672                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2673         }
2674
2675         /* Free the good ones back to the mbuf pool thus discarding
2676          * all the bad ones. */
2677         while (good_mbuf_cnt) {
2678                 good_mbuf_cnt--;
2679
2680                 val = good_mbuf[good_mbuf_cnt];
2681                 val = (val << 9) | val | 1;
2682
2683                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2684         }
2685         kfree(good_mbuf);
2686         return 0;
2687 }
2688
2689 static void
2690 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2691 {
2692         u32 val;
2693
2694         val = (mac_addr[0] << 8) | mac_addr[1];
2695
2696         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2697
2698         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2699                 (mac_addr[4] << 8) | mac_addr[5];
2700
2701         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2702 }
2703
/* Allocate and DMA-map one page for slot @index of the RX page ring
 * and publish its bus address in the matching buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or
 * -EIO if the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record the page and mapping, then expose the 64-bit bus
	 * address to the hardware via the descriptor.
	 */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2728
2729 static void
2730 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2731 {
2732         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2733         struct page *page = rx_pg->page;
2734
2735         if (!page)
2736                 return;
2737
2738         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2739                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2740
2741         __free_page(page);
2742         rx_pg->page = NULL;
2743 }
2744
/* Allocate and DMA-map a fresh RX data buffer for slot @index and
 * publish its bus address in the matching buffer descriptor.  The
 * mapping starts at the l2_fhdr the hardware writes ahead of the
 * packet data.  Also advances rx_prod_bseq by the buffer size.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EIO on a
 * DMA mapping error (the buffer is freed in that case).
 */
static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	/* Map from the l2_fhdr position, not the raw buffer start */
	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2777
2778 static int
2779 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2780 {
2781         struct status_block *sblk = bnapi->status_blk.msi;
2782         u32 new_link_state, old_link_state;
2783         int is_set = 1;
2784
2785         new_link_state = sblk->status_attn_bits & event;
2786         old_link_state = sblk->status_attn_bits_ack & event;
2787         if (new_link_state != old_link_state) {
2788                 if (new_link_state)
2789                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2790                 else
2791                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2792         } else
2793                 is_set = 0;
2794
2795         return is_set;
2796 }
2797
/* Handle PHY-related attention events raised in the status block.
 * Runs both the local link-state and the remote-PHY (timer abort)
 * checks under phy_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2811
/* Read the current hardware TX consumer index from the status block.
 *
 * The barriers only prevent the compiler from caching status-block
 * fields, which the hardware updates by DMA.  An index landing exactly
 * on BNX2_MAX_TX_DESC_CNT is advanced past that slot — the last entry
 * of each ring page is skipped (presumably reserved for the ring
 * chain descriptor; confirm against the ring setup code).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2825
/* Reclaim completed TX descriptors for one ring, up to @budget packets.
 *
 * Walks from the software consumer to the hardware consumer, unmapping
 * the head and fragment DMA mappings and freeing each completed skb.
 * Updates BQL accounting and wakes the TX queue when it was stopped
 * and enough descriptors have been freed.  Returns the number of
 * packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance; index by position in the array */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until the last BD of this GSO
			 * packet (head + all frags) has completed; the
			 * s16 subtraction handles 16-bit index wrap.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD following the head BD */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read hw_cons in case more completions arrived */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	/* Byte Queue Limits accounting for this queue */
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing a concurrent stop */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2920
/* Recycle @count page-ring entries back to the producer without
 * freeing the pages or their DMA mappings.
 *
 * If @skb is non-NULL, its last fragment page could not be replaced:
 * that page is detached from the skb, returned to the ring at the
 * consumer slot, and the skb is freed.  For every recycled entry the
 * page pointer, DMA handle and descriptor address are moved from the
 * consumer slot to the producer slot.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* When prod == cons the entry is already in place */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2978
/* Recycle the data buffer from RX slot @cons into slot @prod instead
 * of allocating a replacement: resync the mapped header area for the
 * device, move the buffer pointer, DMA handle and descriptor address
 * across, and advance the producer byte sequence.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Only the header area was synced for the CPU; give it back */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: mapping and descriptor are already correct */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3008
/* Build an skb for a received packet.
 *
 * @data: the received buffer (l2_fhdr followed by packet data).
 * @len: packet length excluding the 4-byte CRC (the CRC is re-added
 *       internally when computing page-ring consumption).
 * @hdr_len: non-zero when the packet is split across the page ring;
 *       the first hdr_len bytes are in @data, the rest in ring pages.
 * @dma_addr: DMA mapping of @data; unmapped here on success.
 * @ring_idx: (cons << 16) | prod indices packed by the caller.
 *
 * A replacement data buffer is allocated before @data is consumed; on
 * any failure the old buffer and any page-ring entries are recycled
 * and NULL is returned.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		/* Replacement failed: put the old buffer back ... */
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* ... and recycle any page-ring entries this split
		 * packet occupies (raw length includes the 4-byte CRC).
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip past the l2_fhdr and padding to the packet data */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Whole packet fits in the data buffer */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size counts the CRC so the last page's length
		 * can be trimmed by 4 below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only CRC bytes remain: recycle the
				 * rest of the pages and trim the tail
				 * off the skb (head or last frag).
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				/* skb still owns the current page; let
				 * the recycle helper take it back.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3113
/* Read the current hardware RX consumer index from the status block.
 * Mirrors bnx2_get_hw_tx_cons(): the barriers keep the compiler from
 * caching DMA-updated status-block fields, and an index landing on
 * BNX2_MAX_RX_DESC_CNT is advanced past that (non-buffer) slot.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3127
/* NAPI RX handler for one ring: process up to @budget packets.
 *
 * For each completion, inspects the l2_fhdr the hardware writes ahead
 * of the packet data, drops error frames, copies small packets
 * (<= rx_copy_thresh) into a fresh skb while recycling the buffer,
 * and otherwise hands the buffer off through bnx2_rx_skb().  VLAN tag,
 * checksum and RX-hash offload results are applied before
 * napi_gro_receive().  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the header area is needed on the CPU for now */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Warm the cache for the next completion's header */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine how much of the packet, if any, spilled
		 * into the page ring.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling buffer and pages */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte CRC reported in l2_fhdr_pkt_len */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless VLAN-tagged
		 * (0x8100 == ETH_P_8021Q)
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Apply hardware checksum result when RXCSUM is on */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer positions */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3290
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the cache line holding the status block before NAPI reads it. */
	prefetch(bnapi->status_blk.msi);
	/* Ack and mask further hardware interrupts; NAPI completion will
	 * re-enable them via another INT_ACK_CMD write.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3313
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* One-shot MSI variant: unlike bnx2_msi(), no INT_ACK_CMD mask
	 * write is issued here before scheduling NAPI.
	 */
	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3330
/* Legacy INTx interrupt handler.  The line may be shared with other
 * devices, so the handler must first decide whether this device
 * actually raised the interrupt.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		/* Status unchanged and INTA bit set: not our interrupt. */
		return IRQ_NONE;

	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only when NAPI was actually scheduled;
	 * otherwise a poll is already in flight and owns last_status_idx.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3369
3370 static inline int
3371 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3372 {
3373         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3374         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3375
3376         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3377             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3378                 return 1;
3379         return 0;
3380 }
3381
3382 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3383                                  STATUS_ATTN_BITS_TIMER_ABORT)
3384
3385 static inline int
3386 bnx2_has_work(struct bnx2_napi *bnapi)
3387 {
3388         struct status_block *sblk = bnapi->status_blk.msi;
3389
3390         if (bnx2_has_fast_work(bnapi))
3391                 return 1;
3392
3393 #ifdef BCM_CNIC
3394         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3395                 return 1;
3396 #endif
3397
3398         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3399             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3400                 return 1;
3401
3402         return 0;
3403 }
3404
/* Watchdog for lost MSIs on vector 0: if work is pending and the
 * status-block index has not moved since the previous check, assume
 * the MSI was missed, pulse the MSI enable bit and run the handler by
 * hand.  NOTE(review): the exact hardware condition this works around
 * is not visible in this file — confirm against chip errata.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Only applies when MSI is in use. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI off/on, then service vector 0 directly. */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3426
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC offload driver, if any,
 * and record the tag it returns; bnx2_has_work() compares that tag
 * against status_idx to detect pending CNIC work.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* bp->cnic_ops is published via RCU; pin it across the call. */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3443
/* Service pending link/timer attention events.  An event is pending
 * when its bit differs between status_attn_bits and the acked copy.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3463
3464 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3465                           int work_done, int budget)
3466 {
3467         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3468         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3469
3470         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3471                 bnx2_tx_int(bp, bnapi, 0);
3472
3473         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3474                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3475
3476         return work_done;
3477 }
3478
/* NAPI poll routine for the extra MSI-X vectors: fast-path TX/RX work
 * only — link and CNIC handling live in bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {
			/* All caught up: leave polling mode and re-enable
			 * this vector's interrupt, reporting how far we got.
			 */
			napi_complete(napi);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3505
/* Main NAPI poll routine (vector 0 / single-vector operation): handles
 * link attention, fast-path TX/RX, and CNIC work, then re-enables
 * interrupts when no work remains.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		/* Order the status-block reads in bnx2_has_work() after
		 * the status_idx read above.
		 */
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: one ack write reports progress
				 * and unmasks the interrupt.
				 */
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first write keeps the interrupt masked while
			 * reporting progress, the second unmasks it.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3554
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC receive mode and the RPM sorter to match the
 * netdevice's current promiscuous / multicast / unicast configuration.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags (no hardware stripping) when tag stripping is
	 * disabled and the chip supports keeping them.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			/* Map each address to one of 256 hash-filter bits
			 * using the low byte of its little-endian CRC32.
			 */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for the perfect-match slots: fall
	 * back to promiscuous reception.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC register when the mode actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sorter: clear it, load the new mode, then enable. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3643
3644 static int
3645 check_fw_section(const struct firmware *fw,
3646                  const struct bnx2_fw_file_section *section,
3647                  u32 alignment, bool non_empty)
3648 {
3649         u32 offset = be32_to_cpu(section->offset);
3650         u32 len = be32_to_cpu(section->len);
3651
3652         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3653                 return -EINVAL;
3654         if ((non_empty && len == 0) || len > fw->size - offset ||
3655             len & (alignment - 1))
3656                 return -EINVAL;
3657         return 0;
3658 }
3659
3660 static int
3661 check_mips_fw_entry(const struct firmware *fw,
3662                     const struct bnx2_mips_fw_file_entry *entry)
3663 {
3664         if (check_fw_section(fw, &entry->text, 4, true) ||
3665             check_fw_section(fw, &entry->data, 4, false) ||
3666             check_fw_section(fw, &entry->rodata, 4, false))
3667                 return -EINVAL;
3668         return 0;
3669 }
3670
3671 static void bnx2_release_firmware(struct bnx2 *bp)
3672 {
3673         if (bp->rv2p_firmware) {
3674                 release_firmware(bp->mips_firmware);
3675                 release_firmware(bp->rv2p_firmware);
3676                 bp->rv2p_firmware = NULL;
3677         }
3678 }
3679
/* Request the MIPS and RV2P firmware images for this chip and validate
 * their section tables.  On success bp->mips_firmware and
 * bp->rv2p_firmware hold the checked images; on failure both are
 * released and a negative errno is returned.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image set for the chip; 5709 A0/A1 need their own
	 * RV2P build.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Sanity-check every section of both images before use. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

	/* Error unwind: release in reverse order of acquisition. */
err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3739
3740 static int bnx2_request_firmware(struct bnx2 *bp)
3741 {
3742         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3743 }
3744
3745 static u32
3746 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3747 {
3748         switch (idx) {
3749         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3750                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3751                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3752                 break;
3753         }
3754         return rv2p_code;
3755 }
3756
/* Download one RV2P processor image and apply its fixup table, then
 * leave the processor in reset (it is un-stalled later).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for the target processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: stage the high and low words,
	 * then commit them to instruction slot i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups: each entry names a word index whose
	 * instruction is patched (see rv2p_fw_fixup) and rewritten.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			/* loc - 1 is the high word, loc the low word of the
			 * 64-bit instruction at slot loc/2.
			 */
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3816
3817 static int
3818 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3819             const struct bnx2_mips_fw_file_entry *fw_entry)
3820 {
3821         u32 addr, len, file_offset;
3822         __be32 *data;
3823         u32 offset;
3824         u32 val;
3825
3826         /* Halt the CPU. */
3827         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3828         val |= cpu_reg->mode_value_halt;
3829         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3830         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3831
3832         /* Load the Text area. */
3833         addr = be32_to_cpu(fw_entry->text.addr);
3834         len = be32_to_cpu(fw_entry->text.len);
3835         file_offset = be32_to_cpu(fw_entry->text.offset);
3836         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3837
3838         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3839         if (len) {
3840                 int j;
3841
3842                 for (j = 0; j < (len / 4); j++, offset += 4)
3843                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3844         }
3845
3846         /* Load the Data area. */
3847         addr = be32_to_cpu(fw_entry->data.addr);
3848         len = be32_to_cpu(fw_entry->data.len);
3849         file_offset = be32_to_cpu(fw_entry->data.offset);
3850         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3851
3852         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3853         if (len) {
3854                 int j;
3855
3856                 for (j = 0; j < (len / 4); j++, offset += 4)
3857                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3858         }
3859
3860         /* Load the Read-Only area. */
3861         addr = be32_to_cpu(fw_entry->rodata.addr);
3862         len = be32_to_cpu(fw_entry->rodata.len);
3863         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3864         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3865
3866         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3867         if (len) {
3868                 int j;
3869
3870                 for (j = 0; j < (len / 4); j++, offset += 4)
3871                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3872         }
3873
3874         /* Clear the pre-fetch instruction. */
3875         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3876
3877         val = be32_to_cpu(fw_entry->start_addr);
3878         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3879
3880         /* Start the CPU. */
3881         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3882         val &= ~cpu_reg->mode_value_halt;
3883         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3884         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3885
3886         return 0;
3887 }
3888
3889 static int
3890 bnx2_init_cpus(struct bnx2 *bp)
3891 {
3892         const struct bnx2_mips_fw_file *mips_fw =
3893                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3894         const struct bnx2_rv2p_fw_file *rv2p_fw =
3895                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3896         int rc;
3897
3898         /* Initialize the RV2P processor. */
3899         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3900         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3901
3902         /* Initialize the RX Processor. */
3903         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3904         if (rc)
3905                 goto init_cpu_err;
3906
3907         /* Initialize the TX Processor. */
3908         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3909         if (rc)
3910                 goto init_cpu_err;
3911
3912         /* Initialize the TX Patch-up Processor. */
3913         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3914         if (rc)
3915                 goto init_cpu_err;
3916
3917         /* Initialize the Completion Processor. */
3918         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3919         if (rc)
3920                 goto init_cpu_err;
3921
3922         /* Initialize the Command Processor. */
3923         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3924
3925 init_cpu_err:
3926         return rc;
3927 }
3928
/* Configure the MAC for Wake-on-LAN before entering a low-power state:
 * renegotiate a 10/100 link on copper, enable magic/ACPI packet
 * reception and broadcast+multicast sorting, then tell the bootcode
 * whether WOL is armed.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Temporarily advertise only 10/100 on copper for the WOL
		 * link, restoring the user's settings afterwards.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Sort on broadcast and multicast: clear, load, enable. */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	/* Notify the bootcode of the suspend unless WOL is unsupported. */
	if (!(bp->flags & BNX2_FLAG_NO_WOL))
		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);

}
4008
/* Move the device between PCI power states.
 *
 * D0: wake the device and undo the magic-packet receive configuration.
 * D3hot: arm WOL and power down — except on 5706 A0/A1, where the chip
 * is only put into D3hot when WOL is enabled (NOTE(review): the errata
 * behind that special case is not visible here — confirm).
 * Any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Clear magic-packet mode; the *_RCVD bits appear to be
		 * write-to-clear status bits — confirm against the
		 * register spec.
		 */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern-based wake in the RX path. */
		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
		} else {
			pci_set_power_state(bp->pdev, PCI_D3hot);
		}

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4051
4052 static int
4053 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4054 {
4055         u32 val;
4056         int j;
4057
4058         /* Request access to the flash interface. */
4059         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4060         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4061                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4062                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4063                         break;
4064
4065                 udelay(5);
4066         }
4067
4068         if (j >= NVRAM_TIMEOUT_COUNT)
4069                 return -EBUSY;
4070
4071         return 0;
4072 }
4073
4074 static int
4075 bnx2_release_nvram_lock(struct bnx2 *bp)
4076 {
4077         int j;
4078         u32 val;
4079
4080         /* Relinquish nvram interface. */
4081         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4082
4083         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4084                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4085                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4086                         break;
4087
4088                 udelay(5);
4089         }
4090
4091         if (j >= NVRAM_TIMEOUT_COUNT)
4092                 return -EBUSY;
4093
4094         return 0;
4095 }
4096
4097
4098 static int
4099 bnx2_enable_nvram_write(struct bnx2 *bp)
4100 {
4101         u32 val;
4102
4103         val = BNX2_RD(bp, BNX2_MISC_CFG);
4104         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4105
4106         if (bp->flash_info->flags & BNX2_NV_WREN) {
4107                 int j;
4108
4109                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4110                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4111                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4112
4113                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4114                         udelay(5);
4115
4116                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4117                         if (val & BNX2_NVM_COMMAND_DONE)
4118                                 break;
4119                 }
4120
4121                 if (j >= NVRAM_TIMEOUT_COUNT)
4122                         return -EBUSY;
4123         }
4124         return 0;
4125 }
4126
4127 static void
4128 bnx2_disable_nvram_write(struct bnx2 *bp)
4129 {
4130         u32 val;
4131
4132         val = BNX2_RD(bp, BNX2_MISC_CFG);
4133         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4134 }
4135
4136
4137 static void
4138 bnx2_enable_nvram_access(struct bnx2 *bp)
4139 {
4140         u32 val;
4141
4142         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4143         /* Enable both bits, even on read. */
4144         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4145                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4146 }
4147
4148 static void
4149 bnx2_disable_nvram_access(struct bnx2 *bp)
4150 {
4151         u32 val;
4152
4153         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4154         /* Disable both bits, even after read. */
4155         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4156                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4157                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4158 }
4159
4160 static int
4161 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4162 {
4163         u32 cmd;
4164         int j;
4165
4166         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4167                 /* Buffered flash, no erase needed */
4168                 return 0;
4169
4170         /* Build an erase command */
4171         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4172               BNX2_NVM_COMMAND_DOIT;
4173
4174         /* Need to clear DONE bit separately. */
4175         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4176
4177         /* Address of the NVRAM to read from. */
4178         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4179
4180         /* Issue an erase command. */
4181         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4182
4183         /* Wait for completion. */
4184         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4185                 u32 val;
4186
4187                 udelay(5);
4188
4189                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4190                 if (val & BNX2_NVM_COMMAND_DONE)
4191                         break;
4192         }
4193
4194         if (j >= NVRAM_TIMEOUT_COUNT)
4195                 return -EBUSY;
4196
4197         return 0;
4198 }
4199
4200 static int
4201 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4202 {
4203         u32 cmd;
4204         int j;
4205
4206         /* Build the command word. */
4207         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4208
4209         /* Calculate an offset of a buffered flash, not needed for 5709. */
4210         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4211                 offset = ((offset / bp->flash_info->page_size) <<
4212                            bp->flash_info->page_bits) +
4213                           (offset % bp->flash_info->page_size);
4214         }
4215
4216         /* Need to clear DONE bit separately. */
4217         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4218
4219         /* Address of the NVRAM to read from. */
4220         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4221
4222         /* Issue a read command. */
4223         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4224
4225         /* Wait for completion. */
4226         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4227                 u32 val;
4228
4229                 udelay(5);
4230
4231                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4232                 if (val & BNX2_NVM_COMMAND_DONE) {
4233                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4234                         memcpy(ret_val, &v, 4);
4235                         break;
4236                 }
4237         }
4238         if (j >= NVRAM_TIMEOUT_COUNT)
4239                 return -EBUSY;
4240
4241         return 0;
4242 }
4243
4244
4245 static int
4246 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4247 {
4248         u32 cmd;
4249         __be32 val32;
4250         int j;
4251
4252         /* Build the command word. */
4253         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4254
4255         /* Calculate an offset of a buffered flash, not needed for 5709. */
4256         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4257                 offset = ((offset / bp->flash_info->page_size) <<
4258                           bp->flash_info->page_bits) +
4259                          (offset % bp->flash_info->page_size);
4260         }
4261
4262         /* Need to clear DONE bit separately. */
4263         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4264
4265         memcpy(&val32, val, 4);
4266
4267         /* Write the data. */
4268         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4269
4270         /* Address of the NVRAM to write to. */
4271         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4272
4273         /* Issue the write command. */
4274         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4275
4276         /* Wait for completion. */
4277         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4278                 udelay(5);
4279
4280                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4281                         break;
4282         }
4283         if (j >= NVRAM_TIMEOUT_COUNT)
4284                 return -EBUSY;
4285
4286         return 0;
4287 }
4288
/* Identify the flash/EEPROM part behind the NVM interface, cache its
 * descriptor in bp->flash_info, and record the usable size in
 * bp->flash_size.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches the
 * strapping, or a negative error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* The 5709 has one known flash interface; no probing needed. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the already-programmed config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping decode applies. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both probe loops leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared hw config; fall back to
	 * the flash table's total_size when the shmem field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4371
4372 static int
4373 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4374                 int buf_size)
4375 {
4376         int rc = 0;
4377         u32 cmd_flags, offset32, len32, extra;
4378
4379         if (buf_size == 0)
4380                 return 0;
4381
4382         /* Request access to the flash interface. */
4383         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4384                 return rc;
4385
4386         /* Enable access to flash interface */
4387         bnx2_enable_nvram_access(bp);
4388
4389         len32 = buf_size;
4390         offset32 = offset;
4391         extra = 0;
4392
4393         cmd_flags = 0;
4394
4395         if (offset32 & 3) {
4396                 u8 buf[4];
4397                 u32 pre_len;
4398
4399                 offset32 &= ~3;
4400                 pre_len = 4 - (offset & 3);
4401
4402                 if (pre_len >= len32) {
4403                         pre_len = len32;
4404                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4405                                     BNX2_NVM_COMMAND_LAST;
4406                 }
4407                 else {
4408                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4409                 }
4410
4411                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4412
4413                 if (rc)
4414                         return rc;
4415
4416                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4417
4418                 offset32 += 4;
4419                 ret_buf += pre_len;
4420                 len32 -= pre_len;
4421         }
4422         if (len32 & 3) {
4423                 extra = 4 - (len32 & 3);
4424                 len32 = (len32 + 4) & ~3;
4425         }
4426
4427         if (len32 == 4) {
4428                 u8 buf[4];
4429
4430                 if (cmd_flags)
4431                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4432                 else
4433                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4434                                     BNX2_NVM_COMMAND_LAST;
4435
4436                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4437
4438                 memcpy(ret_buf, buf, 4 - extra);
4439         }
4440         else if (len32 > 0) {
4441                 u8 buf[4];
4442
4443                 /* Read the first word. */
4444                 if (cmd_flags)
4445                         cmd_flags = 0;
4446                 else
4447                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4448
4449                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4450
4451                 /* Advance to the next dword. */
4452                 offset32 += 4;
4453                 ret_buf += 4;
4454                 len32 -= 4;
4455
4456                 while (len32 > 4 && rc == 0) {
4457                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4458
4459                         /* Advance to the next dword. */
4460                         offset32 += 4;
4461                         ret_buf += 4;
4462                         len32 -= 4;
4463                 }
4464
4465                 if (rc)
4466                         return rc;
4467
4468                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4469                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4470
4471                 memcpy(ret_buf, buf, 4 - extra);
4472         }
4473
4474         /* Disable access to flash interface */
4475         bnx2_disable_nvram_access(bp);
4476
4477         bnx2_release_nvram_lock(bp);
4478
4479         return rc;
4480 }
4481
4482 static int
4483 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4484                 int buf_size)
4485 {
4486         u32 written, offset32, len32;
4487         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4488         int rc = 0;
4489         int align_start, align_end;
4490
4491         buf = data_buf;
4492         offset32 = offset;
4493         len32 = buf_size;
4494         align_start = align_end = 0;
4495
4496         if ((align_start = (offset32 & 3))) {
4497                 offset32 &= ~3;
4498                 len32 += align_start;
4499                 if (len32 < 4)
4500                         len32 = 4;
4501                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4502                         return rc;
4503         }
4504
4505         if (len32 & 3) {
4506                 align_end = 4 - (len32 & 3);
4507                 len32 += align_end;
4508                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4509                         return rc;
4510         }
4511
4512         if (align_start || align_end) {
4513                 align_buf = kmalloc(len32, GFP_KERNEL);
4514                 if (align_buf == NULL)
4515                         return -ENOMEM;
4516                 if (align_start) {
4517                         memcpy(align_buf, start, 4);
4518                 }
4519                 if (align_end) {
4520                         memcpy(align_buf + len32 - 4, end, 4);
4521                 }
4522                 memcpy(align_buf + align_start, data_buf, buf_size);
4523                 buf = align_buf;
4524         }
4525
4526         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4527                 flash_buffer = kmalloc(264, GFP_KERNEL);
4528                 if (flash_buffer == NULL) {
4529                         rc = -ENOMEM;
4530                         goto nvram_write_end;
4531                 }
4532         }
4533
4534         written = 0;
4535         while ((written < len32) && (rc == 0)) {
4536                 u32 page_start, page_end, data_start, data_end;
4537                 u32 addr, cmd_flags;
4538                 int i;
4539
4540                 /* Find the page_start addr */
4541                 page_start = offset32 + written;
4542                 page_start -= (page_start % bp->flash_info->page_size);
4543                 /* Find the page_end addr */
4544                 page_end = page_start + bp->flash_info->page_size;
4545                 /* Find the data_start addr */
4546                 data_start = (written == 0) ? offset32 : page_start;
4547                 /* Find the data_end addr */
4548                 data_end = (page_end > offset32 + len32) ?
4549                         (offset32 + len32) : page_end;
4550
4551                 /* Request access to the flash interface. */
4552                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4553                         goto nvram_write_end;
4554
4555                 /* Enable access to flash interface */
4556                 bnx2_enable_nvram_access(bp);
4557
4558                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4559                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4560                         int j;
4561
4562                         /* Read the whole page into the buffer
4563                          * (non-buffer flash only) */
4564                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4565                                 if (j == (bp->flash_info->page_size - 4)) {
4566                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4567                                 }
4568                                 rc = bnx2_nvram_read_dword(bp,
4569                                         page_start + j,
4570                                         &flash_buffer[j],
4571                                         cmd_flags);
4572
4573                                 if (rc)
4574                                         goto nvram_write_end;
4575
4576                                 cmd_flags = 0;
4577                         }
4578                 }
4579
4580                 /* Enable writes to flash interface (unlock write-protect) */
4581                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4582                         goto nvram_write_end;
4583
4584                 /* Loop to write back the buffer data from page_start to
4585                  * data_start */
4586                 i = 0;
4587                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4588                         /* Erase the page */
4589                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4590                                 goto nvram_write_end;
4591
4592                         /* Re-enable the write again for the actual write */
4593                         bnx2_enable_nvram_write(bp);
4594
4595                         for (addr = page_start; addr < data_start;
4596                                 addr += 4, i += 4) {
4597
4598                                 rc = bnx2_nvram_write_dword(bp, addr,
4599                                         &flash_buffer[i], cmd_flags);
4600
4601                                 if (rc != 0)
4602                                         goto nvram_write_end;
4603
4604                                 cmd_flags = 0;
4605                         }
4606                 }
4607
4608                 /* Loop to write the new data from data_start to data_end */
4609                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4610                         if ((addr == page_end - 4) ||
4611                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4612                                  (addr == data_end - 4))) {
4613
4614                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4615                         }
4616                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4617                                 cmd_flags);
4618
4619                         if (rc != 0)
4620                                 goto nvram_write_end;
4621
4622                         cmd_flags = 0;
4623                         buf += 4;
4624                 }
4625
4626                 /* Loop to write back the buffer data from data_end
4627                  * to page_end */
4628                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4629                         for (addr = data_end; addr < page_end;
4630                                 addr += 4, i += 4) {
4631
4632                                 if (addr == page_end-4) {
4633                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4634                                 }
4635                                 rc = bnx2_nvram_write_dword(bp, addr,
4636                                         &flash_buffer[i], cmd_flags);
4637
4638                                 if (rc != 0)
4639                                         goto nvram_write_end;
4640
4641                                 cmd_flags = 0;
4642                         }
4643                 }
4644
4645                 /* Disable writes to flash interface (lock write-protect) */
4646                 bnx2_disable_nvram_write(bp);
4647
4648                 /* Disable access to flash interface */
4649                 bnx2_disable_nvram_access(bp);
4650                 bnx2_release_nvram_lock(bp);
4651
4652                 /* Increment written */
4653                 written += data_end - data_start;
4654         }
4655
4656 nvram_write_end:
4657         kfree(flash_buffer);
4658         kfree(align_buf);
4659         return rc;
4660 }
4661
4662 static void
4663 bnx2_init_fw_cap(struct bnx2 *bp)
4664 {
4665         u32 val, sig = 0;
4666
4667         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4668         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4669
4670         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4671                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4672
4673         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4674         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4675                 return;
4676
4677         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4678                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4679                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4680         }
4681
4682         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4683             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4684                 u32 link;
4685
4686                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4687
4688                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4689                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4690                         bp->phy_port = PORT_FIBRE;
4691                 else
4692                         bp->phy_port = PORT_TP;
4693
4694                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4695                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4696         }
4697
4698         if (netif_running(bp->dev) && sig)
4699                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4700 }
4701
/* Map the MSI-X vector table and pending-bit array through GRC windows
 * 2 and 3.  Separate-window mode must be selected first, hence the
 * write ordering below. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4710
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA, handshake with the bootcode firmware
 * (WAIT0/WAIT1 around the reset), issue the chip-specific reset,
 * verify byte-swap configuration, then re-read firmware capabilities
 * and re-apply chip workarounds.  @reset_code is OR-ed into the
 * driver->firmware messages to indicate why the reset is happening.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		/* 5706/5708: stop the DMA engines, then read back to
		 * flush the posted write before the settle delay. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* 5709: disable DMA via the core control register and
		 * poll (up to ~100 ms) for pending transactions to drain. */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets through MISC_COMMAND rather than the PCI
		 * config register used by the older chips. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4835
4836 static int
4837 bnx2_init_chip(struct bnx2 *bp)
4838 {
4839         u32 val, mtu;
4840         int rc, i;
4841
4842         /* Make sure the interrupt is not active. */
4843         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4844
4845         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4846               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4847 #ifdef __BIG_ENDIAN
4848               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4849 #endif
4850               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4851               DMA_READ_CHANS << 12 |
4852               DMA_WRITE_CHANS << 16;
4853
4854         val |= (0x2 << 20) | (1 << 11);
4855
4856         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4857                 val |= (1 << 23);
4858
4859         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4860             (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4861             !(bp->flags & BNX2_FLAG_PCIX))
4862                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4863
4864         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4865
4866         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4867                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4868                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4869                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4870         }
4871
4872         if (bp->flags & BNX2_FLAG_PCIX) {
4873                 u16 val16;
4874
4875                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4876                                      &val16);
4877                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4878                                       val16 & ~PCI_X_CMD_ERO);
4879         }
4880
4881         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4882                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4883                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4884                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4885
4886         /* Initialize context mapping and zero out the quick contexts.  The
4887          * context block must have already been enabled. */
4888         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4889                 rc = bnx2_init_5709_context(bp);
4890                 if (rc)
4891                         return rc;
4892         } else
4893                 bnx2_init_context(bp);
4894
4895         if ((rc = bnx2_init_cpus(bp)) != 0)
4896                 return rc;
4897
4898         bnx2_init_nvram(bp);
4899
4900         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4901
4902         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4903         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4904         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4905         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4906                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4907                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4908                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4909         }
4910
4911         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4912
4913         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4914         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4915         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4916
4917         val = (BNX2_PAGE_BITS - 8) << 24;
4918         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4919
4920         /* Configure page size. */
4921         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4922         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4923         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4924         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4925
4926         val = bp->mac_addr[0] +
4927               (bp->mac_addr[1] << 8) +
4928               (bp->mac_addr[2] << 16) +
4929               bp->mac_addr[3] +
4930               (bp->mac_addr[4] << 8) +
4931               (bp->mac_addr[5] << 16);
4932         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4933
4934         /* Program the MTU.  Also include 4 bytes for CRC32. */
4935         mtu = bp->dev->mtu;
4936         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4937         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4938                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4939         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4940
4941         if (mtu < 1500)
4942                 mtu = 1500;
4943
4944         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4945         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4946         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4947
4948         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4949         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4950                 bp->bnx2_napi[i].last_status_idx = 0;
4951
4952         bp->idle_chk_status_idx = 0xffff;
4953
4954         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4955
4956         /* Set up how to generate a link change interrupt. */
4957         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4958
4959         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4960                 (u64) bp->status_blk_mapping & 0xffffffff);
4961         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4962
4963         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4964                 (u64) bp->stats_blk_mapping & 0xffffffff);
4965         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4966                 (u64) bp->stats_blk_mapping >> 32);
4967
4968         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4969                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4970
4971         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4972                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4973
4974         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4975                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4976
4977         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4978
4979         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4980
4981         BNX2_WR(bp, BNX2_HC_COM_TICKS,
4982                 (bp->com_ticks_int << 16) | bp->com_ticks);
4983
4984         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4985                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4986
4987         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4988                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4989         else
4990                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4991         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4992
4993         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4994                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4995         else {
4996                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4997                       BNX2_HC_CONFIG_COLLECT_STATS;
4998         }
4999
5000         if (bp->flags & BNX2_FLAG_USING_MSIX) {
5001                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5002                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
5003
5004                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5005         }
5006
5007         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5008                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5009
5010         BNX2_WR(bp, BNX2_HC_CONFIG, val);
5011
5012         if (bp->rx_ticks < 25)
5013                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5014         else
5015                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5016
5017         for (i = 1; i < bp->irq_nvecs; i++) {
5018                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5019                            BNX2_HC_SB_CONFIG_1;
5020
5021                 BNX2_WR(bp, base,
5022                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5023                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5024                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5025
5026                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5027                         (bp->tx_quick_cons_trip_int << 16) |
5028                          bp->tx_quick_cons_trip);
5029
5030                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5031                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5032
5033                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5034                         (bp->rx_quick_cons_trip_int << 16) |
5035                         bp->rx_quick_cons_trip);
5036
5037                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5038                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5039         }
5040
5041         /* Clear internal stats counters. */
5042         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5043
5044         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5045
5046         /* Initialize the receive filter. */
5047         bnx2_set_rx_mode(bp->dev);
5048
5049         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5050                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5051                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5052                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5053         }
5054         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5055                           1, 0);
5056
5057         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5058         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5059
5060         udelay(20);
5061
5062         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5063
5064         return rc;
5065 }
5066
5067 static void
5068 bnx2_clear_ring_states(struct bnx2 *bp)
5069 {
5070         struct bnx2_napi *bnapi;
5071         struct bnx2_tx_ring_info *txr;
5072         struct bnx2_rx_ring_info *rxr;
5073         int i;
5074
5075         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5076                 bnapi = &bp->bnx2_napi[i];
5077                 txr = &bnapi->tx_ring;
5078                 rxr = &bnapi->rx_ring;
5079
5080                 txr->tx_cons = 0;
5081                 txr->hw_tx_cons = 0;
5082                 rxr->rx_prod_bseq = 0;
5083                 rxr->rx_prod = 0;
5084                 rxr->rx_cons = 0;
5085                 rxr->rx_pg_prod = 0;
5086                 rxr->rx_pg_cons = 0;
5087         }
5088 }
5089
5090 static void
5091 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5092 {
5093         u32 val, offset0, offset1, offset2, offset3;
5094         u32 cid_addr = GET_CID_ADDR(cid);
5095
5096         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5097                 offset0 = BNX2_L2CTX_TYPE_XI;
5098                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5099                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5100                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5101         } else {
5102                 offset0 = BNX2_L2CTX_TYPE;
5103                 offset1 = BNX2_L2CTX_CMD_TYPE;
5104                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5105                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5106         }
5107         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5108         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5109
5110         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5111         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5112
5113         val = (u64) txr->tx_desc_mapping >> 32;
5114         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5115
5116         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5117         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5118 }
5119
5120 static void
5121 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5122 {
5123         struct bnx2_tx_bd *txbd;
5124         u32 cid = TX_CID;
5125         struct bnx2_napi *bnapi;
5126         struct bnx2_tx_ring_info *txr;
5127
5128         bnapi = &bp->bnx2_napi[ring_num];
5129         txr = &bnapi->tx_ring;
5130
5131         if (ring_num == 0)
5132                 cid = TX_CID;
5133         else
5134                 cid = TX_TSS_CID + ring_num - 1;
5135
5136         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5137
5138         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5139
5140         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5141         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5142
5143         txr->tx_prod = 0;
5144         txr->tx_prod_bseq = 0;
5145
5146         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5147         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5148
5149         bnx2_init_tx_context(bp, cid, txr);
5150 }
5151
5152 static void
5153 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5154                      u32 buf_size, int num_rings)
5155 {
5156         int i;
5157         struct bnx2_rx_bd *rxbd;
5158
5159         for (i = 0; i < num_rings; i++) {
5160                 int j;
5161
5162                 rxbd = &rx_ring[i][0];
5163                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5164                         rxbd->rx_bd_len = buf_size;
5165                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5166                 }
5167                 if (i == (num_rings - 1))
5168                         j = 0;
5169                 else
5170                         j = i + 1;
5171                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5172                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5173         }
5174 }
5175
/* Set up one RX ring: build the BD chains, program the per-CID context
 * with the ring addresses and buffer sizes, pre-fill the ring(s) with
 * buffers, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional RSS rings get
	 * consecutive CIDs starting at RX_RSS_CID. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the RX BD pages into a circular ring. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page ring off by default; re-enabled below when configured. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Build the page (jumbo) BD ring and tell the chip the
		 * buffer sizes and the ring's base DMA address. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Point the context at the first page of the normal RX BD ring. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with data buffers. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Cache the mailbox addresses used by the RX fast path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5261
/* (Re)initialize every TX and RX ring and, when multiple RX rings are
 * in use, program the RSS indirection table and enable RSS hashing.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are being rebuilt. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS until the rings and indirection table are ready. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Program the RSS indirection table: eight 4-bit entries
		 * are packed per 32-bit write, spreading table slots
		 * round-robin across the non-default RX rings. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hash types for IPv4 and IPv6. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5308
5309 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5310 {
5311         u32 max, num_rings = 1;
5312
5313         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5314                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5315                 num_rings++;
5316         }
5317         /* round to next power of 2 */
5318         max = max_size;
5319         while ((max & num_rings) == 0)
5320                 max >>= 1;
5321
5322         if (num_rings != max)
5323                 max <<= 1;
5324
5325         return max;
5326 }
5327
/* Compute all RX buffer and ring sizing parameters for the current MTU
 * and the requested ring size, deciding whether a separate page (jumbo)
 * ring is needed.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full per-buffer footprint: data + hw alignment + NET_SKB_PAD +
	 * the shared-info overhead added by build_skb(). */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* If a full frame won't fit in one page, use a small linear
	 * buffer plus a page ring for the rest -- unless the chip's
	 * jumbo page mode is broken. */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame; the 40-byte reduction presumably
		 * accounts for headers kept in the linear buffer -- TODO
		 * confirm. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* Linear buffer only needs to cover the copy threshold. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5368
/* Release every skb still queued on the TX rings: unmap the head and
 * all fragment DMA mappings, free the skb, then reset the queue's byte
 * queue limits.  Called with the hardware quiesced.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			/* Empty slot -- advance to the next BD. */
			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Unmap the linear (head) part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* The skb's fragments occupy the following BDs;
			 * unmap each one. */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5413
5414 static void
5415 bnx2_free_rx_skbs(struct bnx2 *bp)
5416 {
5417         int i;
5418
5419         for (i = 0; i < bp->num_rx_rings; i++) {
5420                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5421                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5422                 int j;
5423
5424                 if (rxr->rx_buf_ring == NULL)
5425                         return;
5426
5427                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5428                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5429                         u8 *data = rx_buf->data;
5430
5431                         if (data == NULL)
5432                                 continue;
5433
5434                         dma_unmap_single(&bp->pdev->dev,
5435                                          dma_unmap_addr(rx_buf, mapping),
5436                                          bp->rx_buf_use_size,
5437                                          PCI_DMA_FROMDEVICE);
5438
5439                         rx_buf->data = NULL;
5440
5441                         kfree(data);
5442                 }
5443                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5444                         bnx2_free_rx_page(bp, rxr, j);
5445         }
5446 }
5447
/* Free all TX and RX buffers still owned by the driver.  Called with
 * the hardware quiesced (after a chip reset or during teardown).
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5454
5455 static int
5456 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5457 {
5458         int rc;
5459
5460         rc = bnx2_reset_chip(bp, reset_code);
5461         bnx2_free_skbs(bp);
5462         if (rc)
5463                 return rc;
5464
5465         if ((rc = bnx2_init_chip(bp)) != 0)
5466                 return rc;
5467
5468         bnx2_init_all_rings(bp);
5469         return 0;
5470 }
5471
5472 static int
5473 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5474 {
5475         int rc;
5476
5477         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5478                 return rc;
5479
5480         spin_lock_bh(&bp->phy_lock);
5481         bnx2_init_phy(bp, reset_phy);
5482         bnx2_set_link(bp);
5483         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5484                 bnx2_remote_phy_event(bp);
5485         spin_unlock_bh(&bp->phy_lock);
5486         return 0;
5487 }
5488
5489 static int
5490 bnx2_shutdown_chip(struct bnx2 *bp)
5491 {
5492         u32 reset_code;
5493
5494         if (bp->flags & BNX2_FLAG_NO_WOL)
5495                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5496         else if (bp->wol)
5497                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5498         else
5499                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5500
5501         return bnx2_reset_chip(bp, reset_code);
5502 }
5503
/* Ethtool self-test over a table of registers.  For each entry, bits in
 * rw_mask must accept both all-zeros and all-ones writes, while bits in
 * ro_mask must retain their original value.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on the 5709.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff offset terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Some registers only exist on pre-5709 chips. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: r/w bits must read back 0 and r/o bits
		 * must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: r/w bits must read back 1 and r/o bits
		 * must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the register's original value before moving on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5674
5675 static int
5676 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5677 {
5678         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5679                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5680         int i;
5681
5682         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5683                 u32 offset;
5684
5685                 for (offset = 0; offset < size; offset += 4) {
5686
5687                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5688
5689                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5690                                 test_pattern[i]) {
5691                                 return -ENODEV;
5692                         }
5693                 }
5694         }
5695         return 0;
5696 }
5697
5698 static int
5699 bnx2_test_memory(struct bnx2 *bp)
5700 {
5701         int ret = 0;
5702         int i;
5703         static struct mem_entry {
5704                 u32   offset;
5705                 u32   len;
5706         } mem_tbl_5706[] = {
5707                 { 0x60000,  0x4000 },
5708                 { 0xa0000,  0x3000 },
5709                 { 0xe0000,  0x4000 },
5710                 { 0x120000, 0x4000 },
5711                 { 0x1a0000, 0x4000 },
5712                 { 0x160000, 0x4000 },
5713                 { 0xffffffff, 0    },
5714         },
5715         mem_tbl_5709[] = {
5716                 { 0x60000,  0x4000 },
5717                 { 0xa0000,  0x3000 },
5718                 { 0xe0000,  0x4000 },
5719                 { 0x120000, 0x4000 },
5720                 { 0x1a0000, 0x4000 },
5721                 { 0xffffffff, 0    },
5722         };
5723         struct mem_entry *mem_tbl;
5724
5725         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5726                 mem_tbl = mem_tbl_5709;
5727         else
5728                 mem_tbl = mem_tbl_5706;
5729
5730         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5731                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5732                         mem_tbl[i].len)) != 0) {
5733                         return ret;
5734                 }
5735         }
5736
5737         return ret;
5738 }
5739
5740 #define BNX2_MAC_LOOPBACK       0
5741 #define BNX2_PHY_LOOPBACK       1
5742
/* Transmit one self-addressed test frame through an internal loopback path
 * (MAC-internal or PHY-internal) on ring 0 and verify it is received back
 * intact.
 *
 * Returns 0 on success, -EINVAL for an unknown loopback_mode, -ENOMEM or
 * -EIO when the test frame cannot be allocated or DMA-mapped, and -ENODEV
 * when the frame is not completed, not received, or received corrupted.
 * Used by the ethtool self-test path; the NIC must be quiesced by the
 * caller.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on vector 0's tx and rx rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remotely managed PHY cannot be looped back locally;
		 * report success so the self-test does not flag it. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, 8 zero bytes
	 * for source/type, then an incrementing byte pattern (checked on
	 * receive below). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce (without interrupt) so the status block's
	 * consumer indices are current before we sample rx_start_idx. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post one single-buffer tx descriptor covering the whole frame. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell (producer index + byte sequence). */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second forced coalesce to publish the tx/rx completion indices
	 * after the frame has had time to loop through the chip. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have completed on the tx side... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly one new packet must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged in the l2 frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length (minus 4 — presumably the FCS; confirm against
	 * the l2_fhdr definition) must match what we sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte past the 14-byte
	 * Ethernet header. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5875
5876 #define BNX2_MAC_LOOPBACK_FAILED        1
5877 #define BNX2_PHY_LOOPBACK_FAILED        2
5878 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5879                                          BNX2_PHY_LOOPBACK_FAILED)
5880
5881 static int
5882 bnx2_test_loopback(struct bnx2 *bp)
5883 {
5884         int rc = 0;
5885
5886         if (!netif_running(bp->dev))
5887                 return BNX2_LOOPBACK_FAILED;
5888
5889         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5890         spin_lock_bh(&bp->phy_lock);
5891         bnx2_init_phy(bp, 1);
5892         spin_unlock_bh(&bp->phy_lock);
5893         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5894                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5895         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5896                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5897         return rc;
5898 }
5899
5900 #define NVRAM_SIZE 0x200
5901 #define CRC32_RESIDUAL 0xdebb20e3
5902
5903 static int
5904 bnx2_test_nvram(struct bnx2 *bp)
5905 {
5906         __be32 buf[NVRAM_SIZE / 4];
5907         u8 *data = (u8 *) buf;
5908         int rc = 0;
5909         u32 magic, csum;
5910
5911         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5912                 goto test_nvram_done;
5913
5914         magic = be32_to_cpu(buf[0]);
5915         if (magic != 0x669955aa) {
5916                 rc = -ENODEV;
5917                 goto test_nvram_done;
5918         }
5919
5920         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5921                 goto test_nvram_done;
5922
5923         csum = ether_crc_le(0x100, data);
5924         if (csum != CRC32_RESIDUAL) {
5925                 rc = -ENODEV;
5926                 goto test_nvram_done;
5927         }
5928
5929         csum = ether_crc_le(0x100, data + 0x100);
5930         if (csum != CRC32_RESIDUAL) {
5931                 rc = -ENODEV;
5932         }
5933
5934 test_nvram_done:
5935         return rc;
5936 }
5937
5938 static int
5939 bnx2_test_link(struct bnx2 *bp)
5940 {
5941         u32 bmsr;
5942
5943         if (!netif_running(bp->dev))
5944                 return -ENODEV;
5945
5946         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5947                 if (bp->link_up)
5948                         return 0;
5949                 return -ENODEV;
5950         }
5951         spin_lock_bh(&bp->phy_lock);
5952         bnx2_enable_bmsr1(bp);
5953         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5954         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5955         bnx2_disable_bmsr1(bp);
5956         spin_unlock_bh(&bp->phy_lock);
5957
5958         if (bmsr & BMSR_LSTATUS) {
5959                 return 0;
5960         }
5961         return -ENODEV;
5962 }
5963
5964 static int
5965 bnx2_test_intr(struct bnx2 *bp)
5966 {
5967         int i;
5968         u16 status_idx;
5969
5970         if (!netif_running(bp->dev))
5971                 return -ENODEV;
5972
5973         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5974
5975         /* This register is not touched during run-time. */
5976         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5977         BNX2_RD(bp, BNX2_HC_COMMAND);
5978
5979         for (i = 0; i < 10; i++) {
5980                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5981                         status_idx) {
5982
5983                         break;
5984                 }
5985
5986                 msleep_interruptible(10);
5987         }
5988         if (i < 10)
5989                 return 0;
5990
5991         return -ENODEV;
5992 }
5993
/* Determining link for parallel detection.
 *
 * Returns 1 when the 5706 SerDes sees a link partner that is NOT
 * autonegotiating (signal detected, rx in sync, no CONFIG/autoneg
 * ordered sets), i.e. when forcing the link is appropriate; 0 otherwise
 * or when parallel detection is disabled.  Caller holds phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register; bail out unless the PHY
	 * reports signal detect. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* NOTE(review): AN_DBG is read twice back-to-back — presumably the
	 * first read returns a latched value and the second the current
	 * state; confirm against the PHY documentation before changing. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6025
/* Per-tick SerDes state machine for the 5706, run from bnx2_timer():
 * implements parallel detection (forcing 1000/full when the partner shows
 * signal but does not autonegotiate) and reverts to autoneg when the
 * partner later starts negotiating.  Also force-cycles a link that is
 * reported up while the SerDes has lost sync.  Takes phy_lock (plain
 * spin_lock — timer/softirq context). */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* A recent autoneg (re)start is still settling; count down
		 * and do not second-guess the link this tick. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Link down with autoneg enabled: if the partner
			 * has signal but is not negotiating, force
			 * 1000/full (parallel detect). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was forced via parallel detect; poll shadow 0x0f01
		 * through regs 0x17/0x15.  Bit 5 set presumably means the
		 * partner is now sending autoneg pages — confirm against
		 * the PHY documentation. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner negotiates again: re-enable autoneg and
			 * leave parallel-detect mode. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG (first read returns the latched
		 * value — see bnx2_5706_serdes_has_link). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link claims up but SerDes lost sync: force it
			 * down once, then let bnx2_set_link() re-evaluate
			 * on the next tick. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6087
/* Per-tick SerDes handling for the 5708, run from bnx2_timer(): while
 * autoneg fails to bring the link up on a 2.5G-capable port, alternate
 * between forced 2.5G mode (short timeout) and autoneg (two quiet ticks).
 * No-op with a remote PHY; clears the pending counter when not
 * 2.5G-capable. */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a previous autoneg restart time to complete. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is not linking: try forced 2.5G and come
			 * back sooner to check the result. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G did not link either: return to
			 * autoneg and leave it alone for two ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6120
/* Periodic driver timer (bp->timer): re-arms itself every
 * bp->current_interval jiffies while the device is running.  Sends the
 * management heartbeat, mirrors the firmware rx-drop counter into the
 * stats block, applies the broken-stats workaround, and drives the
 * per-chip SerDes state machines.  Skips all work (but still re-arms)
 * while interrupts are held off via intr_sem. */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Missed-MSI check applies only to plain MSI mode (not one-shot). */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-level rx drops are only visible via indirect read;
	 * fold them into the stats block each tick. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6156
6157 static int
6158 bnx2_request_irq(struct bnx2 *bp)
6159 {
6160         unsigned long flags;
6161         struct bnx2_irq *irq;
6162         int rc = 0, i;
6163
6164         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6165                 flags = 0;
6166         else
6167                 flags = IRQF_SHARED;
6168
6169         for (i = 0; i < bp->irq_nvecs; i++) {
6170                 irq = &bp->irq_tbl[i];
6171                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6172                                  &bp->bnx2_napi[i]);
6173                 if (rc)
6174                         break;
6175                 irq->requested = 1;
6176         }
6177         return rc;
6178 }
6179
6180 static void
6181 __bnx2_free_irq(struct bnx2 *bp)
6182 {
6183         struct bnx2_irq *irq;
6184         int i;
6185
6186         for (i = 0; i < bp->irq_nvecs; i++) {
6187                 irq = &bp->irq_tbl[i];
6188                 if (irq->requested)
6189                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6190                 irq->requested = 0;
6191         }
6192 }
6193
6194 static void
6195 bnx2_free_irq(struct bnx2 *bp)
6196 {
6197
6198         __bnx2_free_irq(bp);
6199         if (bp->flags & BNX2_FLAG_USING_MSI)
6200                 pci_disable_msi(bp->pdev);
6201         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6202                 pci_disable_msix(bp->pdev);
6203
6204         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6205 }
6206
6207 static void
6208 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6209 {
6210         int i, total_vecs, rc;
6211         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6212         struct net_device *dev = bp->dev;
6213         const int len = sizeof(bp->irq_tbl[0].name);
6214
6215         bnx2_setup_msix_tbl(bp);
6216         BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6217         BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6218         BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6219
6220         /*  Need to flush the previous three writes to ensure MSI-X
6221          *  is setup properly */
6222         BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6223
6224         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6225                 msix_ent[i].entry = i;
6226                 msix_ent[i].vector = 0;
6227         }
6228
6229         total_vecs = msix_vecs;
6230 #ifdef BCM_CNIC
6231         total_vecs++;
6232 #endif
6233         rc = -ENOSPC;
6234         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6235                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6236                 if (rc <= 0)
6237                         break;
6238                 if (rc > 0)
6239                         total_vecs = rc;
6240         }
6241
6242         if (rc != 0)
6243                 return;
6244
6245         msix_vecs = total_vecs;
6246 #ifdef BCM_CNIC
6247         msix_vecs--;
6248 #endif
6249         bp->irq_nvecs = msix_vecs;
6250         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6251         for (i = 0; i < total_vecs; i++) {
6252                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6253                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6254                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6255         }
6256 }
6257
/* Select the interrupt mode (MSI-X, then MSI, then legacy INTx) and size
 * the tx/rx ring counts to the vectors actually obtained.  dis_msi forces
 * legacy INTx.  Called before bnx2_request_irq(); returns the result of
 * netif_set_real_num_rx_queues(). */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Vector budget: honor user-requested ring counts when set,
	 * otherwise default to the RSS queue count (+1 when rx rings are
	 * unconstrained), capped at RX_MAX_RINGS. */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Legacy INTx defaults; overwritten below if MSI/MSI-X succeeds. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI when MSI-X did not come up. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Tx ring count is rounded down to a power of two — presumably a
	 * hardware queue-mapping requirement; rx rings default to one per
	 * vector.  User-requested counts are clamped to the vector count. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6309
/* Called with rtnl_lock */
/* ndo_open: bring the interface up.  Loads firmware, picks the interrupt
 * mode, allocates rings/memory, requests IRQs, initializes the NIC,
 * starts the timer, and opens the tx queues.  If plain MSI fails its
 * test interrupt, the whole interrupt path is torn down and rebuilt in
 * INTx mode.  Returns 0 or a negative errno; on error everything set up
 * here is unwound. */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	/* Allow interrupt handling (see bnx2_timer's intr_sem gate). */
	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces legacy INTx. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Shared unwind for all failure points above.  NOTE(review):
	 * assumes each teardown helper tolerates stages that were never
	 * reached — consistent with how this driver uses them elsewhere. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6392
/* Deferred reset handler (bp->reset_task), scheduled from
 * bnx2_tx_timeout().  Stops the netif, restores PCI config state if the
 * PCI block was reset underneath us, re-initializes the NIC, and
 * restarts; closes the device when re-initialization fails.  Serialized
 * against open/close by rtnl_lock. */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	/* If memory-space access got disabled, the PCI block reset under
	 * us; restore the saved config space (and re-save it for next
	 * time). */
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* NOTE(review): NAPI is re-enabled here, presumably so
		 * dev_close() can quiesce it through the normal path —
		 * confirm before changing. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* Hold interrupts off until bnx2_netif_start() re-enables them. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6427
6428 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6429
/* Dump the chip's FTQ (flow-through queue) control registers, the internal
 * CPU states, and the TBDC (tx buffer-descriptor cache) CAM contents to
 * the kernel log.  Diagnostic only; called from bnx2_tx_timeout(). */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ is listed twice — verify whether this
		 * duplicate is intentional. */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* One register window per internal CPU, 0x40000 apart.  The pc
	 * (reg + 0x1c) is read twice — presumably to show whether the CPU
	 * is making progress between the two reads; confirm before
	 * "de-duplicating". */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	/* Read all 32 TBDC CAM lines; each read is kicked off via the
	 * command register and polled (bounded) for completion. */
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6496
6497 static void
6498 bnx2_dump_state(struct bnx2 *bp)
6499 {
6500         struct net_device *dev = bp->dev;
6501         u32 val1, val2;
6502
6503         pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6504         netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6505                    atomic_read(&bp->intr_sem), val1);
6506         pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6507         pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6508         netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6509         netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6510                    BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6511                    BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6512         netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6513                    BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6514         netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6515                    BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6516         if (bp->flags & BNX2_FLAG_USING_MSIX)
6517                 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6518                            BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6519 }
6520
/* ndo_tx_timeout: the stack detected a stuck tx queue.  Dump extensive
 * chip state to the log for diagnosis, then defer the actual reset to
 * bnx2_reset_task so it runs in process context under rtnl_lock. */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6533
6534 /* Called with netif_tx_lock.
6535  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6536  * netif_wake_queue().
6537  */
6538 static netdev_tx_t
6539 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6540 {
6541         struct bnx2 *bp = netdev_priv(dev);
6542         dma_addr_t mapping;
6543         struct bnx2_tx_bd *txbd;
6544         struct bnx2_sw_tx_bd *tx_buf;
6545         u32 len, vlan_tag_flags, last_frag, mss;
6546         u16 prod, ring_prod;
6547         int i;
6548         struct bnx2_napi *bnapi;
6549         struct bnx2_tx_ring_info *txr;
6550         struct netdev_queue *txq;
6551
6552         /*  Determine which tx ring we will be placed on */
6553         i = skb_get_queue_mapping(skb);
6554         bnapi = &bp->bnx2_napi[i];
6555         txr = &bnapi->tx_ring;
6556         txq = netdev_get_tx_queue(dev, i);
6557
6558         if (unlikely(bnx2_tx_avail(bp, txr) <
6559             (skb_shinfo(skb)->nr_frags + 1))) {
6560                 netif_tx_stop_queue(txq);
6561                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6562
6563                 return NETDEV_TX_BUSY;
6564         }
6565         len = skb_headlen(skb);
6566         prod = txr->tx_prod;
6567         ring_prod = BNX2_TX_RING_IDX(prod);
6568
6569         vlan_tag_flags = 0;
6570         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6571                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6572         }
6573
6574         if (vlan_tx_tag_present(skb)) {
6575                 vlan_tag_flags |=
6576                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6577         }
6578
6579         if ((mss = skb_shinfo(skb)->gso_size)) {
6580                 u32 tcp_opt_len;
6581                 struct iphdr *iph;
6582
6583                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6584
6585                 tcp_opt_len = tcp_optlen(skb);
6586
6587                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6588                         u32 tcp_off = skb_transport_offset(skb) -
6589                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6590
6591                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6592                                           TX_BD_FLAGS_SW_FLAGS;
6593                         if (likely(tcp_off == 0))
6594                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6595                         else {
6596                                 tcp_off >>= 3;
6597                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6598                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6599                                                   ((tcp_off & 0x10) <<
6600                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6601                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6602                         }
6603                 } else {
6604                         iph = ip_hdr(skb);
6605                         if (tcp_opt_len || (iph->ihl > 5)) {
6606                                 vlan_tag_flags |= ((iph->ihl - 5) +
6607                                                    (tcp_opt_len >> 2)) << 8;
6608                         }
6609                 }
6610         } else
6611                 mss = 0;
6612
6613         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6614         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6615                 dev_kfree_skb(skb);
6616                 return NETDEV_TX_OK;
6617         }
6618
6619         tx_buf = &txr->tx_buf_ring[ring_prod];
6620         tx_buf->skb = skb;
6621         dma_unmap_addr_set(tx_buf, mapping, mapping);
6622
6623         txbd = &txr->tx_desc_ring[ring_prod];
6624
6625         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6626         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6627         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6628         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6629
6630         last_frag = skb_shinfo(skb)->nr_frags;
6631         tx_buf->nr_frags = last_frag;
6632         tx_buf->is_gso = skb_is_gso(skb);
6633
6634         for (i = 0; i < last_frag; i++) {
6635                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6636
6637                 prod = BNX2_NEXT_TX_BD(prod);
6638                 ring_prod = BNX2_TX_RING_IDX(prod);
6639                 txbd = &txr->tx_desc_ring[ring_prod];
6640
6641                 len = skb_frag_size(frag);
6642                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6643                                            DMA_TO_DEVICE);
6644                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6645                         goto dma_error;
6646                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6647                                    mapping);
6648
6649                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6650                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6651                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6652                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6653
6654         }
6655         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6656
6657         /* Sync BD data before updating TX mailbox */
6658         wmb();
6659
6660         netdev_tx_sent_queue(txq, skb->len);
6661
6662         prod = BNX2_NEXT_TX_BD(prod);
6663         txr->tx_prod_bseq += skb->len;
6664
6665         BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6666         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6667
6668         mmiowb();
6669
6670         txr->tx_prod = prod;
6671
6672         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6673                 netif_tx_stop_queue(txq);
6674
6675                 /* netif_tx_stop_queue() must be done before checking
6676                  * tx index in bnx2_tx_avail() below, because in
6677                  * bnx2_tx_int(), we update tx index before checking for
6678                  * netif_tx_queue_stopped().
6679                  */
6680                 smp_mb();
6681                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6682                         netif_tx_wake_queue(txq);
6683         }
6684
6685         return NETDEV_TX_OK;
6686 dma_error:
6687         /* save value of frag that failed */
6688         last_frag = i;
6689
6690         /* start back at beginning and unmap skb */
6691         prod = txr->tx_prod;
6692         ring_prod = BNX2_TX_RING_IDX(prod);
6693         tx_buf = &txr->tx_buf_ring[ring_prod];
6694         tx_buf->skb = NULL;
6695         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6696                          skb_headlen(skb), PCI_DMA_TODEVICE);
6697
6698         /* unmap remaining mapped pages */
6699         for (i = 0; i < last_frag; i++) {
6700                 prod = BNX2_NEXT_TX_BD(prod);
6701                 ring_prod = BNX2_TX_RING_IDX(prod);
6702                 tx_buf = &txr->tx_buf_ring[ring_prod];
6703                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6704                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6705                                PCI_DMA_TODEVICE);
6706         }
6707
6708         dev_kfree_skb(skb);
6709         return NETDEV_TX_OK;
6710 }
6711
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce interrupts and NAPI before disabling the tx path so no
	 * completions are processed while the rings are being torn down.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	/* Reset the chip, then release IRQs, ring buffers and memory */
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6731
6732 static void
6733 bnx2_save_stats(struct bnx2 *bp)
6734 {
6735         u32 *hw_stats = (u32 *) bp->stats_blk;
6736         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6737         int i;
6738
6739         /* The 1st 10 counters are 64-bit counters */
6740         for (i = 0; i < 20; i += 2) {
6741                 u32 hi;
6742                 u64 lo;
6743
6744                 hi = temp_stats[i] + hw_stats[i];
6745                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6746                 if (lo > 0xffffffff)
6747                         hi++;
6748                 temp_stats[i] = hi;
6749                 temp_stats[i + 1] = lo & 0xffffffff;
6750         }
6751
6752         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6753                 temp_stats[i] += hw_stats[i];
6754 }
6755
/* Combine the hi/lo u32 halves of a 64-bit hardware counter */
#define GET_64BIT_NET_STATS64(ctr)              \
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter: live hardware value plus the total saved in
 * temp_stats_blk by bnx2_save_stats() across chip resets.
 */
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* 32-bit counter: live hardware value plus the saved total */
#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6766
/* ndo_get_stats64: populate @net_stats from the chip statistics block.
 * The GET_*_NET_STATS macros add the live hardware counters to the
 * totals saved in temp_stats_blk across chip resets.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Stats block may not be allocated yet */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* Aggregate rx error count from the individual categories above */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier sense errors are forced to 0 on 5706 and
	 * 5708 A0 — presumably the counter is unreliable on those chips.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6839
6840 /* All ethtool functions called with rtnl_lock */
6841
6842 static int
6843 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6844 {
6845         struct bnx2 *bp = netdev_priv(dev);
6846         int support_serdes = 0, support_copper = 0;
6847
6848         cmd->supported = SUPPORTED_Autoneg;
6849         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6850                 support_serdes = 1;
6851                 support_copper = 1;
6852         } else if (bp->phy_port == PORT_FIBRE)
6853                 support_serdes = 1;
6854         else
6855                 support_copper = 1;
6856
6857         if (support_serdes) {
6858                 cmd->supported |= SUPPORTED_1000baseT_Full |
6859                         SUPPORTED_FIBRE;
6860                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6861                         cmd->supported |= SUPPORTED_2500baseX_Full;
6862
6863         }
6864         if (support_copper) {
6865                 cmd->supported |= SUPPORTED_10baseT_Half |
6866                         SUPPORTED_10baseT_Full |
6867                         SUPPORTED_100baseT_Half |
6868                         SUPPORTED_100baseT_Full |
6869                         SUPPORTED_1000baseT_Full |
6870                         SUPPORTED_TP;
6871
6872         }
6873
6874         spin_lock_bh(&bp->phy_lock);
6875         cmd->port = bp->phy_port;
6876         cmd->advertising = bp->advertising;
6877
6878         if (bp->autoneg & AUTONEG_SPEED) {
6879                 cmd->autoneg = AUTONEG_ENABLE;
6880         } else {
6881                 cmd->autoneg = AUTONEG_DISABLE;
6882         }
6883
6884         if (netif_carrier_ok(dev)) {
6885                 ethtool_cmd_speed_set(cmd, bp->line_speed);
6886                 cmd->duplex = bp->duplex;
6887                 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6888                         if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6889                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
6890                         else
6891                                 cmd->eth_tp_mdix = ETH_TP_MDI;
6892                 }
6893         }
6894         else {
6895                 ethtool_cmd_speed_set(cmd, -1);
6896                 cmd->duplex = -1;
6897         }
6898         spin_unlock_bh(&bp->phy_lock);
6899
6900         cmd->transceiver = XCVR_INTERNAL;
6901         cmd->phy_address = bp->phy_addr;
6902
6903         return 0;
6904 }
6905
/* ethtool set_settings: validate the requested port/speed/duplex
 * combination, store it in the driver state, and apply it to the PHY
 * only if the interface is running.  Returns -EINVAL on a rejected
 * combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	/* Only twisted-pair and fibre port types are meaningful here */
	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port type requires a remote-PHY capable device */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertised modes to the selected medium;
		 * an empty mask means advertise all modes for the medium.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		/* Forced speed: fibre accepts only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper rejects forced
		 * 1G/2.5G.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new configuration */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6983
6984 static void
6985 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6986 {
6987         struct bnx2 *bp = netdev_priv(dev);
6988
6989         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6990         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6991         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6992         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6993 }
6994
/* Size of the register dump produced by bnx2_get_regs() */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: the dump size is fixed */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7002
/* ethtool get_regs: dump device registers into the 32KB buffer @_p.
 * reg_boundaries[] lists [start, end) pairs of readable register
 * ranges; offsets between ranges are left zeroed by the memset.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* reg_boundaries[0] is 0, so this pointer advance is a no-op */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* End of this range: jump to the start of the next one */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7054
7055 static void
7056 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7057 {
7058         struct bnx2 *bp = netdev_priv(dev);
7059
7060         if (bp->flags & BNX2_FLAG_NO_WOL) {
7061                 wol->supported = 0;
7062                 wol->wolopts = 0;
7063         }
7064         else {
7065                 wol->supported = WAKE_MAGIC;
7066                 if (bp->wol)
7067                         wol->wolopts = WAKE_MAGIC;
7068                 else
7069                         wol->wolopts = 0;
7070         }
7071         memset(&wol->sopass, 0, sizeof(wol->sopass));
7072 }
7073
7074 static int
7075 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7076 {
7077         struct bnx2 *bp = netdev_priv(dev);
7078
7079         if (wol->wolopts & ~WAKE_MAGIC)
7080                 return -EINVAL;
7081
7082         if (wol->wolopts & WAKE_MAGIC) {
7083                 if (bp->flags & BNX2_FLAG_NO_WOL)
7084                         return -EINVAL;
7085
7086                 bp->wol = 1;
7087         }
7088         else {
7089                 bp->wol = 0;
7090         }
7091
7092         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7093
7094         return 0;
7095 }
7096
/* ethtool nway_reset: restart autonegotiation.  Only valid when the
 * interface is running and autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* A remote PHY restarts autoneg through the firmware interface */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout in the driver timer */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation on the PHY */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7142
/* ethtool get_link: report the cached link state */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7150
7151 static int
7152 bnx2_get_eeprom_len(struct net_device *dev)
7153 {
7154         struct bnx2 *bp = netdev_priv(dev);
7155
7156         if (bp->flash_info == NULL)
7157                 return 0;
7158
7159         return (int) bp->flash_size;
7160 }
7161
7162 static int
7163 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7164                 u8 *eebuf)
7165 {
7166         struct bnx2 *bp = netdev_priv(dev);
7167         int rc;
7168
7169         /* parameters already validated in ethtool_get_eeprom */
7170
7171         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7172
7173         return rc;
7174 }
7175
7176 static int
7177 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7178                 u8 *eebuf)
7179 {
7180         struct bnx2 *bp = netdev_priv(dev);
7181         int rc;
7182
7183         /* parameters already validated in ethtool_set_eeprom */
7184
7185         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7186
7187         return rc;
7188 }
7189
/* ethtool get_coalesce: report the coalescing parameters cached in
 * the driver state.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero the fields this driver does not implement */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7211
7212 static int
7213 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7214 {
7215         struct bnx2 *bp = netdev_priv(dev);
7216
7217         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7218         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7219
7220         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7221         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7222
7223         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7224         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7225
7226         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7227         if (bp->rx_quick_cons_trip_int > 0xff)
7228                 bp->rx_quick_cons_trip_int = 0xff;
7229
7230         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7231         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7232
7233         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7234         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7235
7236         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7237         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7238
7239         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7240         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7241                 0xff;
7242
7243         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7244         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7245                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7246                         bp->stats_ticks = USEC_PER_SEC;
7247         }
7248         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7249                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7250         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7251
7252         if (netif_running(bp->dev)) {
7253                 bnx2_netif_stop(bp, true);
7254                 bnx2_init_nic(bp, 0);
7255                 bnx2_netif_start(bp, true);
7256         }
7257
7258         return 0;
7259 }
7260
/* ethtool get_ringparam: report current and maximum ring sizes.
 * The "jumbo" ring maps to the rx page ring.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7275
/* Tear down and rebuild the rings with new rx/tx sizes.  When
 * @reset_irq is true the interrupt mode and NAPI contexts are
 * recreated as well.  On a setup failure the device is closed and
 * the error is returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; they take effect on re-init below */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI before dev_close(), which will
			 * disable it again via bnx2_close().
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7331
7332 static int
7333 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7334 {
7335         struct bnx2 *bp = netdev_priv(dev);
7336         int rc;
7337
7338         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7339                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7340                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7341
7342                 return -EINVAL;
7343         }
7344         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7345                                    false);
7346         return rc;
7347 }
7348
7349 static void
7350 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7351 {
7352         struct bnx2 *bp = netdev_priv(dev);
7353
7354         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7355         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7356         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7357 }
7358
7359 static int
7360 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7361 {
7362         struct bnx2 *bp = netdev_priv(dev);
7363
7364         bp->req_flow_ctrl = 0;
7365         if (epause->rx_pause)
7366                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7367         if (epause->tx_pause)
7368                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7369
7370         if (epause->autoneg) {
7371                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7372         }
7373         else {
7374                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7375         }
7376
7377         if (netif_running(dev)) {
7378                 spin_lock_bh(&bp->phy_lock);
7379                 bnx2_setup_phy(bp, bp->phy_port);
7380                 spin_unlock_bh(&bp->phy_lock);
7381         }
7382
7383         return 0;
7384 }
7385
/* ethtool statistics names; order must match bnx2_stats_offset_arr */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7437
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a counter within struct statistics_block, in u32 units */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7441
7442 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7443     STATS_OFFSET32(stat_IfHCInOctets_hi),
7444     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7445     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7446     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7447     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7448     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7449     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7450     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7451     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7452     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7453     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7454     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7455     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7456     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7457     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7458     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7459     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7460     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7461     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7462     STATS_OFFSET32(stat_EtherStatsCollisions),
7463     STATS_OFFSET32(stat_EtherStatsFragments),
7464     STATS_OFFSET32(stat_EtherStatsJabbers),
7465     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7466     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7467     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7468     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7469     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7470     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7471     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7472     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7473     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7474     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7475     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7476     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7477     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7478     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7479     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7480     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7481     STATS_OFFSET32(stat_XonPauseFramesReceived),
7482     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7483     STATS_OFFSET32(stat_OutXonSent),
7484     STATS_OFFSET32(stat_OutXoffSent),
7485     STATS_OFFSET32(stat_MacControlFramesReceived),
7486     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7487     STATS_OFFSET32(stat_IfInFTQDiscards),
7488     STATS_OFFSET32(stat_IfInMBUFDiscards),
7489     STATS_OFFSET32(stat_FwRxDrop),
7490 };
7491
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-counter width in bytes for 5706/5708-A0 class chips:
 * 8 = 64-bit hi/lo pair, 4 = 32-bit, 0 = counter not reported
 * (bnx2_get_ethtool_stats() emits 0 for it).  Order matches
 * bnx2_stats_str_arr / bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7502
/* Counter widths for later chips (see bnx2_5706_stats_len_arr for the
 * encoding).  Only stat_IfHCInBadOctets (index 1) is skipped here;
 * stat_Dot3StatsCarrierSenseErrors is valid on these chips.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7510
#define BNX2_NUM_TESTS 6

/* ethtool self-test names.  The index of each entry must match the
 * buf[] slot filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7523
7524 static int
7525 bnx2_get_sset_count(struct net_device *dev, int sset)
7526 {
7527         switch (sset) {
7528         case ETH_SS_TEST:
7529                 return BNX2_NUM_TESTS;
7530         case ETH_SS_STATS:
7531                 return BNX2_NUM_STATS;
7532         default:
7533                 return -EOPNOTSUPP;
7534         }
7535 }
7536
/* ethtool self-test handler.  buf[] slots correspond to
 * bnx2_tests_str_arr: 0=register, 1=memory, 2=loopback (offline only),
 * 3=nvram, 4=interrupt, 5=link.  A slot is left at 0 on success.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need traffic stopped and the chip reset
		 * into diagnostic mode first.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Non-zero loopback result is reported verbatim in buf[2]. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip down if the
		 * device was closed while the tests ran.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up (up to ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7591
7592 static void
7593 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7594 {
7595         switch (stringset) {
7596         case ETH_SS_STATS:
7597                 memcpy(buf, bnx2_stats_str_arr,
7598                         sizeof(bnx2_stats_str_arr));
7599                 break;
7600         case ETH_SS_TEST:
7601                 memcpy(buf, bnx2_tests_str_arr,
7602                         sizeof(bnx2_tests_str_arr));
7603                 break;
7604         }
7605 }
7606
7607 static void
7608 bnx2_get_ethtool_stats(struct net_device *dev,
7609                 struct ethtool_stats *stats, u64 *buf)
7610 {
7611         struct bnx2 *bp = netdev_priv(dev);
7612         int i;
7613         u32 *hw_stats = (u32 *) bp->stats_blk;
7614         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7615         u8 *stats_len_arr = NULL;
7616
7617         if (hw_stats == NULL) {
7618                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7619                 return;
7620         }
7621
7622         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7623             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7624             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7625             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7626                 stats_len_arr = bnx2_5706_stats_len_arr;
7627         else
7628                 stats_len_arr = bnx2_5708_stats_len_arr;
7629
7630         for (i = 0; i < BNX2_NUM_STATS; i++) {
7631                 unsigned long offset;
7632
7633                 if (stats_len_arr[i] == 0) {
7634                         /* skip this counter */
7635                         buf[i] = 0;
7636                         continue;
7637                 }
7638
7639                 offset = bnx2_stats_offset_arr[i];
7640                 if (stats_len_arr[i] == 4) {
7641                         /* 4-byte counter */
7642                         buf[i] = (u64) *(hw_stats + offset) +
7643                                  *(temp_stats + offset);
7644                         continue;
7645                 }
7646                 /* 8-byte counter */
7647                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7648                          *(hw_stats + offset + 1) +
7649                          (((u64) *(temp_stats + offset)) << 32) +
7650                          *(temp_stats + offset + 1);
7651         }
7652 }
7653
7654 static int
7655 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7656 {
7657         struct bnx2 *bp = netdev_priv(dev);
7658
7659         switch (state) {
7660         case ETHTOOL_ID_ACTIVE:
7661                 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7662                 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7663                 return 1;       /* cycle on/off once per second */
7664
7665         case ETHTOOL_ID_ON:
7666                 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7667                         BNX2_EMAC_LED_1000MB_OVERRIDE |
7668                         BNX2_EMAC_LED_100MB_OVERRIDE |
7669                         BNX2_EMAC_LED_10MB_OVERRIDE |
7670                         BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7671                         BNX2_EMAC_LED_TRAFFIC);
7672                 break;
7673
7674         case ETHTOOL_ID_OFF:
7675                 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7676                 break;
7677
7678         case ETHTOOL_ID_INACTIVE:
7679                 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7680                 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7681                 break;
7682         }
7683
7684         return 0;
7685 }
7686
7687 static netdev_features_t
7688 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7689 {
7690         struct bnx2 *bp = netdev_priv(dev);
7691
7692         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7693                 features |= NETIF_F_HW_VLAN_CTAG_RX;
7694
7695         return features;
7696 }
7697
7698 static int
7699 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7700 {
7701         struct bnx2 *bp = netdev_priv(dev);
7702
7703         /* TSO with VLAN tag won't work with current firmware */
7704         if (features & NETIF_F_HW_VLAN_CTAG_TX)
7705                 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7706         else
7707                 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7708
7709         if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7710             !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7711             netif_running(dev)) {
7712                 bnx2_netif_stop(bp, false);
7713                 dev->features = features;
7714                 bnx2_set_rx_mode(dev);
7715                 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7716                 bnx2_netif_start(bp, false);
7717                 return 1;
7718         }
7719
7720         return 0;
7721 }
7722
7723 static void bnx2_get_channels(struct net_device *dev,
7724                               struct ethtool_channels *channels)
7725 {
7726         struct bnx2 *bp = netdev_priv(dev);
7727         u32 max_rx_rings = 1;
7728         u32 max_tx_rings = 1;
7729
7730         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7731                 max_rx_rings = RX_MAX_RINGS;
7732                 max_tx_rings = TX_MAX_RINGS;
7733         }
7734
7735         channels->max_rx = max_rx_rings;
7736         channels->max_tx = max_tx_rings;
7737         channels->max_other = 0;
7738         channels->max_combined = 0;
7739         channels->rx_count = bp->num_rx_rings;
7740         channels->tx_count = bp->num_tx_rings;
7741         channels->other_count = 0;
7742         channels->combined_count = 0;
7743 }
7744
7745 static int bnx2_set_channels(struct net_device *dev,
7746                               struct ethtool_channels *channels)
7747 {
7748         struct bnx2 *bp = netdev_priv(dev);
7749         u32 max_rx_rings = 1;
7750         u32 max_tx_rings = 1;
7751         int rc = 0;
7752
7753         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7754                 max_rx_rings = RX_MAX_RINGS;
7755                 max_tx_rings = TX_MAX_RINGS;
7756         }
7757         if (channels->rx_count > max_rx_rings ||
7758             channels->tx_count > max_tx_rings)
7759                 return -EINVAL;
7760
7761         bp->num_req_rx_rings = channels->rx_count;
7762         bp->num_req_tx_rings = channels->tx_count;
7763
7764         if (netif_running(dev))
7765                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7766                                            bp->tx_ring_size, true);
7767
7768         return rc;
7769 }
7770
/* ethtool entry points for this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
7798
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock.  Direct PHY access is refused when the PHY is
 * managed remotely (BNX2_PHY_FLAG_REMOTE_PHY_CAP) or the device is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru: GMIIPHY also returns the register value */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO accesses */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7849
7850 /* Called with rtnl_lock */
7851 static int
7852 bnx2_change_mac_addr(struct net_device *dev, void *p)
7853 {
7854         struct sockaddr *addr = p;
7855         struct bnx2 *bp = netdev_priv(dev);
7856
7857         if (!is_valid_ether_addr(addr->sa_data))
7858                 return -EADDRNOTAVAIL;
7859
7860         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7861         if (netif_running(dev))
7862                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7863
7864         return 0;
7865 }
7866
7867 /* Called with rtnl_lock */
7868 static int
7869 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7870 {
7871         struct bnx2 *bp = netdev_priv(dev);
7872
7873         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7874                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7875                 return -EINVAL;
7876
7877         dev->mtu = new_mtu;
7878         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7879                                      false);
7880 }
7881
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll controller: run every interrupt handler by hand with its
 * IRQ line disabled, so netconsole etc. can make progress without
 * normal interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = bp->irq_tbl + i;

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7898
/* Determine the media type (copper vs SerDes) of a 5709 from the
 * MISC_DUAL_MEDIA_CTRL register and set BNX2_PHY_FLAG_SERDES
 * accordingly.  NOTE(review): the bond-id and strap value decoding
 * below follows hardware strapping conventions not visible here —
 * confirm against the 5709 datasheet before changing any case values.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id alone identifies some packages: _C is copper, _S SerDes. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Otherwise use the strap value (override field wins over pins). */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-to-media mapping differs per PCI function. */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7936
/* Read the PCI/PCI-X bus mode, clock speed and width from the chip's
 * PCICFG status registers and record them in bp->flags /
 * bp->bus_speed_mhz (used for reporting and bus-specific workarounds).
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the actual clock must be decoded from the
		 * clock-control speed-detect field.
		 */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7988
/* Read the VPD area out of NVRAM and, if it carries a vendor firmware
 * version string (matched via the "1028" manufacturer-id keyword,
 * presumably Dell's PCI vendor id — confirm), append that string to
 * bp->fw_version.  Best-effort: silently returns on any failure.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* 256 bytes: second half holds the raw NVRAM read, first half
	 * receives the byte-swapped copy built below.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Swap each 32-bit word's bytes from the NVRAM image into the
	 * working copy at the start of the buffer.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bounds-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Manufacturer id must be exactly "1028" or we ignore the VPD. */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* Vendor-specific keyword V0 holds the firmware version string. */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Copy into fw_version and leave a space separator after it
	 * (bc/mfw versions are appended later in bnx2_init_board()).
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8056
8057 static int
8058 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8059 {
8060         struct bnx2 *bp;
8061         int rc, i, j;
8062         u32 reg;
8063         u64 dma_mask, persist_dma_mask;
8064         int err;
8065
8066         SET_NETDEV_DEV(dev, &pdev->dev);
8067         bp = netdev_priv(dev);
8068
8069         bp->flags = 0;
8070         bp->phy_flags = 0;
8071
8072         bp->temp_stats_blk =
8073                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8074
8075         if (bp->temp_stats_blk == NULL) {
8076                 rc = -ENOMEM;
8077                 goto err_out;
8078         }
8079
8080         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8081         rc = pci_enable_device(pdev);
8082         if (rc) {
8083                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8084                 goto err_out;
8085         }
8086
8087         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8088                 dev_err(&pdev->dev,
8089                         "Cannot find PCI device base address, aborting\n");
8090                 rc = -ENODEV;
8091                 goto err_out_disable;
8092         }
8093
8094         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8095         if (rc) {
8096                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8097                 goto err_out_disable;
8098         }
8099
8100         pci_set_master(pdev);
8101
8102         bp->pm_cap = pdev->pm_cap;
8103         if (bp->pm_cap == 0) {
8104                 dev_err(&pdev->dev,
8105                         "Cannot find power management capability, aborting\n");
8106                 rc = -EIO;
8107                 goto err_out_release;
8108         }
8109
8110         bp->dev = dev;
8111         bp->pdev = pdev;
8112
8113         spin_lock_init(&bp->phy_lock);
8114         spin_lock_init(&bp->indirect_lock);
8115 #ifdef BCM_CNIC
8116         mutex_init(&bp->cnic_lock);
8117 #endif
8118         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8119
8120         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8121                                                          TX_MAX_TSS_RINGS + 1));
8122         if (!bp->regview) {
8123                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8124                 rc = -ENOMEM;
8125                 goto err_out_release;
8126         }
8127
8128         /* Configure byte swap and enable write to the reg_window registers.
8129          * Rely on CPU to do target byte swapping on big endian systems
8130          * The chip's target access swapping will not swap all accesses
8131          */
8132         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8133                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8134                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8135
8136         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8137
8138         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8139                 if (!pci_is_pcie(pdev)) {
8140                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8141                         rc = -EIO;
8142                         goto err_out_unmap;
8143                 }
8144                 bp->flags |= BNX2_FLAG_PCIE;
8145                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8146                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8147
8148                 /* AER (Advanced Error Reporting) hooks */
8149                 err = pci_enable_pcie_error_reporting(pdev);
8150                 if (!err)
8151                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8152
8153         } else {
8154                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8155                 if (bp->pcix_cap == 0) {
8156                         dev_err(&pdev->dev,
8157                                 "Cannot find PCIX capability, aborting\n");
8158                         rc = -EIO;
8159                         goto err_out_unmap;
8160                 }
8161                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8162         }
8163
8164         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8165             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8166                 if (pdev->msix_cap)
8167                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8168         }
8169
8170         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8171             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8172                 if (pdev->msi_cap)
8173                         bp->flags |= BNX2_FLAG_MSI_CAP;
8174         }
8175
8176         /* 5708 cannot support DMA addresses > 40-bit.  */
8177         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8178                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8179         else
8180                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8181
8182         /* Configure DMA attributes. */
8183         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8184                 dev->features |= NETIF_F_HIGHDMA;
8185                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8186                 if (rc) {
8187                         dev_err(&pdev->dev,
8188                                 "pci_set_consistent_dma_mask failed, aborting\n");
8189                         goto err_out_unmap;
8190                 }
8191         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8192                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8193                 goto err_out_unmap;
8194         }
8195
8196         if (!(bp->flags & BNX2_FLAG_PCIE))
8197                 bnx2_get_pci_speed(bp);
8198
8199         /* 5706A0 may falsely detect SERR and PERR. */
8200         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8201                 reg = BNX2_RD(bp, PCI_COMMAND);
8202                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8203                 BNX2_WR(bp, PCI_COMMAND, reg);
8204         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8205                 !(bp->flags & BNX2_FLAG_PCIX)) {
8206
8207                 dev_err(&pdev->dev,
8208                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8209                 goto err_out_unmap;
8210         }
8211
8212         bnx2_init_nvram(bp);
8213
8214         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8215
8216         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8217                 bp->func = 1;
8218
8219         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8220             BNX2_SHM_HDR_SIGNATURE_SIG) {
8221                 u32 off = bp->func << 2;
8222
8223                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8224         } else
8225                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8226
8227         /* Get the permanent MAC address.  First we need to make sure the
8228          * firmware is actually running.
8229          */
8230         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8231
8232         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8233             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8234                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8235                 rc = -ENODEV;
8236                 goto err_out_unmap;
8237         }
8238
8239         bnx2_read_vpd_fw_ver(bp);
8240
8241         j = strlen(bp->fw_version);
8242         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8243         for (i = 0; i < 3 && j < 24; i++) {
8244                 u8 num, k, skip0;
8245
8246                 if (i == 0) {
8247                         bp->fw_version[j++] = 'b';
8248                         bp->fw_version[j++] = 'c';
8249                         bp->fw_version[j++] = ' ';
8250                 }
8251                 num = (u8) (reg >> (24 - (i * 8)));
8252                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8253                         if (num >= k || !skip0 || k == 1) {
8254                                 bp->fw_version[j++] = (num / k) + '0';
8255                                 skip0 = 0;
8256                         }
8257                 }
8258                 if (i != 2)
8259                         bp->fw_version[j++] = '.';
8260         }
8261         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8262         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8263                 bp->wol = 1;
8264
8265         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8266                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8267
8268                 for (i = 0; i < 30; i++) {
8269                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8270                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8271                                 break;
8272                         msleep(10);
8273                 }
8274         }
8275         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8276         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8277         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8278             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8279                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8280
8281                 if (j < 32)
8282                         bp->fw_version[j++] = ' ';
8283                 for (i = 0; i < 3 && j < 28; i++) {
8284                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8285                         reg = be32_to_cpu(reg);
8286                         memcpy(&bp->fw_version[j], &reg, 4);
8287                         j += 4;
8288                 }
8289         }
8290
8291         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8292         bp->mac_addr[0] = (u8) (reg >> 8);
8293         bp->mac_addr[1] = (u8) reg;
8294
8295         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8296         bp->mac_addr[2] = (u8) (reg >> 24);
8297         bp->mac_addr[3] = (u8) (reg >> 16);
8298         bp->mac_addr[4] = (u8) (reg >> 8);
8299         bp->mac_addr[5] = (u8) reg;
8300
8301         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8302         bnx2_set_rx_ring_size(bp, 255);
8303
8304         bp->tx_quick_cons_trip_int = 2;
8305         bp->tx_quick_cons_trip = 20;
8306         bp->tx_ticks_int = 18;
8307         bp->tx_ticks = 80;
8308
8309         bp->rx_quick_cons_trip_int = 2;
8310         bp->rx_quick_cons_trip = 12;
8311         bp->rx_ticks_int = 18;
8312         bp->rx_ticks = 18;
8313
8314         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8315
8316         bp->current_interval = BNX2_TIMER_INTERVAL;
8317
8318         bp->phy_addr = 1;
8319
8320         /* Disable WOL support if we are running on a SERDES chip. */
8321         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8322                 bnx2_get_5709_media(bp);
8323         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8324                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8325
8326         bp->phy_port = PORT_TP;
8327         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8328                 bp->phy_port = PORT_FIBRE;
8329                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8330                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8331                         bp->flags |= BNX2_FLAG_NO_WOL;
8332                         bp->wol = 0;
8333                 }
8334                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8335                         /* Don't do parallel detect on this board because of
8336                          * some board problems.  The link will not go down
8337                          * if we do parallel detect.
8338                          */
8339                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8340                             pdev->subsystem_device == 0x310c)
8341                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8342                 } else {
8343                         bp->phy_addr = 2;
8344                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8345                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8346                 }
8347         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8348                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8349                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8350         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8351                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8352                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8353                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8354
8355         bnx2_init_fw_cap(bp);
8356
8357         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8358             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8359             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8360             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8361                 bp->flags |= BNX2_FLAG_NO_WOL;
8362                 bp->wol = 0;
8363         }
8364
8365         if (bp->flags & BNX2_FLAG_NO_WOL)
8366                 device_set_wakeup_capable(&bp->pdev->dev, false);
8367         else
8368                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8369
8370         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8371                 bp->tx_quick_cons_trip_int =
8372                         bp->tx_quick_cons_trip;
8373                 bp->tx_ticks_int = bp->tx_ticks;
8374                 bp->rx_quick_cons_trip_int =
8375                         bp->rx_quick_cons_trip;
8376                 bp->rx_ticks_int = bp->rx_ticks;
8377                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8378                 bp->com_ticks_int = bp->com_ticks;
8379                 bp->cmd_ticks_int = bp->cmd_ticks;
8380         }
8381
8382         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8383          *
8384          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8385          * with byte enables disabled on the unused 32-bit word.  This is legal
8386          * but causes problems on the AMD 8132 which will eventually stop
8387          * responding after a while.
8388          *
8389          * AMD believes this incompatibility is unique to the 5706, and
8390          * prefers to locally disable MSI rather than globally disabling it.
8391          */
8392         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8393                 struct pci_dev *amd_8132 = NULL;
8394
8395                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8396                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8397                                                   amd_8132))) {
8398
8399                         if (amd_8132->revision >= 0x10 &&
8400                             amd_8132->revision <= 0x13) {
8401                                 disable_msi = 1;
8402                                 pci_dev_put(amd_8132);
8403                                 break;
8404                         }
8405                 }
8406         }
8407
8408         bnx2_set_default_link(bp);
8409         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8410
8411         init_timer(&bp->timer);
8412         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8413         bp->timer.data = (unsigned long) bp;
8414         bp->timer.function = bnx2_timer;
8415
8416 #ifdef BCM_CNIC
8417         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8418                 bp->cnic_eth_dev.max_iscsi_conn =
8419                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8420                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8421         bp->cnic_probe = bnx2_cnic_probe;
8422 #endif
8423         pci_save_state(pdev);
8424
8425         return 0;
8426
8427 err_out_unmap:
8428         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8429                 pci_disable_pcie_error_reporting(pdev);
8430                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8431         }
8432
8433         pci_iounmap(pdev, bp->regview);
8434         bp->regview = NULL;
8435
8436 err_out_release:
8437         pci_release_regions(pdev);
8438
8439 err_out_disable:
8440         pci_disable_device(pdev);
8441
8442 err_out:
8443         return rc;
8444 }
8445
8446 static char *
8447 bnx2_bus_string(struct bnx2 *bp, char *str)
8448 {
8449         char *s = str;
8450
8451         if (bp->flags & BNX2_FLAG_PCIE) {
8452                 s += sprintf(s, "PCI Express");
8453         } else {
8454                 s += sprintf(s, "PCI");
8455                 if (bp->flags & BNX2_FLAG_PCIX)
8456                         s += sprintf(s, "-X");
8457                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8458                         s += sprintf(s, " 32-bit");
8459                 else
8460                         s += sprintf(s, " 64-bit");
8461                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8462         }
8463         return str;
8464 }
8465
8466 static void
8467 bnx2_del_napi(struct bnx2 *bp)
8468 {
8469         int i;
8470
8471         for (i = 0; i < bp->irq_nvecs; i++)
8472                 netif_napi_del(&bp->bnx2_napi[i].napi);
8473 }
8474
8475 static void
8476 bnx2_init_napi(struct bnx2 *bp)
8477 {
8478         int i;
8479
8480         for (i = 0; i < bp->irq_nvecs; i++) {
8481                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8482                 int (*poll)(struct napi_struct *, int);
8483
8484                 if (i == 0)
8485                         poll = bnx2_poll;
8486                 else
8487                         poll = bnx2_poll_msix;
8488
8489                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8490                 bnapi->bp = bp;
8491         }
8492 }
8493
/* Net device callbacks wired into the network stack at probe time. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Polled I/O path for netconsole/netpoll when IRQs are unusable. */
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8511
/* PCI probe entry point: allocate the multiqueue net_device, initialize
 * the board via bnx2_init_board(), set up netdev ops and feature flags,
 * and register the interface with the network stack.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];	/* scratch buffer for bnx2_bus_string() */

	/* Print the driver banner only for the first device probed. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo what bnx2_init_board() set up. */
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	free_netdev(dev);
	return rc;
}
8577
/* PCI remove entry point: unregister the interface and release every
 * resource acquired during probe (timer, work, MMIO mapping, stats
 * buffer, AER state, firmware, netdev, PCI regions/device), in reverse
 * order of acquisition.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Quiesce deferred work before tearing down hardware access. */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8605
/* System-sleep suspend callback (wired up via SIMPLE_DEV_PM_OPS).
 * If the interface is up, stop traffic, detach the device, shut down
 * the chip, and free IRQs and rx/tx buffers; then program wake-on-LAN
 * state regardless of whether the interface was running.
 * Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* Make sure no reset task runs concurrently with shutdown. */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}
8625
/* System-sleep resume callback: restore full power, re-request IRQs,
 * re-initialize the NIC and restart traffic.  Nothing to do if the
 * interface was down at suspend time.
 * NOTE(review): the return value of bnx2_init_nic() is not checked
 * here — a failed re-init is not propagated to the PM core.
 */
static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8643
/* Power-management hookup: expose suspend/resume only when the kernel
 * is built with system-sleep support; otherwise driver.pm stays NULL.
 */
#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the interface,
 * quiesces the NIC if it was running, and disables the PCI
 * device before asking the error-recovery core for a slot reset.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* Permanent failure: no recovery possible, tell the core to
	 * disconnect the device.
	 */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8687
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Re-enables
 * the PCI device, restores config space, and re-initializes the NIC
 * when the interface was running.  On failure the interface is closed.
 * When AER is enabled, any uncorrectable error status is also cleared.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		/* Restore the config space saved at probe, then re-save
		 * so a later error cycle starts from a clean snapshot.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		/* NAPI must be enabled before dev_close() can disable it. */
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}
8735
8736 /**
8737  * bnx2_io_resume - called when traffic can start flowing again.
8738  * @pdev: Pointer to PCI device
8739  *
8740  * This callback is called when the error recovery driver tells us that
8741  * its OK to resume normal operation.
8742  */
8743 static void bnx2_io_resume(struct pci_dev *pdev)
8744 {
8745         struct net_device *dev = pci_get_drvdata(pdev);
8746         struct bnx2 *bp = netdev_priv(dev);
8747
8748         rtnl_lock();
8749         if (netif_running(dev))
8750                 bnx2_netif_start(bp, true);
8751
8752         netif_device_attach(dev);
8753         rtnl_unlock();
8754 }
8755
8756 static void bnx2_shutdown(struct pci_dev *pdev)
8757 {
8758         struct net_device *dev = pci_get_drvdata(pdev);
8759         struct bnx2 *bp;
8760
8761         if (!dev)
8762                 return;
8763
8764         bp = netdev_priv(dev);
8765         if (!bp)
8766                 return;
8767
8768         rtnl_lock();
8769         if (netif_running(dev))
8770                 dev_close(bp->dev);
8771
8772         if (system_state == SYSTEM_POWER_OFF)
8773                 bnx2_set_power_state(bp, PCI_D3hot);
8774
8775         rtnl_unlock();
8776 }
8777
/* PCI error-recovery callbacks (AER): detect -> slot reset -> resume. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8783
/* PCI driver descriptor tying together probe/remove, PM ops, error
 * handlers and the shutdown hook for all supported NX2 device IDs.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(bnx2_pci_driver);