]> Pileus Git - ~andy/linux/blob - drivers/net/ethernet/broadcom/bnx2.c
8eaab0ce220a441289e7543ddfa404cf04ce0a64
[~andy/linux] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.2.1"
62 #define DRV_MODULE_RELDATE      "Dec 18, 2011"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] __devinitdata =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board types supported by this driver.  Used as the driver_data value
 * in bnx2_pci_tbl and as the index into board_info[] below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* indexed by board_t, above */
/* Human-readable adapter names printed at probe time; the index comes
 * from the driver_data field of the matching bnx2_pci_tbl entry.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI IDs claimed by this driver.  The HP OEM boards (NC370x) are
 * matched by subsystem vendor/device ahead of the catch-all Broadcom
 * entries for the same device ID; the last field (driver_data) is the
 * board_t index into board_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* symbol; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM configuration table for pre-5709 chips, selected by the flash
 * strapping value.  The first five words of each entry are raw NVRAM
 * controller configuration values (strap, cfg1-3, write1); the
 * remaining fields give buffered/non-buffered flags, page geometry,
 * byte-address mask, total size and a printable name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* The 5709 family has a single fixed NVRAM configuration instead of
 * the strap-selected flash_table[] entries used by older chips.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257         barrier();
258
259         /* The ring uses 256 indices for 255 entries, one of them
260          * needs to be skipped.
261          */
262         diff = txr->tx_prod - txr->tx_cons;
263         if (unlikely(diff >= TX_DESC_CNT)) {
264                 diff &= 0xffff;
265                 if (diff == TX_DESC_CNT)
266                         diff = MAX_TX_DESC_CNT;
267         }
268         return bp->tx_ring_size - diff;
269 }
270
/* Read a device register through the PCI config register window.
 *
 * The window holds only one address at a time, so the address write
 * and the data read must not be interleaved with other indirect
 * accesses; indirect_lock serializes them.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
282
/* Write a device register through the PCI config register window;
 * see bnx2_reg_rd_ind() for the locking rationale.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
291
292 static void
293 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294 {
295         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
296 }
297
298 static u32
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 {
301         return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
302 }
303
/* Write one 32-bit word into the chip's context memory at
 * @cid_addr + @offset.
 *
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * register pair and we poll (up to 5 x 5us) for the WRITE_REQ bit to
 * clear; older chips use the simpler CTX_DATA_ADR/CTX_DATA window.
 * indirect_lock serializes against other indirect register users.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
		/* NOTE(review): a timeout here is silently ignored. */
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
327
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332         struct bnx2 *bp = netdev_priv(dev);
333         struct drv_ctl_io *io = &info->data.io;
334
335         switch (info->cmd) {
336         case DRV_CTL_IO_WR_CMD:
337                 bnx2_reg_wr_ind(bp, io->offset, io->data);
338                 break;
339         case DRV_CTL_IO_RD_CMD:
340                 io->data = bnx2_reg_rd_ind(bp, io->offset);
341                 break;
342         case DRV_CTL_CTX_WR_CMD:
343                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344                 break;
345         default:
346                 return -EINVAL;
347         }
348         return 0;
349 }
350
/* Tell the CNIC driver which IRQ vector and status block to use.
 *
 * With MSI-X, CNIC gets its own vector (index bp->irq_nvecs, the one
 * after the last networking vector) and the matching MSI-X status
 * block.  Otherwise it shares vector 0 and the base status block, and
 * cnic_present/cnic_tag let the shared interrupt path poll for CNIC
 * events.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are packed BNX2_SBLK_MSIX_ALIGN_SIZE apart. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
377
/* Called by the CNIC driver to attach to this device.
 *
 * Fails with -EINVAL if no ops are supplied, -EBUSY if a CNIC driver
 * is already registered, or -ENODEV if the firmware advertises no
 * iSCSI connections.  cnic_ops is published with rcu_assign_pointer()
 * so that RCU readers only see it after cnic_data is set.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
403
/* Detach the CNIC driver.  The ops pointer is cleared under cnic_lock
 * and synchronize_rcu() waits for any in-flight RCU readers of
 * bp->cnic_ops to finish before returning.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
418
/* Entry point used by the CNIC driver to discover this device.
 *
 * Returns NULL when no iSCSI connections are available
 * (cp->max_iscsi_conn == 0); otherwise fills in the cnic_eth_dev
 * identifiers and registration/control callbacks and returns it.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
438
/* Send a STOP control command to the attached CNIC driver, if any.
 * cnic_lock protects cnic_ops against concurrent (un)registration.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
454
/* Send a START control command to the attached CNIC driver, if any.
 * When the interrupt vector is shared (no MSI-X), cnic_tag is
 * re-armed to the current status index first so CNIC event polling
 * resumes from the right place.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
475
476 #else
477
/* No-op stub when CNIC support is not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
482
/* No-op stub when CNIC support is not compiled in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
487
488 #endif
489
/* Read PHY register @reg over the EMAC MDIO interface into *val.
 *
 * If the chip auto-polls the PHY, auto-polling is turned off for the
 * duration of the access (with a 40us settle time) and restored at
 * the end.  Command completion is polled up to 50 x 10us; on timeout
 * *val is zeroed and -EBUSY is returned, otherwise 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush */

		udelay(40);
	}

	/* Issue the MDIO read: PHY address, register, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush */

		udelay(40);
	}

	return ret;
}
546
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * access when enabled, and completion is polled up to 50 x 10us.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush */

		udelay(40);
	}

	/* Issue the MDIO write: PHY address, register, data, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush */

		udelay(40);
	}

	return ret;
}
595
596 static void
597 bnx2_disable_int(struct bnx2 *bp)
598 {
599         int i;
600         struct bnx2_napi *bnapi;
601
602         for (i = 0; i < bp->irq_nvecs; i++) {
603                 bnapi = &bp->bnx2_napi[i];
604                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
605                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
606         }
607         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
608 }
609
/* Unmask interrupts on every vector.
 *
 * Each vector gets two INT_ACK_CMD writes: the first acks up to
 * last_status_idx while still masked, the second unmasks.  The final
 * COAL_NOW write requests an immediate host-coalescing cycle so that
 * events which arrived while masked are serviced promptly.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
630
/* Disable interrupts and wait for all in-flight handlers to finish.
 *
 * intr_sem is bumped first so the interrupt/poll paths know to back
 * off; it is released later by bnx2_netif_start().  If the device is
 * not running there is nothing further to quiesce.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
644
645 static void
646 bnx2_napi_disable(struct bnx2 *bp)
647 {
648         int i;
649
650         for (i = 0; i < bp->irq_nvecs; i++)
651                 napi_disable(&bp->bnx2_napi[i].napi);
652 }
653
654 static void
655 bnx2_napi_enable(struct bnx2 *bp)
656 {
657         int i;
658
659         for (i = 0; i < bp->irq_nvecs; i++)
660                 napi_enable(&bp->bnx2_napi[i].napi);
661 }
662
/* Quiesce the interface: optionally stop the CNIC driver, disable
 * NAPI polling and TX queues, then mask and synchronize interrupts.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
675
/* Undo bnx2_netif_stop().
 *
 * intr_sem acts as a nesting counter: the interface is only restarted
 * when the last stopper releases it (atomic_dec_and_test).  Carrier is
 * restored under phy_lock to stay consistent with link state.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
693
694 static void
695 bnx2_free_tx_mem(struct bnx2 *bp)
696 {
697         int i;
698
699         for (i = 0; i < bp->num_tx_rings; i++) {
700                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
701                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
702
703                 if (txr->tx_desc_ring) {
704                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
705                                           txr->tx_desc_ring,
706                                           txr->tx_desc_mapping);
707                         txr->tx_desc_ring = NULL;
708                 }
709                 kfree(txr->tx_buf_ring);
710                 txr->tx_buf_ring = NULL;
711         }
712 }
713
/* Release, for every RX ring, the DMA descriptor pages, the software
 * buffer ring, and (when allocated) the page-mode descriptor pages
 * and page ring.  Safe to call on partially-allocated rings.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
745
746 static int
747 bnx2_alloc_tx_mem(struct bnx2 *bp)
748 {
749         int i;
750
751         for (i = 0; i < bp->num_tx_rings; i++) {
752                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
753                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
754
755                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
756                 if (txr->tx_buf_ring == NULL)
757                         return -ENOMEM;
758
759                 txr->tx_desc_ring =
760                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
761                                            &txr->tx_desc_mapping, GFP_KERNEL);
762                 if (txr->tx_desc_ring == NULL)
763                         return -ENOMEM;
764         }
765         return 0;
766 }
767
/* Allocate, for every RX ring, a zeroed software buffer ring plus
 * DMA-coherent descriptor pages, and — when page mode is in use
 * (bp->rx_pg_ring_size != 0) — the page ring and its descriptor
 * pages.  Returns 0 or -ENOMEM; partial allocations are left for the
 * caller to free (see bnx2_free_rx_mem).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		/* rx_max_pg_ring is presumably 0 when page mode is off,
		 * making this loop a no-op — TODO confirm against setup code.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
815
/* Free all device memory: TX/RX rings, 5709 context pages, and the
 * combined status + statistics block.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		/* The statistics block shares this allocation, so
		 * stats_blk goes away with it.
		 */
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
841
/* Allocate all DMA memory for the device: the combined status +
 * statistics block, the 5709 context pages, and the RX/TX rings.
 * Returns 0 on success or -ENOMEM; on failure everything allocated
 * so far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* Reserve one aligned status block slot per possible
                 * MSI-X hardware vector.
                 */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                        &bp->status_blk_mapping, GFP_KERNEL);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 always uses the base status block and the
         * "quick consumer index 0" fields within it.
         */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Point each extra vector at its own aligned slice of
                 * the same status block allocation.
                 */
                for (i = 1; i < bp->irq_nvecs; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* The statistics block lives right after the status block(s),
         * both virtually and in the DMA mapping.
         */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 context memory: 0x2000 bytes split into
                 * BCM_PAGE_SIZE DMA pages (at least one page).
                 */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i],
                                                GFP_KERNEL);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
918
/* Publish the driver's view of the link (speed, duplex, autoneg
 * progress) to the firmware through the BNX2_LINK_STATUS
 * shared-memory word.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        /* Remote-PHY configurations skip this report. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode the speed/duplex combination. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice so the second read reflects
                         * the current (unlatched) state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        /* Autoneg incomplete or link came up through
                         * parallel detection.
                         */
                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
977
978 static char *
979 bnx2_xceiver_str(struct bnx2 *bp)
980 {
981         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
982                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
983                  "Copper");
984 }
985
/* Log the new link state, update the net-device carrier flag, and
 * forward the state to the firmware.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                /* No trailing newline here on purpose: the pr_cont()
                 * calls below append flow-control details to this
                 * same log line.
                 */
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        }
                        else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
1016
/* Resolve the effective flow-control setting (bp->flow_ctrl) after a
 * link change, either from the forced configuration or from the
 * autonegotiated pause advertisements.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* Unless both speed and flow-control autoneg are enabled,
         * just use the requested setting (full duplex only).
         */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause frames are only meaningful at full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes PHY reports the already-resolved pause
         * result directly in its 1000X STAT1 register.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                /* Map the 1000BASE-X pause bits onto the copper
                 * PAUSE_CAP/PAUSE_ASYM encoding so the resolution
                 * table below can be shared by both media types.
                 */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                /* Partner is asymmetric-only: we may
                                 * receive pause but not send it.
                                 */
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
1092
1093 static int
1094 bnx2_5709s_linkup(struct bnx2 *bp)
1095 {
1096         u32 val, speed;
1097
1098         bp->link_up = 1;
1099
1100         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103
1104         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105                 bp->line_speed = bp->req_line_speed;
1106                 bp->duplex = bp->req_duplex;
1107                 return 0;
1108         }
1109         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110         switch (speed) {
1111                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1112                         bp->line_speed = SPEED_10;
1113                         break;
1114                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1115                         bp->line_speed = SPEED_100;
1116                         break;
1117                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119                         bp->line_speed = SPEED_1000;
1120                         break;
1121                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122                         bp->line_speed = SPEED_2500;
1123                         break;
1124         }
1125         if (val & MII_BNX2_GP_TOP_AN_FD)
1126                 bp->duplex = DUPLEX_FULL;
1127         else
1128                 bp->duplex = DUPLEX_HALF;
1129         return 0;
1130 }
1131
1132 static int
1133 bnx2_5708s_linkup(struct bnx2 *bp)
1134 {
1135         u32 val;
1136
1137         bp->link_up = 1;
1138         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140                 case BCM5708S_1000X_STAT1_SPEED_10:
1141                         bp->line_speed = SPEED_10;
1142                         break;
1143                 case BCM5708S_1000X_STAT1_SPEED_100:
1144                         bp->line_speed = SPEED_100;
1145                         break;
1146                 case BCM5708S_1000X_STAT1_SPEED_1G:
1147                         bp->line_speed = SPEED_1000;
1148                         break;
1149                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1150                         bp->line_speed = SPEED_2500;
1151                         break;
1152         }
1153         if (val & BCM5708S_1000X_STAT1_FD)
1154                 bp->duplex = DUPLEX_FULL;
1155         else
1156                 bp->duplex = DUPLEX_HALF;
1157
1158         return 0;
1159 }
1160
1161 static int
1162 bnx2_5706s_linkup(struct bnx2 *bp)
1163 {
1164         u32 bmcr, local_adv, remote_adv, common;
1165
1166         bp->link_up = 1;
1167         bp->line_speed = SPEED_1000;
1168
1169         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170         if (bmcr & BMCR_FULLDPLX) {
1171                 bp->duplex = DUPLEX_FULL;
1172         }
1173         else {
1174                 bp->duplex = DUPLEX_HALF;
1175         }
1176
1177         if (!(bmcr & BMCR_ANENABLE)) {
1178                 return 0;
1179         }
1180
1181         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183
1184         common = local_adv & remote_adv;
1185         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186
1187                 if (common & ADVERTISE_1000XFULL) {
1188                         bp->duplex = DUPLEX_FULL;
1189                 }
1190                 else {
1191                         bp->duplex = DUPLEX_HALF;
1192                 }
1193         }
1194
1195         return 0;
1196 }
1197
1198 static int
1199 bnx2_copper_linkup(struct bnx2 *bp)
1200 {
1201         u32 bmcr;
1202
1203         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1204         if (bmcr & BMCR_ANENABLE) {
1205                 u32 local_adv, remote_adv, common;
1206
1207                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209
1210                 common = local_adv & (remote_adv >> 2);
1211                 if (common & ADVERTISE_1000FULL) {
1212                         bp->line_speed = SPEED_1000;
1213                         bp->duplex = DUPLEX_FULL;
1214                 }
1215                 else if (common & ADVERTISE_1000HALF) {
1216                         bp->line_speed = SPEED_1000;
1217                         bp->duplex = DUPLEX_HALF;
1218                 }
1219                 else {
1220                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222
1223                         common = local_adv & remote_adv;
1224                         if (common & ADVERTISE_100FULL) {
1225                                 bp->line_speed = SPEED_100;
1226                                 bp->duplex = DUPLEX_FULL;
1227                         }
1228                         else if (common & ADVERTISE_100HALF) {
1229                                 bp->line_speed = SPEED_100;
1230                                 bp->duplex = DUPLEX_HALF;
1231                         }
1232                         else if (common & ADVERTISE_10FULL) {
1233                                 bp->line_speed = SPEED_10;
1234                                 bp->duplex = DUPLEX_FULL;
1235                         }
1236                         else if (common & ADVERTISE_10HALF) {
1237                                 bp->line_speed = SPEED_10;
1238                                 bp->duplex = DUPLEX_HALF;
1239                         }
1240                         else {
1241                                 bp->line_speed = 0;
1242                                 bp->link_up = 0;
1243                         }
1244                 }
1245         }
1246         else {
1247                 if (bmcr & BMCR_SPEED100) {
1248                         bp->line_speed = SPEED_100;
1249                 }
1250                 else {
1251                         bp->line_speed = SPEED_10;
1252                 }
1253                 if (bmcr & BMCR_FULLDPLX) {
1254                         bp->duplex = DUPLEX_FULL;
1255                 }
1256                 else {
1257                         bp->duplex = DUPLEX_HALF;
1258                 }
1259         }
1260
1261         return 0;
1262 }
1263
1264 static void
1265 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266 {
1267         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268
1269         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1270         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271         val |= 0x02 << 8;
1272
1273         if (bp->flow_ctrl & FLOW_CTRL_TX)
1274                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275
1276         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277 }
1278
1279 static void
1280 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281 {
1282         int i;
1283         u32 cid;
1284
1285         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286                 if (i == 1)
1287                         cid = RX_RSS_CID;
1288                 bnx2_init_rx_context(bp, cid);
1289         }
1290 }
1291
/* Program the EMAC for the current link parameters: TX length/IPG
 * settings, port mode (MII/GMII/2.5G), duplex, and the RX/TX pause
 * enables; finally refresh the RX contexts so they pick up the new
 * flow-control state.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        /* NOTE(review): 0x26ff presumably adjusts slot time/IPG for
         * 1000 Mbps half duplex -- confirm against EMAC register docs.
         */
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no 10M port mode; it falls
                                 * through to plain MII below.
                                 */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII mode plus the 25G bit. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        /* RX contexts carry a flow-control enable bit; refresh them. */
        bnx2_init_all_rx_contexts(bp);
}
1358
1359 static void
1360 bnx2_enable_bmsr1(struct bnx2 *bp)
1361 {
1362         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363             (CHIP_NUM(bp) == CHIP_NUM_5709))
1364                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365                                MII_BNX2_BLK_ADDR_GP_STATUS);
1366 }
1367
1368 static void
1369 bnx2_disable_bmsr1(struct bnx2 *bp)
1370 {
1371         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372             (CHIP_NUM(bp) == CHIP_NUM_5709))
1373                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375 }
1376
1377 static int
1378 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379 {
1380         u32 up1;
1381         int ret = 1;
1382
1383         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1384                 return 0;
1385
1386         if (bp->autoneg & AUTONEG_SPEED)
1387                 bp->advertising |= ADVERTISED_2500baseX_Full;
1388
1389         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391
1392         bnx2_read_phy(bp, bp->mii_up1, &up1);
1393         if (!(up1 & BCM5708S_UP1_2G5)) {
1394                 up1 |= BCM5708S_UP1_2G5;
1395                 bnx2_write_phy(bp, bp->mii_up1, up1);
1396                 ret = 0;
1397         }
1398
1399         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402
1403         return ret;
1404 }
1405
1406 static int
1407 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408 {
1409         u32 up1;
1410         int ret = 0;
1411
1412         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413                 return 0;
1414
1415         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417
1418         bnx2_read_phy(bp, bp->mii_up1, &up1);
1419         if (up1 & BCM5708S_UP1_2G5) {
1420                 up1 &= ~BCM5708S_UP1_2G5;
1421                 bnx2_write_phy(bp, bp->mii_up1, up1);
1422                 ret = 1;
1423         }
1424
1425         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428
1429         return ret;
1430 }
1431
1432 static void
1433 bnx2_enable_forced_2g5(struct bnx2 *bp)
1434 {
1435         u32 uninitialized_var(bmcr);
1436         int err;
1437
1438         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439                 return;
1440
1441         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442                 u32 val;
1443
1444                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1446                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447                         val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448                         val |= MII_BNX2_SD_MISC1_FORCE |
1449                                 MII_BNX2_SD_MISC1_FORCE_2_5G;
1450                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451                 }
1452
1453                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456
1457         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1458                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459                 if (!err)
1460                         bmcr |= BCM5708S_BMCR_FORCE_2500;
1461         } else {
1462                 return;
1463         }
1464
1465         if (err)
1466                 return;
1467
1468         if (bp->autoneg & AUTONEG_SPEED) {
1469                 bmcr &= ~BMCR_ANENABLE;
1470                 if (bp->req_duplex == DUPLEX_FULL)
1471                         bmcr |= BMCR_FULLDPLX;
1472         }
1473         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474 }
1475
1476 static void
1477 bnx2_disable_forced_2g5(struct bnx2 *bp)
1478 {
1479         u32 uninitialized_var(bmcr);
1480         int err;
1481
1482         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483                 return;
1484
1485         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486                 u32 val;
1487
1488                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1490                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491                         val &= ~MII_BNX2_SD_MISC1_FORCE;
1492                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493                 }
1494
1495                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498
1499         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1500                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501                 if (!err)
1502                         bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503         } else {
1504                 return;
1505         }
1506
1507         if (err)
1508                 return;
1509
1510         if (bp->autoneg & AUTONEG_SPEED)
1511                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513 }
1514
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518         u32 val;
1519
1520         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522         if (start)
1523                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524         else
1525                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527
/* Poll the PHY, update bp->link_up / line_speed / duplex / flow_ctrl,
 * report a change in link state, and reprogram the MAC to match.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Remote-PHY configurations get link state from firmware. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        /* Remember the old state so we only report actual changes. */
        link_up = bp->link_up;

        /* Read BMSR twice: the first read returns the latched value,
         * the second the current link state.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                /* Release any previously forced link-down state
                 * before sampling the link.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                /* Read the AN_DBG shadow register twice, like BMSR. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                /* On 5706 SerDes, override the BMSR link bit with the
                 * EMAC link status plus sync detection.
                 */
                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex via the chip-specific helper. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                /* Leave parallel-detect mode: re-enable autoneg. */
                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1611
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615         int i;
1616         u32 reg;
1617
1618         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619
1620 #define PHY_RESET_MAX_WAIT 100
1621         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622                 udelay(10);
1623
1624                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625                 if (!(reg & BMCR_RESET)) {
1626                         udelay(20);
1627                         break;
1628                 }
1629         }
1630         if (i == PHY_RESET_MAX_WAIT) {
1631                 return -EBUSY;
1632         }
1633         return 0;
1634 }
1635
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639         u32 adv = 0;
1640
1641         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643
1644                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645                         adv = ADVERTISE_1000XPAUSE;
1646                 }
1647                 else {
1648                         adv = ADVERTISE_PAUSE_CAP;
1649                 }
1650         }
1651         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653                         adv = ADVERTISE_1000XPSE_ASYM;
1654                 }
1655                 else {
1656                         adv = ADVERTISE_PAUSE_ASYM;
1657                 }
1658         }
1659         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662                 }
1663                 else {
1664                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665                 }
1666         }
1667         return adv;
1668 }
1669
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671
/* Hand link configuration (speed/duplex/pause advertisement) to the
 * remote-PHY firmware: the settings are packed into one word, written
 * to shared memory, and committed with a CMD_SET_LINK mailbox sync.
 * bp->phy_lock is dropped across the firmware sync (see annotations).
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 speed_arg = 0, pause_adv;

        pause_adv = bnx2_phy_get_pause_adv(bp);

        if (bp->autoneg & AUTONEG_SPEED) {
                /* Autoneg: advertise every enabled speed/duplex. */
                speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                if (bp->advertising & ADVERTISED_2500baseX_Full)
                        speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        } else {
                /* Forced mode: encode exactly one speed/duplex. */
                if (bp->req_line_speed == SPEED_2500)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
                else if (bp->req_line_speed == SPEED_1000)
                        speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
                else if (bp->req_line_speed == SPEED_100) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
                } else if (bp->req_line_speed == SPEED_10) {
                        if (bp->req_duplex == DUPLEX_FULL)
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
                        else
                                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
                }
        }

        /* Map both SerDes and copper pause bits onto the firmware's
         * symmetric/asymmetric pause flags.
         */
        if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
        if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
                speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

        if (port == PORT_TP)
                speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                             BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

        bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

        spin_unlock_bh(&bp->phy_lock);
        bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
        spin_lock_bh(&bp->phy_lock);

        return 0;
}
1730
/* Configure a driver-controlled SerDes (fibre) PHY.
 *
 * Remote-PHY capable devices are handed off to the firmware via
 * bnx2_setup_remote_phy().  Otherwise, either forces speed/duplex
 * (!AUTONEG_SPEED) or programs the 1000X advertisement and restarts
 * autonegotiation.  Called with phy_lock held; drops and re-takes it
 * around a link-down settle delay.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability bit requires a link bounce
		 * so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* 2.5G forcing is chip specific: 5709 uses helper calls,
		 * 5708 uses a BMCR bit.
		 */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiated path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1847
/* All fibre speeds this board can advertise (2.5G only when capable).
 * NOTE: expands to an expression referencing the local variable `bp`.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds, as ethtool ADVERTISED_* bits. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII_ADVERTISE register bits for all 10/100 abilities plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 register bits for all 1000BASE-T abilities. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866         u32 link;
1867
1868         if (bp->phy_port == PORT_TP)
1869                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870         else
1871                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872
1873         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874                 bp->req_line_speed = 0;
1875                 bp->autoneg |= AUTONEG_SPEED;
1876                 bp->advertising = ADVERTISED_Autoneg;
1877                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878                         bp->advertising |= ADVERTISED_10baseT_Half;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880                         bp->advertising |= ADVERTISED_10baseT_Full;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882                         bp->advertising |= ADVERTISED_100baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884                         bp->advertising |= ADVERTISED_100baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886                         bp->advertising |= ADVERTISED_1000baseT_Full;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888                         bp->advertising |= ADVERTISED_2500baseX_Full;
1889         } else {
1890                 bp->autoneg = 0;
1891                 bp->advertising = 0;
1892                 bp->req_duplex = DUPLEX_FULL;
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894                         bp->req_line_speed = SPEED_10;
1895                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896                                 bp->req_duplex = DUPLEX_HALF;
1897                 }
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899                         bp->req_line_speed = SPEED_100;
1900                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901                                 bp->req_duplex = DUPLEX_HALF;
1902                 }
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904                         bp->req_line_speed = SPEED_1000;
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906                         bp->req_line_speed = SPEED_2500;
1907         }
1908 }
1909
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914                 bnx2_set_default_remote_link(bp);
1915                 return;
1916         }
1917
1918         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919         bp->req_line_speed = 0;
1920         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921                 u32 reg;
1922
1923                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924
1925                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928                         bp->autoneg = 0;
1929                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1930                         bp->req_duplex = DUPLEX_FULL;
1931                 }
1932         } else
1933                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935
/* Write the next driver-pulse sequence number to the firmware's pulse
 * mailbox so the bootcode knows the driver is alive.  Uses the indirect
 * register window (under indirect_lock) rather than bnx2_shmem_wr();
 * presumably so it is safe from the contexts that call it -- NOTE
 * (review): confirm against callers.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Sequence number wraps within BNX2_DRV_PULSE_SEQ_MASK. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1949
/* Handle a link-status event from the firmware-managed PHY: decode the
 * BNX2_LINK_STATUS shared-memory word into link state, speed, duplex,
 * flow control and port type, then update the MAC and report any link
 * change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect changes */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware asks for a heartbeat by setting this bit. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The xxHALF cases intentionally fall through to the
		 * same-speed xxFULL case after overriding the duplex.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced unless both speed and flow-control
		 * autoneg are enabled, in which case take the negotiated
		 * result from the status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (TP <-> fibre) resets link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2026
2027 static int
2028 bnx2_set_remote_link(struct bnx2 *bp)
2029 {
2030         u32 evt_code;
2031
2032         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033         switch (evt_code) {
2034                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2035                         bnx2_remote_phy_event(bp);
2036                         break;
2037                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038                 default:
2039                         bnx2_send_heart_beat(bp);
2040                         break;
2041         }
2042         return 0;
2043 }
2044
/* Configure a directly-controlled copper PHY from bp->autoneg,
 * bp->advertising, bp->req_line_speed and bp->req_duplex.  Called with
 * phy_lock held; drops and re-takes it around a forced link-down
 * settle delay.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv = 0;
		u32 new_adv1000 = 0;

		/* Mask the current advertisement down to the ability and
		 * pause bits we compare against.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
		new_adv |= ADVERTISE_CSMA;
		new_adv |= bnx2_phy_get_pause_adv(bp);

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);

		/* Only rewrite and restart autoneg if something changed or
		 * autoneg is not currently enabled in the PHY.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current value.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2134
2135 static int
2136 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2137 __releases(&bp->phy_lock)
2138 __acquires(&bp->phy_lock)
2139 {
2140         if (bp->loopback == MAC_LOOPBACK)
2141                 return 0;
2142
2143         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2144                 return bnx2_setup_serdes_phy(bp, port);
2145         }
2146         else {
2147                 return bnx2_setup_copper_phy(bp);
2148         }
2149 }
2150
/* One-time initialization of the 5709 SerDes PHY.
 *
 * The 5709 SerDes uses an offset register map (+0x10) and a paged
 * register layout selected through MII_BNX2_BLK_ADDR; the sequence of
 * block selects and writes below is order sensitive.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route MDIO accesses to the autoneg MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode, disable auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G capability to match the board. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2200
/* One-time initialization of the BCM5708S SerDes PHY: select fibre
 * mode, enable PLL early-detect, set 2.5G capability when supported,
 * and apply board-specific TX amplitude tweaks from NVRAM.  Register
 * pages are selected via BCM5708S_BLK_ADDR.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fibre mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from NVRAM, but only
	 * on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2258
/* One-time initialization of the 5706 SerDes PHY.  Registers 0x18 and
 * 0x1c are vendor shadow registers; the magic values below come from
 * Broadcom and configure extended packet length for jumbo MTUs --
 * NOTE(review): exact bit meanings not documented here, do not alter.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2296
/* One-time initialization of a copper PHY: apply CRC and early-DAC
 * workarounds when flagged, set/clear extended packet length for the
 * current MTU, and enable ethernet@wirespeed.  Registers 0x10/0x17/
 * 0x18/0x1c are vendor shadow/expansion registers with Broadcom-
 * supplied magic values -- do not alter.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround sequence (board specific, from Broadcom). */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2348
2349
2350 static int
2351 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2352 __releases(&bp->phy_lock)
2353 __acquires(&bp->phy_lock)
2354 {
2355         u32 val;
2356         int rc = 0;
2357
2358         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2359         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2360
2361         bp->mii_bmcr = MII_BMCR;
2362         bp->mii_bmsr = MII_BMSR;
2363         bp->mii_bmsr1 = MII_BMSR;
2364         bp->mii_adv = MII_ADVERTISE;
2365         bp->mii_lpa = MII_LPA;
2366
2367         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368
2369         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2370                 goto setup_phy;
2371
2372         bnx2_read_phy(bp, MII_PHYSID1, &val);
2373         bp->phy_id = val << 16;
2374         bnx2_read_phy(bp, MII_PHYSID2, &val);
2375         bp->phy_id |= val & 0xffff;
2376
2377         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2378                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2379                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2380                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2381                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2382                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2383                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2384         }
2385         else {
2386                 rc = bnx2_init_copper_phy(bp, reset_phy);
2387         }
2388
2389 setup_phy:
2390         if (!rc)
2391                 rc = bnx2_setup_phy(bp, bp->phy_port);
2392
2393         return rc;
2394 }
2395
2396 static int
2397 bnx2_set_mac_loopback(struct bnx2 *bp)
2398 {
2399         u32 mac_mode;
2400
2401         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405         bp->link_up = 1;
2406         return 0;
2407 }
2408
2409 static int bnx2_test_link(struct bnx2 *);
2410
2411 static int
2412 bnx2_set_phy_loopback(struct bnx2 *bp)
2413 {
2414         u32 mac_mode;
2415         int rc, i;
2416
2417         spin_lock_bh(&bp->phy_lock);
2418         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2419                             BMCR_SPEED1000);
2420         spin_unlock_bh(&bp->phy_lock);
2421         if (rc)
2422                 return rc;
2423
2424         for (i = 0; i < 10; i++) {
2425                 if (bnx2_test_link(bp) == 0)
2426                         break;
2427                 msleep(100);
2428         }
2429
2430         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2432                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2433                       BNX2_EMAC_MODE_25G_MODE);
2434
2435         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2436         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2437         bp->link_up = 1;
2438         return 0;
2439 }
2440
/* Dump MCP (management CPU) and shared-memory state to the kernel log
 * for firmware-handshake failure diagnosis.  The program counter is
 * read twice on purpose so two samples appear in one line (shows
 * whether the MCP is running or stuck).
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* MCP state registers moved on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2482
/* Post a message to the bootcode firmware through the driver mailbox
 * and, optionally, wait for it to be acknowledged.
 *
 * @bp:       device context
 * @msg_data: message code/data (BNX2_DRV_MSG_* bits); the sequence
 *            field is filled in here
 * @ack:      zero for fire-and-forget, non-zero to poll for the ack
 * @silent:   non-zero suppresses the error log + MCP dump on timeout
 *
 * Returns 0 on success, -EBUSY if the firmware did not ack within
 * BNX2_FW_ACK_TIME_OUT_MS, -EIO if it acked with a failure status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        /* Tag the message with a fresh sequence number so the ack can
         * be matched against this specific request. */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                /* The firmware acks by echoing our sequence number in
                 * the FW mailbox. */
                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages are not required to be acked; treat a missing
         * ack as success for them. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
                if (!silent) {
                        pr_err("fw sync timeout, reset code = %x\n", msg_data);
                        bnx2_dump_mcp_state(bp);
                }

                return -EBUSY;
        }

        /* Acked, but the firmware reported a non-OK status. */
        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2528
/* Initialize the 5709's context memory and program its host page table
 * with the DMA addresses of the context pages previously allocated in
 * bp->ctx_blk[].
 *
 * Returns 0 on success, -EBUSY if the chip does not complete an
 * operation within the polling window, -ENOMEM if a context page was
 * never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Start context-memory init; the page size is encoded as
         * log2(page) - 8 in bits 16+.  NOTE(review): the meaning of
         * bit 12 is not documented here -- taken from vendor init
         * values; confirm against the chip manual. */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Poll (up to 10 * 2us) for MEM_INIT to self-clear. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Pages must already exist; zero each before handing
                 * it to the chip. */
                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Load the 64-bit DMA address of page i into the host
                 * page table data registers, then trigger the write. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll (up to 10 * 5us) for the write to complete. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2576
2577 static void
2578 bnx2_init_context(struct bnx2 *bp)
2579 {
2580         u32 vcid;
2581
2582         vcid = 96;
2583         while (vcid) {
2584                 u32 vcid_addr, pcid_addr, offset;
2585                 int i;
2586
2587                 vcid--;
2588
2589                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2590                         u32 new_vcid;
2591
2592                         vcid_addr = GET_PCID_ADDR(vcid);
2593                         if (vcid & 0x8) {
2594                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2595                         }
2596                         else {
2597                                 new_vcid = vcid;
2598                         }
2599                         pcid_addr = GET_PCID_ADDR(new_vcid);
2600                 }
2601                 else {
2602                         vcid_addr = GET_CID_ADDR(vcid);
2603                         pcid_addr = vcid_addr;
2604                 }
2605
2606                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2607                         vcid_addr += (i << PHY_CTX_SHIFT);
2608                         pcid_addr += (i << PHY_CTX_SHIFT);
2609
2610                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2611                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2612
2613                         /* Zero out the context. */
2614                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2615                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2616                 }
2617         }
2618 }
2619
/* Work around chips with bad RX buffer memory blocks: drain the entire
 * free RX mbuf pool via the firmware allocate command, keep only the
 * "good" buffers (those whose allocated value does not have bit 9 set),
 * and free just those back -- permanently retiring the bad blocks so
 * the chip never hands them out again.
 *
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        /* NOTE(review): assumes the free pool never yields more than
         * 512 buffers -- there is no bound check in the loop below;
         * confirm against the chip's RBUF pool size. */
        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL)
                return -ENOMEM;

        /* The RX mbuf allocator must be enabled for the firmware
         * alloc/free commands to work. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* Encode the buffer value in the format the firmware
                 * free command expects (value in both halves, bit 0
                 * set).  NOTE(review): format taken from vendor code;
                 * not derivable from this file alone. */
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2668
2669 static void
2670 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2671 {
2672         u32 val;
2673
2674         val = (mac_addr[0] << 8) | mac_addr[1];
2675
2676         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2677
2678         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2679                 (mac_addr[4] << 8) | mac_addr[5];
2680
2681         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2682 }
2683
2684 static inline int
2685 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2686 {
2687         dma_addr_t mapping;
2688         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2689         struct rx_bd *rxbd =
2690                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2691         struct page *page = alloc_page(gfp);
2692
2693         if (!page)
2694                 return -ENOMEM;
2695         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2696                                PCI_DMA_FROMDEVICE);
2697         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2698                 __free_page(page);
2699                 return -EIO;
2700         }
2701
2702         rx_pg->page = page;
2703         dma_unmap_addr_set(rx_pg, mapping, mapping);
2704         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2705         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2706         return 0;
2707 }
2708
2709 static void
2710 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2711 {
2712         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2713         struct page *page = rx_pg->page;
2714
2715         if (!page)
2716                 return;
2717
2718         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2719                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2720
2721         __free_page(page);
2722         rx_pg->page = NULL;
2723 }
2724
2725 static inline int
2726 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2727 {
2728         u8 *data;
2729         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2730         dma_addr_t mapping;
2731         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2732
2733         data = kmalloc(bp->rx_buf_size, gfp);
2734         if (!data)
2735                 return -ENOMEM;
2736
2737         mapping = dma_map_single(&bp->pdev->dev,
2738                                  get_l2_fhdr(data),
2739                                  bp->rx_buf_use_size,
2740                                  PCI_DMA_FROMDEVICE);
2741         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2742                 kfree(data);
2743                 return -EIO;
2744         }
2745
2746         rx_buf->data = data;
2747         dma_unmap_addr_set(rx_buf, mapping, mapping);
2748
2749         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2750         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2751
2752         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2753
2754         return 0;
2755 }
2756
2757 static int
2758 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2759 {
2760         struct status_block *sblk = bnapi->status_blk.msi;
2761         u32 new_link_state, old_link_state;
2762         int is_set = 1;
2763
2764         new_link_state = sblk->status_attn_bits & event;
2765         old_link_state = sblk->status_attn_bits_ack & event;
2766         if (new_link_state != old_link_state) {
2767                 if (new_link_state)
2768                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2769                 else
2770                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2771         } else
2772                 is_set = 0;
2773
2774         return is_set;
2775 }
2776
2777 static void
2778 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2779 {
2780         spin_lock(&bp->phy_lock);
2781
2782         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2783                 bnx2_set_link(bp);
2784         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2785                 bnx2_set_remote_link(bp);
2786
2787         spin_unlock(&bp->phy_lock);
2788
2789 }
2790
2791 static inline u16
2792 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2793 {
2794         u16 cons;
2795
2796         /* Tell compiler that status block fields can change. */
2797         barrier();
2798         cons = *bnapi->hw_tx_cons_ptr;
2799         barrier();
2800         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2801                 cons++;
2802         return cons;
2803 }
2804
/* Reclaim completed TX descriptors for the ring belonging to @bnapi,
 * up to @budget packets: unmap the DMA buffers, free the skbs, update
 * byte-queue-limits accounting, and re-wake the TX queue if it was
 * stopped and enough descriptors are free again.  Returns the number
 * of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        unsigned int tx_bytes = 0;
        struct netdev_queue *txq;

        /* One netdev TX queue per NAPI instance. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        /* Index of the last BD of this packet; add one
                         * more if it wraps past the end of a ring page
                         * (the last slot is skipped). */
                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Signed (wrap-safe) compare: stop if the chip
                         * has not yet consumed the whole packet. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                /* Unmap each page fragment of the packet. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        dma_unmap_page(&bp->pdev->dev,
                                dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_bytes += skb->len;
                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Refresh the hardware index in case more packets
                 * completed while we were reclaiming. */
                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        /* Byte Queue Limits: report completed work to the stack. */
        netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                /* Re-check under the TX lock to avoid racing with the
                 * transmit path stopping the queue concurrently. */
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2898
/* Recycle @count entries of the RX page (jumbo) ring from the consumer
 * side back to the producer side without allocating any new pages.
 * If @skb is non-NULL, its last page fragment is first detached and
 * returned to the ring (the caller failed to allocate a replacement)
 * and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
                __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

                /* Put the page back on the current consumer slot. */
                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* Move the page, its DMA mapping, and the descriptor
                 * address from the consumer slot to the producer slot
                 * (no-op when they coincide). */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        dma_unmap_addr_set(prod_rx_pg, mapping,
                                dma_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2954
/* Recycle an RX buffer that will not be passed up the stack: re-post
 * @data at producer slot @prod, transferring the DMA mapping and
 * descriptor address from consumer slot @cons when the slots differ.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                   u8 *data, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Hand the header area (which was synced to the CPU for
         * inspection) back to the device before reuse. */
        dma_sync_single_for_device(&bp->pdev->dev,
                dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->data = data;

        /* Same slot: the mapping and descriptor are already correct. */
        if (cons == prod)
                return;

        dma_unmap_addr_set(prod_rx_buf, mapping,
                        dma_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2984
/* Build an skb around a received data buffer.  First replenishes the
 * ring slot (@ring_idx low 16 bits = producer, high 16 bits = consumer);
 * on failure the buffer and any page fragments are recycled and NULL is
 * returned.  For split (jumbo) packets, hdr_len bytes live in @data and
 * the rest is gathered from the page ring into skb fragments.
 *
 * @len does not include the 4-byte CRC; the hardware lengths used for
 * the page math below do (hence the "+ 4" adjustments).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;
        struct sk_buff *skb;

        /* Replace this buffer on the ring before consuming it. */
        err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
/* NB: this label is also reached by a backward goto from the build_skb
 * failure path below. */
error:
                /* Split packet: also recycle the page-ring entries that
                 * would have carried the non-header portion. */
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return NULL;
        }

        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                goto error;
        }
        /* Skip the l2_fhdr and padding so skb->data points at the
         * Ethernet header. */
        skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
        if (hdr_len == 0) {
                /* Non-split packet: everything is in the data buffer. */
                skb_put(skb, len);
                return skb;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                /* Bytes still on the page ring (incl. the 4-byte CRC). */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* The remaining bytes are all (or part of) the
                         * CRC: trim it off and recycle the remaining
                         * pages instead of attaching them. */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        skb_frag_size_sub(frag, tail);
                                        skb->data_len -= tail;
                                }
                                return skb;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = dma_unmap_addr(rx_pg, mapping);
                        /* Last fragment: drop the 4-byte CRC. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod),
                                                 GFP_ATOMIC);
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                /* Recycles the just-attached frag too
                                 * and frees the skb. */
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return NULL;
                        }

                        dma_unmap_page(&bp->pdev->dev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += PAGE_SIZE;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return skb;
}
3089
3090 static inline u16
3091 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3092 {
3093         u16 cons;
3094
3095         /* Tell compiler that status block fields can change. */
3096         barrier();
3097         cons = *bnapi->hw_rx_cons_ptr;
3098         barrier();
3099         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3100                 cons++;
3101         return cons;
3102 }
3103
/* NAPI RX poll loop: process up to @budget received packets from the
 * ring belonging to @bnapi -- validate, copy small packets / build skbs
 * for large ones, apply VLAN/checksum/RSS-hash offload results, and
 * hand packets to the stack via GRO.  Finally publishes the new
 * producer indices to the hardware.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u8 *data;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                /* Take ownership of the buffer; the slot will be
                 * refilled or the buffer recycled below. */
                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                data = rx_buf->data;
                rx_buf->data = NULL;

                rx_hdr = get_l2_fhdr(data);
                prefetch(rx_hdr);

                dma_addr = dma_unmap_addr(rx_buf, mapping);

                /* Sync only the header area to the CPU; small packets
                 * are copied straight out of it. */
                dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                /* Warm the cache for the next iteration. */
                next_rx_buf =
                        &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
                prefetch(get_l2_fhdr(next_rx_buf->data));

                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;

                /* Decide whether part of the packet lives on the page
                 * (jumbo) ring: either the chip split it explicitly or
                 * it exceeds the jumbo threshold. */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Drop errored frames, recycling the buffer (and any
                 * page-ring entries) back onto the ring. */
                if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
                                       L2_FHDR_ERRORS_PHY_DECODE |
                                       L2_FHDR_ERRORS_ALIGNMENT |
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {

                        bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;

                                pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                        }
                        goto next_rx;
                }

                /* Strip the 4-byte CRC from the reported length. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        /* Small packet: copy into a fresh skb and put
                         * the original buffer straight back on the
                         * ring.  The extra 6 bytes keep the IP header
                         * aligned after the copy. */
                        skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (skb == NULL) {
                                bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        memcpy(skb->data,
                               (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
                               len + 6);
                        skb_reserve(skb, 6);
                        skb_put(skb, len);

                        bnx2_reuse_rx_data(bp, rxr, data,
                                sw_ring_cons, sw_ring_prod);

                } else {
                        /* Large packet: wrap the buffer itself (and any
                         * page fragments) in an skb. */
                        skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
                                          (sw_ring_cons << 16) | sw_ring_prod);
                        if (!skb)
                                goto next_rx;
                }
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop over-MTU frames unless they are VLAN-tagged
                 * (0x8100 = ETH_P_8021Q). */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Propagate the hardware checksum verdict only when
                 * RX checksumming is enabled and no error was flagged. */
                skb_checksum_none_assert(skb);
                if ((bp->dev->features & NETIF_F_RXCSUM) &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                if ((bp->dev->features & NETIF_F_RXHASH) &&
                    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
                     L2_FHDR_STATUS_USE_RXHASH))
                        skb->rxhash = rx_hdr->l2_fhdr_hash;

                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
                napi_gro_receive(&bnapi->napi, skb);
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Tell the chip about the new producer positions. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        /* Order the index writes before any subsequent MMIO. */
        mmiowb();

        return rx_pkt;

}
3264
3265 /* MSI ISR - The only difference between this and the INTx ISR
3266  * is that the MSI interrupt is always serviced.
3267  */
3268 static irqreturn_t
3269 bnx2_msi(int irq, void *dev_instance)
3270 {
3271         struct bnx2_napi *bnapi = dev_instance;
3272         struct bnx2 *bp = bnapi->bp;
3273
3274         prefetch(bnapi->status_blk.msi);
3275         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3276                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3277                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3278
3279         /* Return here if interrupt is disabled. */
3280         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3281                 return IRQ_HANDLED;
3282
3283         napi_schedule(&bnapi->napi);
3284
3285         return IRQ_HANDLED;
3286 }
3287
3288 static irqreturn_t
3289 bnx2_msi_1shot(int irq, void *dev_instance)
3290 {
3291         struct bnx2_napi *bnapi = dev_instance;
3292         struct bnx2 *bp = bnapi->bp;
3293
3294         prefetch(bnapi->status_blk.msi);
3295
3296         /* Return here if interrupt is disabled. */
3297         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3298                 return IRQ_HANDLED;
3299
3300         napi_schedule(&bnapi->napi);
3301
3302         return IRQ_HANDLED;
3303 }
3304
/* INTx ISR.  The line may be shared, so first decide whether this
 * device actually asserted the interrupt before acking anything.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Record the index we acked so the next INTx can be
		 * recognized as new work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3343
3344 static inline int
3345 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3346 {
3347         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3348         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3349
3350         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3351             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3352                 return 1;
3353         return 0;
3354 }
3355
3356 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3357                                  STATUS_ATTN_BITS_TIMER_ABORT)
3358
3359 static inline int
3360 bnx2_has_work(struct bnx2_napi *bnapi)
3361 {
3362         struct status_block *sblk = bnapi->status_blk.msi;
3363
3364         if (bnx2_has_fast_work(bnapi))
3365                 return 1;
3366
3367 #ifdef BCM_CNIC
3368         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3369                 return 1;
3370 #endif
3371
3372         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3373             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3374                 return 1;
3375
3376         return 0;
3377 }
3378
/* Periodic check for a missed MSI.  If work is pending but the status
 * index has not advanced since the previous check, assume the MSI was
 * lost: pulse the MSI enable bit and invoke the handler directly so
 * NAPI gets scheduled again.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on, then service the
			 * interrupt by hand.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3400
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver, if any.
 * cnic_ops is RCU-protected; cnic_tag records the status index the
 * CNIC handler has consumed (compared in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3417
/* Service link-state / timer-abort attention events.  An event is
 * pending when its bit in status_attn_bits differs from the
 * corresponding ack bit.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back flushes the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3437
/* One round of fast-path work: TX completions first (they do not count
 * against the budget), then RX limited to the budget remaining.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3452
/* NAPI poll handler for the non-default MSI-X vectors; these carry
 * only fast-path TX/RX work (link and CNIC events go through
 * bnx2_poll() on the first vector).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: re-enable the vector and report how far we
			 * processed via last_status_idx.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3479
/* NAPI poll handler for the default vector: link events, CNIC work
 * (when built in), and fast-path TX/RX.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: the first write updates the index with the
			 * interrupt still masked; the second unmasks it.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3528
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN keep), the multicast
 * hash registers, the unicast match filters, and the RPM sort-user
 * register from the net_device's flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in hardware only when RX VLAN stripping is off
	 * and the chip/firmware combination supports it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 bits spread across
		 * the NUM_MC_HASH_REGISTERS 32-bit registers.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for the match filters: fall back
	 * to promiscuous.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort register: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3617
3618 static int
3619 check_fw_section(const struct firmware *fw,
3620                  const struct bnx2_fw_file_section *section,
3621                  u32 alignment, bool non_empty)
3622 {
3623         u32 offset = be32_to_cpu(section->offset);
3624         u32 len = be32_to_cpu(section->len);
3625
3626         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3627                 return -EINVAL;
3628         if ((non_empty && len == 0) || len > fw->size - offset ||
3629             len & (alignment - 1))
3630                 return -EINVAL;
3631         return 0;
3632 }
3633
3634 static int
3635 check_mips_fw_entry(const struct firmware *fw,
3636                     const struct bnx2_mips_fw_file_entry *entry)
3637 {
3638         if (check_fw_section(fw, &entry->text, 4, true) ||
3639             check_fw_section(fw, &entry->data, 4, false) ||
3640             check_fw_section(fw, &entry->rodata, 4, false))
3641                 return -EINVAL;
3642         return 0;
3643 }
3644
3645 static void bnx2_release_firmware(struct bnx2 *bp)
3646 {
3647         if (bp->rv2p_firmware) {
3648                 release_firmware(bp->mips_firmware);
3649                 release_firmware(bp->rv2p_firmware);
3650                 bp->rv2p_firmware = NULL;
3651         }
3652 }
3653
3654 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3655 {
3656         const char *mips_fw_file, *rv2p_fw_file;
3657         const struct bnx2_mips_fw_file *mips_fw;
3658         const struct bnx2_rv2p_fw_file *rv2p_fw;
3659         int rc;
3660
3661         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3662                 mips_fw_file = FW_MIPS_FILE_09;
3663                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3664                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3665                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3666                 else
3667                         rv2p_fw_file = FW_RV2P_FILE_09;
3668         } else {
3669                 mips_fw_file = FW_MIPS_FILE_06;
3670                 rv2p_fw_file = FW_RV2P_FILE_06;
3671         }
3672
3673         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3674         if (rc) {
3675                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3676                 goto out;
3677         }
3678
3679         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3680         if (rc) {
3681                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3682                 goto err_release_mips_firmware;
3683         }
3684         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3685         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3686         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3687             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3688             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3689             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3690             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3691             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3692                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3693                 rc = -EINVAL;
3694                 goto err_release_firmware;
3695         }
3696         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3697             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3698             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3699                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3700                 rc = -EINVAL;
3701                 goto err_release_firmware;
3702         }
3703 out:
3704         return rc;
3705
3706 err_release_firmware:
3707         release_firmware(bp->rv2p_firmware);
3708         bp->rv2p_firmware = NULL;
3709 err_release_mips_firmware:
3710         release_firmware(bp->mips_firmware);
3711         goto out;
3712 }
3713
3714 static int bnx2_request_firmware(struct bnx2 *bp)
3715 {
3716         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3717 }
3718
3719 static u32
3720 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3721 {
3722         switch (idx) {
3723         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3724                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3725                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3726                 break;
3727         }
3728         return rv2p_code;
3729 }
3730
/* Download one RV2P processor image.  Each 64-bit instruction is
 * staged through the INSTR_HIGH/INSTR_LOW registers and committed to
 * its instruction index via the processor's ADDR_CMD register.  After
 * the raw image, the fixup table is applied (see rv2p_fw_fixup()), and
 * the processor is left in reset; it is un-stalled later.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command value and address register for the target
	 * processor.
	 */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image, one 8-byte instruction at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply fixups: each entry is a word index into the image whose
	 * low instruction word gets patched by rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3790
3791 static int
3792 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3793             const struct bnx2_mips_fw_file_entry *fw_entry)
3794 {
3795         u32 addr, len, file_offset;
3796         __be32 *data;
3797         u32 offset;
3798         u32 val;
3799
3800         /* Halt the CPU. */
3801         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3802         val |= cpu_reg->mode_value_halt;
3803         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3804         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3805
3806         /* Load the Text area. */
3807         addr = be32_to_cpu(fw_entry->text.addr);
3808         len = be32_to_cpu(fw_entry->text.len);
3809         file_offset = be32_to_cpu(fw_entry->text.offset);
3810         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3811
3812         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3813         if (len) {
3814                 int j;
3815
3816                 for (j = 0; j < (len / 4); j++, offset += 4)
3817                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3818         }
3819
3820         /* Load the Data area. */
3821         addr = be32_to_cpu(fw_entry->data.addr);
3822         len = be32_to_cpu(fw_entry->data.len);
3823         file_offset = be32_to_cpu(fw_entry->data.offset);
3824         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3825
3826         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3827         if (len) {
3828                 int j;
3829
3830                 for (j = 0; j < (len / 4); j++, offset += 4)
3831                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3832         }
3833
3834         /* Load the Read-Only area. */
3835         addr = be32_to_cpu(fw_entry->rodata.addr);
3836         len = be32_to_cpu(fw_entry->rodata.len);
3837         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3838         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3839
3840         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3841         if (len) {
3842                 int j;
3843
3844                 for (j = 0; j < (len / 4); j++, offset += 4)
3845                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3846         }
3847
3848         /* Clear the pre-fetch instruction. */
3849         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3850
3851         val = be32_to_cpu(fw_entry->start_addr);
3852         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3853
3854         /* Start the CPU. */
3855         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3856         val &= ~cpu_reg->mode_value_halt;
3857         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3858         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3859
3860         return 0;
3861 }
3862
3863 static int
3864 bnx2_init_cpus(struct bnx2 *bp)
3865 {
3866         const struct bnx2_mips_fw_file *mips_fw =
3867                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3868         const struct bnx2_rv2p_fw_file *rv2p_fw =
3869                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3870         int rc;
3871
3872         /* Initialize the RV2P processor. */
3873         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3874         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3875
3876         /* Initialize the RX Processor. */
3877         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3878         if (rc)
3879                 goto init_cpu_err;
3880
3881         /* Initialize the TX Processor. */
3882         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3883         if (rc)
3884                 goto init_cpu_err;
3885
3886         /* Initialize the TX Patch-up Processor. */
3887         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3888         if (rc)
3889                 goto init_cpu_err;
3890
3891         /* Initialize the Completion Processor. */
3892         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3893         if (rc)
3894                 goto init_cpu_err;
3895
3896         /* Initialize the Command Processor. */
3897         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3898
3899 init_cpu_err:
3900         return rc;
3901 }
3902
/* Move the device between PCI power states.  Only D0 and D3hot are
 * supported.  Entering D3hot optionally configures Wake-on-LAN: the
 * PHY is restricted to 10/100 autoneg on copper, the MAC is set up to
 * accept magic/ACPI packets, and the firmware is told which suspend
 * mode to use.  Returns 0 on success, -EINVAL for other states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set the state bits to D0 and clear any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the wake-up packet configuration set for D3hot. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * the low-power link, restoring the user settings
			 * afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast only. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware which suspend mode to enter. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Program the D3hot state bits; on 5706 A0/A1 only when
		 * WOL is enabled -- presumably a chip errata workaround,
		 * TODO confirm against the errata sheet.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4040
4041 static int
4042 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4043 {
4044         u32 val;
4045         int j;
4046
4047         /* Request access to the flash interface. */
4048         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4049         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4050                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4051                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4052                         break;
4053
4054                 udelay(5);
4055         }
4056
4057         if (j >= NVRAM_TIMEOUT_COUNT)
4058                 return -EBUSY;
4059
4060         return 0;
4061 }
4062
4063 static int
4064 bnx2_release_nvram_lock(struct bnx2 *bp)
4065 {
4066         int j;
4067         u32 val;
4068
4069         /* Relinquish nvram interface. */
4070         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4071
4072         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4073                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4074                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4075                         break;
4076
4077                 udelay(5);
4078         }
4079
4080         if (j >= NVRAM_TIMEOUT_COUNT)
4081                 return -EBUSY;
4082
4083         return 0;
4084 }
4085
4086
4087 static int
4088 bnx2_enable_nvram_write(struct bnx2 *bp)
4089 {
4090         u32 val;
4091
4092         val = REG_RD(bp, BNX2_MISC_CFG);
4093         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4094
4095         if (bp->flash_info->flags & BNX2_NV_WREN) {
4096                 int j;
4097
4098                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4099                 REG_WR(bp, BNX2_NVM_COMMAND,
4100                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4101
4102                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4103                         udelay(5);
4104
4105                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4106                         if (val & BNX2_NVM_COMMAND_DONE)
4107                                 break;
4108                 }
4109
4110                 if (j >= NVRAM_TIMEOUT_COUNT)
4111                         return -EBUSY;
4112         }
4113         return 0;
4114 }
4115
4116 static void
4117 bnx2_disable_nvram_write(struct bnx2 *bp)
4118 {
4119         u32 val;
4120
4121         val = REG_RD(bp, BNX2_MISC_CFG);
4122         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4123 }
4124
4125
4126 static void
4127 bnx2_enable_nvram_access(struct bnx2 *bp)
4128 {
4129         u32 val;
4130
4131         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4132         /* Enable both bits, even on read. */
4133         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4134                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4135 }
4136
4137 static void
4138 bnx2_disable_nvram_access(struct bnx2 *bp)
4139 {
4140         u32 val;
4141
4142         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4143         /* Disable both bits, even after read. */
4144         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4145                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4146                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4147 }
4148
4149 static int
4150 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4151 {
4152         u32 cmd;
4153         int j;
4154
4155         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4156                 /* Buffered flash, no erase needed */
4157                 return 0;
4158
4159         /* Build an erase command */
4160         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4161               BNX2_NVM_COMMAND_DOIT;
4162
4163         /* Need to clear DONE bit separately. */
4164         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4165
4166         /* Address of the NVRAM to read from. */
4167         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4168
4169         /* Issue an erase command. */
4170         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4171
4172         /* Wait for completion. */
4173         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4174                 u32 val;
4175
4176                 udelay(5);
4177
4178                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4179                 if (val & BNX2_NVM_COMMAND_DONE)
4180                         break;
4181         }
4182
4183         if (j >= NVRAM_TIMEOUT_COUNT)
4184                 return -EBUSY;
4185
4186         return 0;
4187 }
4188
4189 static int
4190 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4191 {
4192         u32 cmd;
4193         int j;
4194
4195         /* Build the command word. */
4196         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4197
4198         /* Calculate an offset of a buffered flash, not needed for 5709. */
4199         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4200                 offset = ((offset / bp->flash_info->page_size) <<
4201                            bp->flash_info->page_bits) +
4202                           (offset % bp->flash_info->page_size);
4203         }
4204
4205         /* Need to clear DONE bit separately. */
4206         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4207
4208         /* Address of the NVRAM to read from. */
4209         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4210
4211         /* Issue a read command. */
4212         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4213
4214         /* Wait for completion. */
4215         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4216                 u32 val;
4217
4218                 udelay(5);
4219
4220                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4221                 if (val & BNX2_NVM_COMMAND_DONE) {
4222                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4223                         memcpy(ret_val, &v, 4);
4224                         break;
4225                 }
4226         }
4227         if (j >= NVRAM_TIMEOUT_COUNT)
4228                 return -EBUSY;
4229
4230         return 0;
4231 }
4232
4233
4234 static int
4235 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4236 {
4237         u32 cmd;
4238         __be32 val32;
4239         int j;
4240
4241         /* Build the command word. */
4242         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4243
4244         /* Calculate an offset of a buffered flash, not needed for 5709. */
4245         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4246                 offset = ((offset / bp->flash_info->page_size) <<
4247                           bp->flash_info->page_bits) +
4248                          (offset % bp->flash_info->page_size);
4249         }
4250
4251         /* Need to clear DONE bit separately. */
4252         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4253
4254         memcpy(&val32, val, 4);
4255
4256         /* Write the data. */
4257         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4258
4259         /* Address of the NVRAM to write to. */
4260         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4261
4262         /* Issue the write command. */
4263         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4264
4265         /* Wait for completion. */
4266         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4267                 udelay(5);
4268
4269                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4270                         break;
4271         }
4272         if (j >= NVRAM_TIMEOUT_COUNT)
4273                 return -EBUSY;
4274
4275         return 0;
4276 }
4277
/* Identify the attached flash/EEPROM part and record its geometry.
 *
 * On the 5709 the NVRAM interface is fixed, so the flash_table lookup
 * is skipped.  Otherwise the strapping bits read from NVM_CFG1 are
 * matched against flash_table[]; when the interface has not yet been
 * reconfigured by firmware, the matching entry's config registers are
 * programmed under the NVRAM arbitration lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc = 0;
        const struct flash_spec *flash;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
                goto get_flash_size;
        }

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = ARRAY_SIZE(flash_table);

        /* NOTE(review): bit 30 presumably means "already reconfigured
         * by boot code" -- confirm against the NVM_CFG1 register spec. */
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        /* Match on the backup strapping bits of the
                         * programmed config1 value. */
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strapping field is valid. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Neither loop broke out: unknown part. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                pr_alert("Unknown flash/EEPROM type\n");
                return -ENODEV;
        }

get_flash_size:
        /* Prefer the size advertised in shared memory; fall back to
         * the table entry's total_size. */
        val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
4360
4361 static int
4362 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4363                 int buf_size)
4364 {
4365         int rc = 0;
4366         u32 cmd_flags, offset32, len32, extra;
4367
4368         if (buf_size == 0)
4369                 return 0;
4370
4371         /* Request access to the flash interface. */
4372         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4373                 return rc;
4374
4375         /* Enable access to flash interface */
4376         bnx2_enable_nvram_access(bp);
4377
4378         len32 = buf_size;
4379         offset32 = offset;
4380         extra = 0;
4381
4382         cmd_flags = 0;
4383
4384         if (offset32 & 3) {
4385                 u8 buf[4];
4386                 u32 pre_len;
4387
4388                 offset32 &= ~3;
4389                 pre_len = 4 - (offset & 3);
4390
4391                 if (pre_len >= len32) {
4392                         pre_len = len32;
4393                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4394                                     BNX2_NVM_COMMAND_LAST;
4395                 }
4396                 else {
4397                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4398                 }
4399
4400                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4401
4402                 if (rc)
4403                         return rc;
4404
4405                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4406
4407                 offset32 += 4;
4408                 ret_buf += pre_len;
4409                 len32 -= pre_len;
4410         }
4411         if (len32 & 3) {
4412                 extra = 4 - (len32 & 3);
4413                 len32 = (len32 + 4) & ~3;
4414         }
4415
4416         if (len32 == 4) {
4417                 u8 buf[4];
4418
4419                 if (cmd_flags)
4420                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4421                 else
4422                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4423                                     BNX2_NVM_COMMAND_LAST;
4424
4425                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4426
4427                 memcpy(ret_buf, buf, 4 - extra);
4428         }
4429         else if (len32 > 0) {
4430                 u8 buf[4];
4431
4432                 /* Read the first word. */
4433                 if (cmd_flags)
4434                         cmd_flags = 0;
4435                 else
4436                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4437
4438                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4439
4440                 /* Advance to the next dword. */
4441                 offset32 += 4;
4442                 ret_buf += 4;
4443                 len32 -= 4;
4444
4445                 while (len32 > 4 && rc == 0) {
4446                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4447
4448                         /* Advance to the next dword. */
4449                         offset32 += 4;
4450                         ret_buf += 4;
4451                         len32 -= 4;
4452                 }
4453
4454                 if (rc)
4455                         return rc;
4456
4457                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4458                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4459
4460                 memcpy(ret_buf, buf, 4 - extra);
4461         }
4462
4463         /* Disable access to flash interface */
4464         bnx2_disable_nvram_access(bp);
4465
4466         bnx2_release_nvram_lock(bp);
4467
4468         return rc;
4469 }
4470
/* Write buf_size bytes from data_buf to NVRAM at byte 'offset'.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the
 * surrounding dwords are first read back and merged into an aligned
 * bounce buffer.  The write then proceeds page by page; for
 * non-buffered flash each page is read into flash_buffer, erased, and
 * rewritten (old data outside the target range, new data inside).
 * The NVRAM lock is acquired and released per page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, lock/IO errors).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Unaligned start: fetch the dword we will partially overwrite. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Unaligned end: fetch the trailing dword likewise. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Merge old edge bytes and new payload into one aligned buffer. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a page-sized scratch buffer.
         * NOTE(review): 264 is presumably the largest page size of any
         * non-buffered entry in flash_table -- confirm. */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        /* Process one flash page per iteration. */
        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write.
                         * NOTE(review): this return value is ignored,
                         * unlike the call above -- verify intentional. */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        /* LAST flags the final dword of the page, or of
                         * the whole burst on buffered flash. */
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4650
4651 static void
4652 bnx2_init_fw_cap(struct bnx2 *bp)
4653 {
4654         u32 val, sig = 0;
4655
4656         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4657         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4658
4659         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4660                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4661
4662         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4663         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4664                 return;
4665
4666         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4667                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4668                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4669         }
4670
4671         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4672             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4673                 u32 link;
4674
4675                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4676
4677                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4678                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4679                         bp->phy_port = PORT_FIBRE;
4680                 else
4681                         bp->phy_port = PORT_TP;
4682
4683                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4684                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4685         }
4686
4687         if (netif_running(bp->dev) && sig)
4688                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4689 }
4690
/* Map the MSI-X table and PBA through dedicated GRC windows so the
 * host can reach them via the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        /* Put GRC windows into separate-window mode first. */
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4699
/* Perform a full soft reset of the chip.
 *
 * Sequence: quiesce DMA, synchronize with the bootcode firmware
 * (reset_code tells it why), issue the chip-specific reset, verify
 * endian configuration, wait for firmware re-init, then re-apply
 * state that the reset cleared (remote-PHY config, 5706 A0 erratum
 * workarounds, MSI-X table mapping).
 *
 * Returns 0 on success or a negative errno on reset/firmware failure.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                /* Stop all DMA engines and host coalescing, then read
                 * back to flush the write before the settle delay. */
                REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
                       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
                val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
                udelay(5);
        } else {  /* 5709 */
                /* Gate DMA at the core level and poll up to 100 ms for
                 * pending PCIe transactions to drain. */
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

                for (i = 0; i < 100; i++) {
                        msleep(1);
                        val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
                        if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
                                break;
                }
        }

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 resets via the MISC command register; the
                 * read-back flushes the posted write. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                /* Restore register-window access after the reset. */
                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                /* Older chips reset via the core-reset request bit. */
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        pr_err("Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                pr_err("Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities; a changed PHY port means the
         * remote-PHY default link config must be reprogrammed. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                bnx2_setup_msix_tbl(bp);
                /* Prevent MSIX table reads and write from timing out */
                REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
                        BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
        }

        return rc;
}
4824
4825 static int
4826 bnx2_init_chip(struct bnx2 *bp)
4827 {
4828         u32 val, mtu;
4829         int rc, i;
4830
4831         /* Make sure the interrupt is not active. */
4832         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4833
4834         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4835               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4836 #ifdef __BIG_ENDIAN
4837               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4838 #endif
4839               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4840               DMA_READ_CHANS << 12 |
4841               DMA_WRITE_CHANS << 16;
4842
4843         val |= (0x2 << 20) | (1 << 11);
4844
4845         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4846                 val |= (1 << 23);
4847
4848         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4849             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4850                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4851
4852         REG_WR(bp, BNX2_DMA_CONFIG, val);
4853
4854         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4855                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4856                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4857                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4858         }
4859
4860         if (bp->flags & BNX2_FLAG_PCIX) {
4861                 u16 val16;
4862
4863                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4864                                      &val16);
4865                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4866                                       val16 & ~PCI_X_CMD_ERO);
4867         }
4868
4869         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4870                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4871                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4872                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4873
4874         /* Initialize context mapping and zero out the quick contexts.  The
4875          * context block must have already been enabled. */
4876         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4877                 rc = bnx2_init_5709_context(bp);
4878                 if (rc)
4879                         return rc;
4880         } else
4881                 bnx2_init_context(bp);
4882
4883         if ((rc = bnx2_init_cpus(bp)) != 0)
4884                 return rc;
4885
4886         bnx2_init_nvram(bp);
4887
4888         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4889
4890         val = REG_RD(bp, BNX2_MQ_CONFIG);
4891         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4892         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4893         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4894                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4895                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4896                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4897         }
4898
4899         REG_WR(bp, BNX2_MQ_CONFIG, val);
4900
4901         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4902         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4903         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4904
4905         val = (BCM_PAGE_BITS - 8) << 24;
4906         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4907
4908         /* Configure page size. */
4909         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4910         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4911         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4912         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4913
4914         val = bp->mac_addr[0] +
4915               (bp->mac_addr[1] << 8) +
4916               (bp->mac_addr[2] << 16) +
4917               bp->mac_addr[3] +
4918               (bp->mac_addr[4] << 8) +
4919               (bp->mac_addr[5] << 16);
4920         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4921
4922         /* Program the MTU.  Also include 4 bytes for CRC32. */
4923         mtu = bp->dev->mtu;
4924         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4925         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4926                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4927         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4928
4929         if (mtu < 1500)
4930                 mtu = 1500;
4931
4932         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4933         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4934         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4935
4936         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4937         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4938                 bp->bnx2_napi[i].last_status_idx = 0;
4939
4940         bp->idle_chk_status_idx = 0xffff;
4941
4942         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4943
4944         /* Set up how to generate a link change interrupt. */
4945         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4946
4947         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4948                (u64) bp->status_blk_mapping & 0xffffffff);
4949         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4950
4951         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4952                (u64) bp->stats_blk_mapping & 0xffffffff);
4953         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4954                (u64) bp->stats_blk_mapping >> 32);
4955
4956         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4957                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4958
4959         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4960                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4961
4962         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4963                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4964
4965         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4966
4967         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4968
4969         REG_WR(bp, BNX2_HC_COM_TICKS,
4970                (bp->com_ticks_int << 16) | bp->com_ticks);
4971
4972         REG_WR(bp, BNX2_HC_CMD_TICKS,
4973                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4974
4975         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4976                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4977         else
4978                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4979         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4980
4981         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4982                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4983         else {
4984                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4985                       BNX2_HC_CONFIG_COLLECT_STATS;
4986         }
4987
4988         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4989                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4990                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4991
4992                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4993         }
4994
4995         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4996                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4997
4998         REG_WR(bp, BNX2_HC_CONFIG, val);
4999
5000         if (bp->rx_ticks < 25)
5001                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5002         else
5003                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5004
5005         for (i = 1; i < bp->irq_nvecs; i++) {
5006                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5007                            BNX2_HC_SB_CONFIG_1;
5008
5009                 REG_WR(bp, base,
5010                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5011                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5012                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5013
5014                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5015                         (bp->tx_quick_cons_trip_int << 16) |
5016                          bp->tx_quick_cons_trip);
5017
5018                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5019                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5020
5021                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5022                        (bp->rx_quick_cons_trip_int << 16) |
5023                         bp->rx_quick_cons_trip);
5024
5025                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5026                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5027         }
5028
5029         /* Clear internal stats counters. */
5030         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5031
5032         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5033
5034         /* Initialize the receive filter. */
5035         bnx2_set_rx_mode(bp->dev);
5036
5037         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5038                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5039                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5040                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5041         }
5042         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5043                           1, 0);
5044
5045         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5046         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5047
5048         udelay(20);
5049
5050         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5051
5052         return rc;
5053 }
5054
5055 static void
5056 bnx2_clear_ring_states(struct bnx2 *bp)
5057 {
5058         struct bnx2_napi *bnapi;
5059         struct bnx2_tx_ring_info *txr;
5060         struct bnx2_rx_ring_info *rxr;
5061         int i;
5062
5063         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5064                 bnapi = &bp->bnx2_napi[i];
5065                 txr = &bnapi->tx_ring;
5066                 rxr = &bnapi->rx_ring;
5067
5068                 txr->tx_cons = 0;
5069                 txr->hw_tx_cons = 0;
5070                 rxr->rx_prod_bseq = 0;
5071                 rxr->rx_prod = 0;
5072                 rxr->rx_cons = 0;
5073                 rxr->rx_pg_prod = 0;
5074                 rxr->rx_pg_cons = 0;
5075         }
5076 }
5077
5078 static void
5079 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5080 {
5081         u32 val, offset0, offset1, offset2, offset3;
5082         u32 cid_addr = GET_CID_ADDR(cid);
5083
5084         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5085                 offset0 = BNX2_L2CTX_TYPE_XI;
5086                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5087                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5088                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5089         } else {
5090                 offset0 = BNX2_L2CTX_TYPE;
5091                 offset1 = BNX2_L2CTX_CMD_TYPE;
5092                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5093                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5094         }
5095         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5096         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5097
5098         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5099         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5100
5101         val = (u64) txr->tx_desc_mapping >> 32;
5102         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5103
5104         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5105         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5106 }
5107
5108 static void
5109 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5110 {
5111         struct tx_bd *txbd;
5112         u32 cid = TX_CID;
5113         struct bnx2_napi *bnapi;
5114         struct bnx2_tx_ring_info *txr;
5115
5116         bnapi = &bp->bnx2_napi[ring_num];
5117         txr = &bnapi->tx_ring;
5118
5119         if (ring_num == 0)
5120                 cid = TX_CID;
5121         else
5122                 cid = TX_TSS_CID + ring_num - 1;
5123
5124         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5125
5126         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5127
5128         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5129         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5130
5131         txr->tx_prod = 0;
5132         txr->tx_prod_bseq = 0;
5133
5134         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5135         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5136
5137         bnx2_init_tx_context(bp, cid, txr);
5138 }
5139
5140 static void
5141 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5142                      int num_rings)
5143 {
5144         int i;
5145         struct rx_bd *rxbd;
5146
5147         for (i = 0; i < num_rings; i++) {
5148                 int j;
5149
5150                 rxbd = &rx_ring[i][0];
5151                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5152                         rxbd->rx_bd_len = buf_size;
5153                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5154                 }
5155                 if (i == (num_rings - 1))
5156                         j = 0;
5157                 else
5158                         j = i + 1;
5159                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5160                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5161         }
5162 }
5163
/* Initialize rx ring @ring_num: build the BD chains, program the rx
 * context (and the page-buffer context for jumbo mode), pre-fill the
 * rings with buffers, and publish the initial producer indices to the
 * chip mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx context; additional RSS rings get
	 * consecutive CIDs following RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Disable the page ring first; re-enabled below when jumbo
	 * page buffers are configured.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Bus address of the first page-BD ring page */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Bus address of the first rx-BD ring page */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning and whatever was allocated so far.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the data-buffer ring the same way */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the hot path */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the buffers just posted */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5249
/* Initialize every tx and rx ring and, when multiple rings are in use,
 * program the TSS tx-scheduler config and the RSS indirection table
 * and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Fill the RSS indirection table round-robin over the
		 * non-default rx rings.  Each 32-bit word packs eight
		 * 4-bit entries and is written to the chip when full.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for IPv4 and IPv6 */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5296
5297 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5298 {
5299         u32 max, num_rings = 1;
5300
5301         while (ring_size > MAX_RX_DESC_CNT) {
5302                 ring_size -= MAX_RX_DESC_CNT;
5303                 num_rings++;
5304         }
5305         /* round to next power of 2 */
5306         max = max_size;
5307         while ((max & num_rings) == 0)
5308                 max >>= 1;
5309
5310         if (num_rings != max)
5311                 max <<= 1;
5312
5313         return max;
5314 }
5315
/* Derive all rx buffer geometry from the requested ring @size and the
 * current MTU: copy-break threshold, data buffer sizes, the jumbo
 * page-ring size, and the number of BD ring pages for each ring.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total allocation a full-size buffer would need, including
	 * alignment and the skb_shared_info tail used by build_skb().
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* If one buffer would exceed a page, switch to the jumbo
	 * scheme: small header buffers plus a ring of page buffers
	 * (unless the chip's jumbo support is broken).
	 */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Header buffer only needs to cover the copy-break size */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5355
/* Unmap and free every tx skb still owned by the driver on all tx
 * rings, then reset each queue's byte-queue-limit state.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances by 1 + nr_frags per skb, not per slot */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* First BD maps the linear part of the skb */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Subsequent BDs map the page fragments;
			 * TX_RING_IDX handles wrap past the ring end.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5400
/* Unmap and free every rx data buffer and rx page still posted on all
 * rx rings.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): a missing ring ends the whole scan
		 * (return, not continue) — presumably all rings are
		 * allocated together so later ones cannot exist;
		 * confirm against the ring allocation path.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (data == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			/* Clear the slot before freeing the buffer */
			rx_buf->data = NULL;

			kfree(data);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5434
/* Release all driver-owned tx and rx buffers. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5441
5442 static int
5443 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5444 {
5445         int rc;
5446
5447         rc = bnx2_reset_chip(bp, reset_code);
5448         bnx2_free_skbs(bp);
5449         if (rc)
5450                 return rc;
5451
5452         if ((rc = bnx2_init_chip(bp)) != 0)
5453                 return rc;
5454
5455         bnx2_init_all_rings(bp);
5456         return 0;
5457 }
5458
5459 static int
5460 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5461 {
5462         int rc;
5463
5464         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5465                 return rc;
5466
5467         spin_lock_bh(&bp->phy_lock);
5468         bnx2_init_phy(bp, reset_phy);
5469         bnx2_set_link(bp);
5470         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5471                 bnx2_remote_phy_event(bp);
5472         spin_unlock_bh(&bp->phy_lock);
5473         return 0;
5474 }
5475
5476 static int
5477 bnx2_shutdown_chip(struct bnx2 *bp)
5478 {
5479         u32 reset_code;
5480
5481         if (bp->flags & BNX2_FLAG_NO_WOL)
5482                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5483         else if (bp->wol)
5484                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5485         else
5486                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5487
5488         return bnx2_reset_chip(bp, reset_code);
5489 }
5490
/* Ethtool register self-test.  For each table entry, write 0 and then
 * all-ones to the register and verify that the read/write bits
 * (rw_mask) take the written value while the read-only bits (ro_mask)
 * keep their saved value.  The original value is restored in all
 * cases.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, writable-bit mask, read-only-bit mask } */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709 */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		/* After writing 0, all rw bits must read back 0 */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ...and ro bits must be unchanged */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		/* After writing all-ones, all rw bits must read back 1 */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		/* ...and ro bits must still be unchanged */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5661
5662 static int
5663 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5664 {
5665         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5666                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5667         int i;
5668
5669         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5670                 u32 offset;
5671
5672                 for (offset = 0; offset < size; offset += 4) {
5673
5674                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5675
5676                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5677                                 test_pattern[i]) {
5678                                 return -ENODEV;
5679                         }
5680                 }
5681         }
5682         return 0;
5683 }
5684
5685 static int
5686 bnx2_test_memory(struct bnx2 *bp)
5687 {
5688         int ret = 0;
5689         int i;
5690         static struct mem_entry {
5691                 u32   offset;
5692                 u32   len;
5693         } mem_tbl_5706[] = {
5694                 { 0x60000,  0x4000 },
5695                 { 0xa0000,  0x3000 },
5696                 { 0xe0000,  0x4000 },
5697                 { 0x120000, 0x4000 },
5698                 { 0x1a0000, 0x4000 },
5699                 { 0x160000, 0x4000 },
5700                 { 0xffffffff, 0    },
5701         },
5702         mem_tbl_5709[] = {
5703                 { 0x60000,  0x4000 },
5704                 { 0xa0000,  0x3000 },
5705                 { 0xe0000,  0x4000 },
5706                 { 0x120000, 0x4000 },
5707                 { 0x1a0000, 0x4000 },
5708                 { 0xffffffff, 0    },
5709         };
5710         struct mem_entry *mem_tbl;
5711
5712         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5713                 mem_tbl = mem_tbl_5709;
5714         else
5715                 mem_tbl = mem_tbl_5706;
5716
5717         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5718                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5719                         mem_tbl[i].len)) != 0) {
5720                         return ret;
5721                 }
5722         }
5723
5724         return ret;
5725 }
5726
5727 #define BNX2_MAC_LOOPBACK       0
5728 #define BNX2_PHY_LOOPBACK       1
5729
/* Self-test helper: transmit one packet with the MAC (or PHY) looped back
 * and verify the same packet is received intact on ring 0.
 *
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK.
 *
 * Returns 0 if the looped packet arrives with the expected length and
 * payload, -EINVAL for an unknown mode, -ENOMEM/-EIO on allocation or
 * DMA-mapping failure, and -ENODEV if the packet is missing or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb;
        u8 *data;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        tx_napi = bnapi;

        /* NOTE(review): txr/rxr are re-assigned the same values as their
         * initializers above; looks redundant but is kept as-is. */
        txr = &tx_napi->tx_ring;
        rxr = &bnapi->rx_ring;
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                /* A remotely managed PHY cannot be put in loopback here;
                 * report success so the self-test does not flag it. */
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Largest frame that still fits one rx buffer; the -4 leaves
         * room for the CRC. */
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        /* Dest MAC = our own address, zeroed src/type area, then a
         * counting byte pattern we can verify on receive. */
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
                             PCI_DMA_TODEVICE);
        if (dma_mapping_error(&bp->pdev->dev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        /* Force a host-coalesce update (no interrupt) so the status block
         * reflects the current rx consumer index before we transmit. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

        num_pkts = 0;

        /* Single tx descriptor carrying the whole packet. */
        txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
        txr->tx_prod_bseq += pkt_size;

        /* Ring the tx doorbell. */
        REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        udelay(100);

        /* Coalesce again so the status block shows the looped-back
         * tx completion and rx arrival. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* Transmit must have completed... */
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
                goto loopback_test_done;

        /* ...and exactly num_pkts packets must have arrived. */
        rx_idx = bnx2_get_hw_rx_cons(bnapi);
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
        data = rx_buf->data;

        /* Hardware writes an l2_fhdr ahead of the frame data. */
        rx_hdr = get_l2_fhdr(data);
        data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

        dma_sync_single_for_cpu(&bp->pdev->dev,
                dma_unmap_addr(rx_buf, mapping),
                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame if hardware flagged any receive error. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length must match exactly (minus the 4-byte CRC). */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the counting payload pattern byte-for-byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
5862
5863 #define BNX2_MAC_LOOPBACK_FAILED        1
5864 #define BNX2_PHY_LOOPBACK_FAILED        2
5865 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5866                                          BNX2_PHY_LOOPBACK_FAILED)
5867
5868 static int
5869 bnx2_test_loopback(struct bnx2 *bp)
5870 {
5871         int rc = 0;
5872
5873         if (!netif_running(bp->dev))
5874                 return BNX2_LOOPBACK_FAILED;
5875
5876         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5877         spin_lock_bh(&bp->phy_lock);
5878         bnx2_init_phy(bp, 1);
5879         spin_unlock_bh(&bp->phy_lock);
5880         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5881                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5882         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5883                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5884         return rc;
5885 }
5886
5887 #define NVRAM_SIZE 0x200
5888 #define CRC32_RESIDUAL 0xdebb20e3
5889
5890 static int
5891 bnx2_test_nvram(struct bnx2 *bp)
5892 {
5893         __be32 buf[NVRAM_SIZE / 4];
5894         u8 *data = (u8 *) buf;
5895         int rc = 0;
5896         u32 magic, csum;
5897
5898         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5899                 goto test_nvram_done;
5900
5901         magic = be32_to_cpu(buf[0]);
5902         if (magic != 0x669955aa) {
5903                 rc = -ENODEV;
5904                 goto test_nvram_done;
5905         }
5906
5907         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5908                 goto test_nvram_done;
5909
5910         csum = ether_crc_le(0x100, data);
5911         if (csum != CRC32_RESIDUAL) {
5912                 rc = -ENODEV;
5913                 goto test_nvram_done;
5914         }
5915
5916         csum = ether_crc_le(0x100, data + 0x100);
5917         if (csum != CRC32_RESIDUAL) {
5918                 rc = -ENODEV;
5919         }
5920
5921 test_nvram_done:
5922         return rc;
5923 }
5924
5925 static int
5926 bnx2_test_link(struct bnx2 *bp)
5927 {
5928         u32 bmsr;
5929
5930         if (!netif_running(bp->dev))
5931                 return -ENODEV;
5932
5933         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5934                 if (bp->link_up)
5935                         return 0;
5936                 return -ENODEV;
5937         }
5938         spin_lock_bh(&bp->phy_lock);
5939         bnx2_enable_bmsr1(bp);
5940         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5941         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5942         bnx2_disable_bmsr1(bp);
5943         spin_unlock_bh(&bp->phy_lock);
5944
5945         if (bmsr & BMSR_LSTATUS) {
5946                 return 0;
5947         }
5948         return -ENODEV;
5949 }
5950
5951 static int
5952 bnx2_test_intr(struct bnx2 *bp)
5953 {
5954         int i;
5955         u16 status_idx;
5956
5957         if (!netif_running(bp->dev))
5958                 return -ENODEV;
5959
5960         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5961
5962         /* This register is not touched during run-time. */
5963         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5964         REG_RD(bp, BNX2_HC_COMMAND);
5965
5966         for (i = 0; i < 10; i++) {
5967                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5968                         status_idx) {
5969
5970                         break;
5971                 }
5972
5973                 msleep_interruptible(10);
5974         }
5975         if (i < 10)
5976                 return 0;
5977
5978         return -ENODEV;
5979 }
5980
/* Determining link for parallel detection.
 *
 * Probes the 5706S PHY shadow/debug registers to decide whether the link
 * partner is up without autonegotiating.  Returns 1 when a parallel-detect
 * link appears present, 0 otherwise (including when parallel detection is
 * disabled via BNX2_PHY_FLAG_NO_PARALLEL).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* No link without signal detect. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Read twice — presumably the first read returns latched state;
         * TODO(review): confirm against the PHY documentation. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        /* Out of sync or invalid RUDI means no usable link. */
        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        /* Same double-read pattern for the DSP expansion register. */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
6012
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer() (softirq context, hence plain spin_lock on phy_lock).
 *
 * Implements parallel detection: when autoneg gets no response but a
 * link partner is present, force 1G full duplex; once the partner is
 * seen autonegotiating again, re-enable autoneg.  Also forces the link
 * down/up when the PHY's sync state disagrees with bp->link_up.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* An autoneg attempt is still pending; skip the link
                 * check this tick. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg enabled but no link: if the partner
                         * looks like a non-autoneg device, force
                         * 1G/full (parallel detect). */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link is up via parallel detect; check whether the
                 * partner has started autonegotiating.
                 * NOTE(review): registers 0x17/0x15 and bit 0x20 are
                 * undocumented here — confirm against the PHY datasheet. */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Partner autonegs again: return to autoneg. */
                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Sample the autoneg debug register twice (presumably to
                 * skip latched state) and reconcile with bp->link_up. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                /* First NOSYNC while up: force link down. */
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
6074
6075 static void
6076 bnx2_5708_serdes_timer(struct bnx2 *bp)
6077 {
6078         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6079                 return;
6080
6081         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6082                 bp->serdes_an_pending = 0;
6083                 return;
6084         }
6085
6086         spin_lock(&bp->phy_lock);
6087         if (bp->serdes_an_pending)
6088                 bp->serdes_an_pending--;
6089         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6090                 u32 bmcr;
6091
6092                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6093                 if (bmcr & BMCR_ANENABLE) {
6094                         bnx2_enable_forced_2g5(bp);
6095                         bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6096                 } else {
6097                         bnx2_disable_forced_2g5(bp);
6098                         bp->serdes_an_pending = 2;
6099                         bp->current_interval = BNX2_TIMER_INTERVAL;
6100                 }
6101
6102         } else
6103                 bp->current_interval = BNX2_TIMER_INTERVAL;
6104
6105         spin_unlock(&bp->phy_lock);
6106 }
6107
/* Main driver timer callback, re-armed every bp->current_interval
 * jiffies while the interface is up.  Runs the missed-MSI workaround,
 * firmware heartbeat, a broken-statistics workaround, and the SerDes
 * link maintenance routines.  @data is the struct bnx2 pointer (legacy
 * timer API).
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are currently held off (e.g. during reset):
         * do nothing but re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Only plain MSI (not one-shot MSI) needs the missed-MSI check. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Mirror the firmware rx-drop counter into the stats block. */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6143
6144 static int
6145 bnx2_request_irq(struct bnx2 *bp)
6146 {
6147         unsigned long flags;
6148         struct bnx2_irq *irq;
6149         int rc = 0, i;
6150
6151         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6152                 flags = 0;
6153         else
6154                 flags = IRQF_SHARED;
6155
6156         for (i = 0; i < bp->irq_nvecs; i++) {
6157                 irq = &bp->irq_tbl[i];
6158                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6159                                  &bp->bnx2_napi[i]);
6160                 if (rc)
6161                         break;
6162                 irq->requested = 1;
6163         }
6164         return rc;
6165 }
6166
6167 static void
6168 __bnx2_free_irq(struct bnx2 *bp)
6169 {
6170         struct bnx2_irq *irq;
6171         int i;
6172
6173         for (i = 0; i < bp->irq_nvecs; i++) {
6174                 irq = &bp->irq_tbl[i];
6175                 if (irq->requested)
6176                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6177                 irq->requested = 0;
6178         }
6179 }
6180
6181 static void
6182 bnx2_free_irq(struct bnx2 *bp)
6183 {
6184
6185         __bnx2_free_irq(bp);
6186         if (bp->flags & BNX2_FLAG_USING_MSI)
6187                 pci_disable_msi(bp->pdev);
6188         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6189                 pci_disable_msix(bp->pdev);
6190
6191         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6192 }
6193
6194 static void
6195 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6196 {
6197         int i, total_vecs, rc;
6198         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6199         struct net_device *dev = bp->dev;
6200         const int len = sizeof(bp->irq_tbl[0].name);
6201
6202         bnx2_setup_msix_tbl(bp);
6203         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6204         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6205         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6206
6207         /*  Need to flush the previous three writes to ensure MSI-X
6208          *  is setup properly */
6209         REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6210
6211         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6212                 msix_ent[i].entry = i;
6213                 msix_ent[i].vector = 0;
6214         }
6215
6216         total_vecs = msix_vecs;
6217 #ifdef BCM_CNIC
6218         total_vecs++;
6219 #endif
6220         rc = -ENOSPC;
6221         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6222                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6223                 if (rc <= 0)
6224                         break;
6225                 if (rc > 0)
6226                         total_vecs = rc;
6227         }
6228
6229         if (rc != 0)
6230                 return;
6231
6232         msix_vecs = total_vecs;
6233 #ifdef BCM_CNIC
6234         msix_vecs--;
6235 #endif
6236         bp->irq_nvecs = msix_vecs;
6237         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6238         for (i = 0; i < total_vecs; i++) {
6239                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6240                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6241                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6242         }
6243 }
6244
/* Choose the interrupt mode (MSI-X, then MSI, then legacy INTx) and
 * derive the tx/rx ring counts.  @dis_msi forces INTx.  Fills
 * bp->irq_tbl / bp->irq_nvecs and sets the real tx/rx queue counts.
 *
 * Returns 0 or the error from netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs;

        /* Size the vector request from the requested ring counts,
         * defaulting from the online CPU count when not requested. */
        if (!bp->num_req_rx_rings)
                msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
        else if (!bp->num_req_tx_rings)
                msix_vecs = max(cpus, bp->num_req_rx_rings);
        else
                msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

        msix_vecs = min(msix_vecs, RX_MAX_RINGS);

        /* Default: one legacy INTx vector. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to MSI when MSI-X could not be enabled. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* tx ring count defaults to a power of two <= vector count. */
        if (!bp->num_req_tx_rings)
                bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        else
                bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

        if (!bp->num_req_rx_rings)
                bp->num_rx_rings = bp->irq_nvecs;
        else
                bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

        netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

        return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6296
/* Called with rtnl_lock */
/* ndo_open handler: bring the device fully up — firmware, interrupt
 * mode, NAPI, memory, IRQs, NIC init, timer — unwinding everything on
 * failure.  Also verifies that MSI actually delivers interrupts and
 * falls back to INTx when it does not.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        rc = bnx2_request_firmware(bp);
        if (rc < 0)
                goto out;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_setup_int_mode(bp, disable_msi);
        if (rc)
                goto open_err;
        bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        /* Allow interrupt handling from here on. */
        atomic_set(&bp->intr_sem, 0);

        memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Re-initialize in forced INTx mode. */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                netdev_info(dev, "using MSI\n");
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                netdev_info(dev, "using MSIX\n");

        netif_tx_start_all_queues(dev);
out:
        return rc;

open_err:
        /* Unwind everything set up above.  NOTE(review): assumes each
         * free/disable helper tolerates resources that were never
         * acquired — consistent with their use here. */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bnx2_release_firmware(bp);
        goto out;
}
6380
6381 static void
6382 bnx2_reset_task(struct work_struct *work)
6383 {
6384         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6385         int rc;
6386
6387         rtnl_lock();
6388         if (!netif_running(bp->dev)) {
6389                 rtnl_unlock();
6390                 return;
6391         }
6392
6393         bnx2_netif_stop(bp, true);
6394
6395         rc = bnx2_init_nic(bp, 1);
6396         if (rc) {
6397                 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6398                 bnx2_napi_enable(bp);
6399                 dev_close(bp->dev);
6400                 rtnl_unlock();
6401                 return;
6402         }
6403
6404         atomic_set(&bp->intr_sem, 1);
6405         bnx2_netif_start(bp, true);
6406         rtnl_unlock();
6407 }
6408
6409 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6410
/* Dump the firmware transfer queues (FTQs), internal CPU states, and
 * the TBDC (tx BD cache) CAM to the kernel log.  Diagnostic only;
 * called from bnx2_tx_timeout().
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
        int i;
        u32 reg, bdidx, cid, valid;
        struct net_device *dev = bp->dev;
        static const struct ftq_reg {
                char *name;
                u32 off;
        } ftq_arr[] = {
                BNX2_FTQ_ENTRY(RV2P_P),
                BNX2_FTQ_ENTRY(RV2P_T),
                BNX2_FTQ_ENTRY(RV2P_M),
                BNX2_FTQ_ENTRY(TBDR_),
                BNX2_FTQ_ENTRY(TDMA_),
                BNX2_FTQ_ENTRY(TXP_),
                /* NOTE(review): TXP_ appears twice — verify whether a
                 * different queue was intended here. */
                BNX2_FTQ_ENTRY(TXP_),
                BNX2_FTQ_ENTRY(TPAT_),
                BNX2_FTQ_ENTRY(RXP_C),
                BNX2_FTQ_ENTRY(RXP_),
                BNX2_FTQ_ENTRY(COM_COMXQ_),
                BNX2_FTQ_ENTRY(COM_COMTQ_),
                BNX2_FTQ_ENTRY(COM_COMQ_),
                BNX2_FTQ_ENTRY(CP_CPQ_),
        };

        netdev_err(dev, "<--- start FTQ dump --->\n");
        for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
                netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
                           bnx2_reg_rd_ind(bp, ftq_arr[i].off));

        netdev_err(dev, "CPU states:\n");
        /* One internal-CPU register bank every 0x40000.  The PC
         * (reg + 0x1c) is read twice — presumably so the two samples
         * show whether the CPU is advancing; confirm against chip
         * documentation. */
        for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
                netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
                           reg, bnx2_reg_rd_ind(bp, reg),
                           bnx2_reg_rd_ind(bp, reg + 4),
                           bnx2_reg_rd_ind(bp, reg + 8),
                           bnx2_reg_rd_ind(bp, reg + 0x1c),
                           bnx2_reg_rd_ind(bp, reg + 0x1c),
                           bnx2_reg_rd_ind(bp, reg + 0x20));

        netdev_err(dev, "<--- end FTQ dump --->\n");
        netdev_err(dev, "<--- start TBDC dump --->\n");
        netdev_err(dev, "TBDC free cnt: %ld\n",
                   REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
        netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
        /* Read and print each of the 0x20 TBDC CAM lines. */
        for (i = 0; i < 0x20; i++) {
                int j = 0;

                REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
                REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
                       BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
                REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
                /* Bounded busy-wait (max 100 polls) for the CAM read. */
                while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
                        BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
                        j++;

                cid = REG_RD(bp, BNX2_TBDC_CID);
                bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
                valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
                netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
                           i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
                           bdidx >> 24, (valid >> 8) & 0x0ff);
        }
        netdev_err(dev, "<--- end TBDC dump --->\n");
}
6477
/* Log a snapshot of PCI config space and key MAC/host-coalescing
 * registers for debugging.  Diagnostic only; called from
 * bnx2_tx_timeout().
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
        struct net_device *dev = bp->dev;
        u32 val1, val2;

        pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
        netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
                   atomic_read(&bp->intr_sem), val1);
        /* Power-management control and chip misc configuration. */
        pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
        pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
        netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
        netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
                   REG_RD(bp, BNX2_EMAC_TX_STATUS),
                   REG_RD(bp, BNX2_EMAC_RX_STATUS));
        netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
                   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
        netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
                   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                /* MSI-X pending-bit array is mapped at GRC window 3. */
                netdev_err(dev, "DEBUG: PBA[%08x]\n",
                           REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6501
/* ndo_tx_timeout handler: dump diagnostic state to the log, then
 * schedule a full chip reset from process context via reset_task.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_dump_ftq(bp);
        bnx2_dump_state(bp);
        bnx2_dump_mcp_state(bp);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
6514
6515 /* Called with netif_tx_lock.
6516  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6517  * netif_wake_queue().
6518  */
6519 static netdev_tx_t
6520 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6521 {
6522         struct bnx2 *bp = netdev_priv(dev);
6523         dma_addr_t mapping;
6524         struct tx_bd *txbd;
6525         struct sw_tx_bd *tx_buf;
6526         u32 len, vlan_tag_flags, last_frag, mss;
6527         u16 prod, ring_prod;
6528         int i;
6529         struct bnx2_napi *bnapi;
6530         struct bnx2_tx_ring_info *txr;
6531         struct netdev_queue *txq;
6532
6533         /*  Determine which tx ring we will be placed on */
6534         i = skb_get_queue_mapping(skb);
6535         bnapi = &bp->bnx2_napi[i];
6536         txr = &bnapi->tx_ring;
6537         txq = netdev_get_tx_queue(dev, i);
6538
6539         if (unlikely(bnx2_tx_avail(bp, txr) <
6540             (skb_shinfo(skb)->nr_frags + 1))) {
6541                 netif_tx_stop_queue(txq);
6542                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6543
6544                 return NETDEV_TX_BUSY;
6545         }
6546         len = skb_headlen(skb);
6547         prod = txr->tx_prod;
6548         ring_prod = TX_RING_IDX(prod);
6549
6550         vlan_tag_flags = 0;
6551         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6552                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6553         }
6554
6555         if (vlan_tx_tag_present(skb)) {
6556                 vlan_tag_flags |=
6557                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6558         }
6559
6560         if ((mss = skb_shinfo(skb)->gso_size)) {
6561                 u32 tcp_opt_len;
6562                 struct iphdr *iph;
6563
6564                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6565
6566                 tcp_opt_len = tcp_optlen(skb);
6567
6568                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6569                         u32 tcp_off = skb_transport_offset(skb) -
6570                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6571
6572                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6573                                           TX_BD_FLAGS_SW_FLAGS;
6574                         if (likely(tcp_off == 0))
6575                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6576                         else {
6577                                 tcp_off >>= 3;
6578                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6579                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6580                                                   ((tcp_off & 0x10) <<
6581                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6582                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6583                         }
6584                 } else {
6585                         iph = ip_hdr(skb);
6586                         if (tcp_opt_len || (iph->ihl > 5)) {
6587                                 vlan_tag_flags |= ((iph->ihl - 5) +
6588                                                    (tcp_opt_len >> 2)) << 8;
6589                         }
6590                 }
6591         } else
6592                 mss = 0;
6593
6594         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6595         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6596                 dev_kfree_skb(skb);
6597                 return NETDEV_TX_OK;
6598         }
6599
6600         tx_buf = &txr->tx_buf_ring[ring_prod];
6601         tx_buf->skb = skb;
6602         dma_unmap_addr_set(tx_buf, mapping, mapping);
6603
6604         txbd = &txr->tx_desc_ring[ring_prod];
6605
6606         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6607         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6608         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6609         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6610
6611         last_frag = skb_shinfo(skb)->nr_frags;
6612         tx_buf->nr_frags = last_frag;
6613         tx_buf->is_gso = skb_is_gso(skb);
6614
6615         for (i = 0; i < last_frag; i++) {
6616                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6617
6618                 prod = NEXT_TX_BD(prod);
6619                 ring_prod = TX_RING_IDX(prod);
6620                 txbd = &txr->tx_desc_ring[ring_prod];
6621
6622                 len = skb_frag_size(frag);
6623                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6624                                            DMA_TO_DEVICE);
6625                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6626                         goto dma_error;
6627                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6628                                    mapping);
6629
6630                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6631                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6632                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6633                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6634
6635         }
6636         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6637
6638         /* Sync BD data before updating TX mailbox */
6639         wmb();
6640
6641         netdev_tx_sent_queue(txq, skb->len);
6642
6643         prod = NEXT_TX_BD(prod);
6644         txr->tx_prod_bseq += skb->len;
6645
6646         REG_WR16(bp, txr->tx_bidx_addr, prod);
6647         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6648
6649         mmiowb();
6650
6651         txr->tx_prod = prod;
6652
6653         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6654                 netif_tx_stop_queue(txq);
6655
6656                 /* netif_tx_stop_queue() must be done before checking
6657                  * tx index in bnx2_tx_avail() below, because in
6658                  * bnx2_tx_int(), we update tx index before checking for
6659                  * netif_tx_queue_stopped().
6660                  */
6661                 smp_mb();
6662                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6663                         netif_tx_wake_queue(txq);
6664         }
6665
6666         return NETDEV_TX_OK;
6667 dma_error:
6668         /* save value of frag that failed */
6669         last_frag = i;
6670
6671         /* start back at beginning and unmap skb */
6672         prod = txr->tx_prod;
6673         ring_prod = TX_RING_IDX(prod);
6674         tx_buf = &txr->tx_buf_ring[ring_prod];
6675         tx_buf->skb = NULL;
6676         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6677                          skb_headlen(skb), PCI_DMA_TODEVICE);
6678
6679         /* unmap remaining mapped pages */
6680         for (i = 0; i < last_frag; i++) {
6681                 prod = NEXT_TX_BD(prod);
6682                 ring_prod = TX_RING_IDX(prod);
6683                 tx_buf = &txr->tx_buf_ring[ring_prod];
6684                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6685                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6686                                PCI_DMA_TODEVICE);
6687         }
6688
6689         dev_kfree_skb(skb);
6690         return NETDEV_TX_OK;
6691 }
6692
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce in order: interrupts first, then NAPI polling, then
         * the periodic timer, before resetting/shutting down the chip.
         */
        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Leave the device in a low-power state until the next open */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6712
6713 static void
6714 bnx2_save_stats(struct bnx2 *bp)
6715 {
6716         u32 *hw_stats = (u32 *) bp->stats_blk;
6717         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6718         int i;
6719
6720         /* The 1st 10 counters are 64-bit counters */
6721         for (i = 0; i < 20; i += 2) {
6722                 u32 hi;
6723                 u64 lo;
6724
6725                 hi = temp_stats[i] + hw_stats[i];
6726                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6727                 if (lo > 0xffffffff)
6728                         hi++;
6729                 temp_stats[i] = hi;
6730                 temp_stats[i + 1] = lo & 0xffffffff;
6731         }
6732
6733         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6734                 temp_stats[i] += hw_stats[i];
6735 }
6736
/* Fold a hi/lo counter pair from the statistics block into one u64 */
#define GET_64BIT_NET_STATS64(ctr)              \
        (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter = live hardware value + copy saved across resets
 * (temp_stats_blk, maintained by bnx2_save_stats)
 */
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same idea for the 32-bit counters */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)

/* Fill *net_stats from the chip statistics block; expands on
 * implicit "bp" in the macros above.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* No statistics block available yet -- report nothing */
        if (bp->stats_blk == NULL)
                return net_stats;

        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        /* rx_errors is the sum of the four categories above */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors are not reported on 5706 and 5708 A0
         * (presumably a chip erratum -- see the skip list used for
         * the per-chip stats length arrays below).
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6820
6821 /* All ethtool functions called with rtnl_lock */
6822
/* ethtool get_settings: report supported/advertised modes and the
 * current link parameters.  Called with rtnl_lock held.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* Remote-PHY capable chips can drive either media type;
         * otherwise the media is fixed by the configured phy_port.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* Read the live link parameters consistently under phy_lock */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        } else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        /* Speed/duplex are only meaningful while the link is up */
        if (netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, bp->line_speed);
                cmd->duplex = bp->duplex;
        }
        else {
                ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6880
/* ethtool set_settings: validate and apply new autoneg/speed/duplex
 * settings.  Called with rtnl_lock held; takes phy_lock for the
 * duration of validation and commit.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on local copies so nothing is committed until all
         * validation has passed.
         */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        /* Only twisted-pair and fibre ports exist on this hardware */
        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Changing the port type requires a remote-PHY capable chip */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Mask the advertised modes down to what the selected
                 * media supports; fall back to all supported modes if
                 * the mask would otherwise be empty.
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: validate the speed/duplex combination */
                u32 speed = ethtool_cmd_speed(cmd);
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre only supports 1G/2.5G full duplex */
                        if ((speed != SPEED_1000 &&
                             speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                } else if (speed == SPEED_1000 || speed == SPEED_2500)
                        /* Copper cannot be forced to gigabit speeds */
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed -- commit the new settings */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6958
6959 static void
6960 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6961 {
6962         struct bnx2 *bp = netdev_priv(dev);
6963
6964         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6965         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6966         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6967         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6968 }
6969
6970 #define BNX2_REGDUMP_LEN                (32 * 1024)
6971
6972 static int
6973 bnx2_get_regs_len(struct net_device *dev)
6974 {
6975         return BNX2_REGDUMP_LEN;
6976 }
6977
/* ethtool get_regs: dump the readable chip register ranges into *_p;
 * unreadable gaps are left zero-filled.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        /* Consecutive pairs of [start, end) byte offsets of readable
         * register ranges; the trailing 0x8000 (== BNX2_REGDUMP_LEN)
         * terminates the walk.
         */
        static const u32 reg_boundaries[] = {
                0x0000, 0x0098, 0x0400, 0x045c,
                0x0800, 0x0880, 0x0c00, 0x0c10,
                0x0c30, 0x0d08, 0x1000, 0x101c,
                0x1040, 0x1048, 0x1080, 0x10a4,
                0x1400, 0x1490, 0x1498, 0x14f0,
                0x1500, 0x155c, 0x1580, 0x15dc,
                0x1600, 0x1658, 0x1680, 0x16d8,
                0x1800, 0x1820, 0x1840, 0x1854,
                0x1880, 0x1894, 0x1900, 0x1984,
                0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                0x1c80, 0x1c94, 0x1d00, 0x1d84,
                0x2000, 0x2030, 0x23c0, 0x2400,
                0x2800, 0x2820, 0x2830, 0x2850,
                0x2b40, 0x2c10, 0x2fc0, 0x3058,
                0x3c00, 0x3c94, 0x4000, 0x4010,
                0x4080, 0x4090, 0x43c0, 0x4458,
                0x4c00, 0x4c18, 0x4c40, 0x4c54,
                0x4fc0, 0x5010, 0x53c0, 0x5444,
                0x5c00, 0x5c18, 0x5c80, 0x5c90,
                0x5fc0, 0x6000, 0x6400, 0x6428,
                0x6800, 0x6848, 0x684c, 0x6860,
                0x6888, 0x6910, 0x8000
        };

        regs->version = 0;

        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Registers can only be read while the chip is up */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        /* NOTE(review): u32-pointer arithmetic here only works because
         * reg_boundaries[0] is 0, making this a no-op; later jumps go
         * through the byte pointer orig_p instead.
         */
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = REG_RD(bp, offset);
                offset += 4;
                /* End of this range: skip ahead to the next one */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
7029
7030 static void
7031 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7032 {
7033         struct bnx2 *bp = netdev_priv(dev);
7034
7035         if (bp->flags & BNX2_FLAG_NO_WOL) {
7036                 wol->supported = 0;
7037                 wol->wolopts = 0;
7038         }
7039         else {
7040                 wol->supported = WAKE_MAGIC;
7041                 if (bp->wol)
7042                         wol->wolopts = WAKE_MAGIC;
7043                 else
7044                         wol->wolopts = 0;
7045         }
7046         memset(&wol->sopass, 0, sizeof(wol->sopass));
7047 }
7048
7049 static int
7050 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7051 {
7052         struct bnx2 *bp = netdev_priv(dev);
7053
7054         if (wol->wolopts & ~WAKE_MAGIC)
7055                 return -EINVAL;
7056
7057         if (wol->wolopts & WAKE_MAGIC) {
7058                 if (bp->flags & BNX2_FLAG_NO_WOL)
7059                         return -EINVAL;
7060
7061                 bp->wol = 1;
7062         }
7063         else {
7064                 bp->wol = 0;
7065         }
7066         return 0;
7067 }
7068
/* ethtool nway_reset: restart autonegotiation on the PHY */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        /* Restarting autoneg only makes sense when autoneg is enabled */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote-PHY configurations: delegate to the setup helper */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep cannot be
                 * called with a BH spinlock held.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout in the driver timer */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
7114
7115 static u32
7116 bnx2_get_link(struct net_device *dev)
7117 {
7118         struct bnx2 *bp = netdev_priv(dev);
7119
7120         return bp->link_up;
7121 }
7122
7123 static int
7124 bnx2_get_eeprom_len(struct net_device *dev)
7125 {
7126         struct bnx2 *bp = netdev_priv(dev);
7127
7128         if (bp->flash_info == NULL)
7129                 return 0;
7130
7131         return (int) bp->flash_size;
7132 }
7133
7134 static int
7135 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7136                 u8 *eebuf)
7137 {
7138         struct bnx2 *bp = netdev_priv(dev);
7139         int rc;
7140
7141         if (!netif_running(dev))
7142                 return -EAGAIN;
7143
7144         /* parameters already validated in ethtool_get_eeprom */
7145
7146         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7147
7148         return rc;
7149 }
7150
7151 static int
7152 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7153                 u8 *eebuf)
7154 {
7155         struct bnx2 *bp = netdev_priv(dev);
7156         int rc;
7157
7158         if (!netif_running(dev))
7159                 return -EAGAIN;
7160
7161         /* parameters already validated in ethtool_set_eeprom */
7162
7163         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7164
7165         return rc;
7166 }
7167
7168 static int
7169 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7170 {
7171         struct bnx2 *bp = netdev_priv(dev);
7172
7173         memset(coal, 0, sizeof(struct ethtool_coalesce));
7174
7175         coal->rx_coalesce_usecs = bp->rx_ticks;
7176         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7177         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7178         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7179
7180         coal->tx_coalesce_usecs = bp->tx_ticks;
7181         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7182         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7183         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7184
7185         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7186
7187         return 0;
7188 }
7189
7190 static int
7191 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7192 {
7193         struct bnx2 *bp = netdev_priv(dev);
7194
7195         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7196         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7197
7198         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7199         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7200
7201         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7202         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7203
7204         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7205         if (bp->rx_quick_cons_trip_int > 0xff)
7206                 bp->rx_quick_cons_trip_int = 0xff;
7207
7208         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7209         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7210
7211         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7212         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7213
7214         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7215         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7216
7217         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7218         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7219                 0xff;
7220
7221         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7222         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7223                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7224                         bp->stats_ticks = USEC_PER_SEC;
7225         }
7226         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7227                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7228         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7229
7230         if (netif_running(bp->dev)) {
7231                 bnx2_netif_stop(bp, true);
7232                 bnx2_init_nic(bp, 0);
7233                 bnx2_netif_start(bp, true);
7234         }
7235
7236         return 0;
7237 }
7238
7239 static void
7240 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7241 {
7242         struct bnx2 *bp = netdev_priv(dev);
7243
7244         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7245         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7246
7247         ering->rx_pending = bp->rx_ring_size;
7248         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7249
7250         ering->tx_max_pending = MAX_TX_DESC_CNT;
7251         ering->tx_pending = bp->tx_ring_size;
7252 }
7253
/* Resize the rx/tx rings.  If the device is running this tears the NIC
 * down, applies the new sizes, and brings it back up; reset_irq also
 * rebuilds the interrupt/NAPI setup.  On bring-up failure the device
 * is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                /* Quiesce and tear down before resizing */
                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                if (reset_irq) {
                        bnx2_free_irq(bp);
                        bnx2_del_napi(bp);
                } else {
                        __bnx2_free_irq(bp);
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        /* Record the new ring sizes (used by the allocation below, or
         * by the next open if the device is currently down).
         */
        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc = 0;

                /* Rebuild interrupts/NAPI if they were torn down above */
                if (reset_irq) {
                        rc = bnx2_setup_int_mode(bp, disable_msi);
                        bnx2_init_napi(bp);
                }

                if (!rc)
                        rc = bnx2_alloc_mem(bp);

                if (!rc)
                        rc = bnx2_request_irq(bp);

                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                if (rc) {
                        /* Bring-up failed; re-enable NAPI so dev_close()
                         * can take the device down cleanly.
                         */
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7309
7310 static int
7311 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7312 {
7313         struct bnx2 *bp = netdev_priv(dev);
7314         int rc;
7315
7316         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7317                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7318                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7319
7320                 return -EINVAL;
7321         }
7322         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7323                                    false);
7324         return rc;
7325 }
7326
7327 static void
7328 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7329 {
7330         struct bnx2 *bp = netdev_priv(dev);
7331
7332         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7333         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7334         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7335 }
7336
7337 static int
7338 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7339 {
7340         struct bnx2 *bp = netdev_priv(dev);
7341
7342         bp->req_flow_ctrl = 0;
7343         if (epause->rx_pause)
7344                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7345         if (epause->tx_pause)
7346                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7347
7348         if (epause->autoneg) {
7349                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7350         }
7351         else {
7352                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7353         }
7354
7355         if (netif_running(dev)) {
7356                 spin_lock_bh(&bp->phy_lock);
7357                 bnx2_setup_phy(bp, bp->phy_port);
7358                 spin_unlock_bh(&bp->phy_lock);
7359         }
7360
7361         return 0;
7362 }
7363
/* Names reported for "ethtool -S"; index order must stay in sync with
 * bnx2_stats_offset_arr and the per-chip stats length arrays below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7415
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Byte offset of a statistics_block member expressed as a u32-word index */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset in the statistics block of each counter, in the same
 * order as bnx2_stats_str_arr; 64-bit counters reference their _hi word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7469
/* Byte width of each counter in bnx2_stats_offset_arr[] for early chip
 * revisions (5706 A0-A2 and 5708 A0): 8 = 64-bit counter, 4 = 32-bit
 * counter.  stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors
 * are skipped (width 0, reported as zero) because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7480
/* Counter widths for all other chip revisions; only stat_IfHCInBadOctets
 * (width 0) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7488
#define BNX2_NUM_TESTS 6

/* Names reported to ethtool for the self-test results; the order must
 * match the buf[] indices filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7501
7502 static int
7503 bnx2_get_sset_count(struct net_device *dev, int sset)
7504 {
7505         switch (sset) {
7506         case ETH_SS_TEST:
7507                 return BNX2_NUM_TESTS;
7508         case ETH_SS_STATS:
7509                 return BNX2_NUM_STATS;
7510         default:
7511                 return -EOPNOTSUPP;
7512         }
7513 }
7514
/* ethtool self-test hook.  The online tests (NVRAM, interrupt, link)
 * always run; when ETH_TEST_FL_OFFLINE is requested, the register,
 * memory and loopback tests additionally run after stopping the netif
 * and resetting the chip into diagnostic mode.  buf[i] is set non-zero
 * for each failed test, indexed in bnx2_tests_str_arr[] order.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure the chip is powered up before touching it. */
        bnx2_set_power_state(bp, PCI_D0);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* buf[2] records the loopback test's failure code. */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Bring the chip back to its pre-test state. */
                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp, true);
                }

                /* wait for link up (up to ~7s) before the online link
                 * test below, since the reset dropped the link.
                 */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
        /* Restore low-power state if the interface is down. */
        if (!netif_running(bp->dev))
                bnx2_set_power_state(bp, PCI_D3hot);
}
7573
7574 static void
7575 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7576 {
7577         switch (stringset) {
7578         case ETH_SS_STATS:
7579                 memcpy(buf, bnx2_stats_str_arr,
7580                         sizeof(bnx2_stats_str_arr));
7581                 break;
7582         case ETH_SS_TEST:
7583                 memcpy(buf, bnx2_tests_str_arr,
7584                         sizeof(bnx2_tests_str_arr));
7585                 break;
7586         }
7587 }
7588
/* ethtool get_ethtool_stats hook.  Each reported 64-bit value is the
 * sum of the live hardware stats block and temp_stats_blk (counters
 * accumulated by the driver, e.g. across resets).  Counters with a
 * zero width entry are unsupported on the running chip revision and
 * reported as 0.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block available: report all zeros. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* Early revisions cannot report some counters (see the len
         * arrays above).
         */
        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                unsigned long offset;

                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }

                /* offset is in 32-bit words into the stats block. */
                offset = bnx2_stats_offset_arr[i];
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64) *(hw_stats + offset) +
                                 *(temp_stats + offset);
                        continue;
                }
                /* 8-byte counter: _hi word at offset, _lo word follows */
                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
                         *(hw_stats + offset + 1) +
                         (((u64) *(temp_stats + offset)) << 32) +
                         *(temp_stats + offset + 1);
        }
}
7635
/* ethtool set_phys_id hook: blink the port LED so the physical port can
 * be identified.  Returning 1 from ETHTOOL_ID_ACTIVE asks the ethtool
 * core to call back with ID_ON/ID_OFF once per second.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
        struct bnx2 *bp = netdev_priv(dev);

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                /* Wake the chip and save the LED config for restore. */
                bnx2_set_power_state(bp, PCI_D0);

                bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
                REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_ON:
                /* Override all speed/traffic LEDs to force them on. */
                REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                       BNX2_EMAC_LED_1000MB_OVERRIDE |
                       BNX2_EMAC_LED_100MB_OVERRIDE |
                       BNX2_EMAC_LED_10MB_OVERRIDE |
                       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                       BNX2_EMAC_LED_TRAFFIC);
                break;

        case ETHTOOL_ID_OFF:
                REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                break;

        case ETHTOOL_ID_INACTIVE:
                /* Restore the saved LED mode and the power state. */
                REG_WR(bp, BNX2_EMAC_LED, 0);
                REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);

                if (!netif_running(dev))
                        bnx2_set_power_state(bp, PCI_D3hot);
                break;
        }

        return 0;
}
7673
7674 static netdev_features_t
7675 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7676 {
7677         struct bnx2 *bp = netdev_priv(dev);
7678
7679         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7680                 features |= NETIF_F_HW_VLAN_RX;
7681
7682         return features;
7683 }
7684
/* netdev set_features hook.  Returns 1 after reprogramming the chip for
 * a changed RX VLAN stripping setting (dev->features was updated here),
 * 0 when nothing had to be reprogrammed.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* TSO with VLAN tag won't work with current firmware */
        if (features & NETIF_F_HW_VLAN_TX)
                dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
        else
                dev->vlan_features &= ~NETIF_F_ALL_TSO;

        /* Reprogram the RX path only if the VLAN stripping setting
         * actually changed and the device is up.
         */
        if ((!!(features & NETIF_F_HW_VLAN_RX) !=
            !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
            netif_running(dev)) {
                bnx2_netif_stop(bp, false);
                dev->features = features;
                bnx2_set_rx_mode(dev);
                /* Tell the firmware whether it must keep VLAN tags. */
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
                bnx2_netif_start(bp, false);
                return 1;
        }

        return 0;
}
7709
7710 static void bnx2_get_channels(struct net_device *dev,
7711                               struct ethtool_channels *channels)
7712 {
7713         struct bnx2 *bp = netdev_priv(dev);
7714         u32 max_rx_rings = 1;
7715         u32 max_tx_rings = 1;
7716
7717         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7718                 max_rx_rings = RX_MAX_RINGS;
7719                 max_tx_rings = TX_MAX_RINGS;
7720         }
7721
7722         channels->max_rx = max_rx_rings;
7723         channels->max_tx = max_tx_rings;
7724         channels->max_other = 0;
7725         channels->max_combined = 0;
7726         channels->rx_count = bp->num_rx_rings;
7727         channels->tx_count = bp->num_tx_rings;
7728         channels->other_count = 0;
7729         channels->combined_count = 0;
7730 }
7731
7732 static int bnx2_set_channels(struct net_device *dev,
7733                               struct ethtool_channels *channels)
7734 {
7735         struct bnx2 *bp = netdev_priv(dev);
7736         u32 max_rx_rings = 1;
7737         u32 max_tx_rings = 1;
7738         int rc = 0;
7739
7740         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7741                 max_rx_rings = RX_MAX_RINGS;
7742                 max_tx_rings = TX_MAX_RINGS;
7743         }
7744         if (channels->rx_count > max_rx_rings ||
7745             channels->tx_count > max_tx_rings)
7746                 return -EINVAL;
7747
7748         bp->num_req_rx_rings = channels->rx_count;
7749         bp->num_req_tx_rings = channels->tx_count;
7750
7751         if (netif_running(dev))
7752                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7753                                            bp->tx_ring_size, true);
7754
7755         return rc;
7756 }
7757
/* ethtool entry points for this driver; the handlers are defined
 * earlier in this file.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = bnx2_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .set_phys_id            = bnx2_set_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
        .get_channels           = bnx2_get_channels,
        .set_channels           = bnx2_set_channels,
};
7785
7786 /* Called with rtnl_lock */
7787 static int
7788 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7789 {
7790         struct mii_ioctl_data *data = if_mii(ifr);
7791         struct bnx2 *bp = netdev_priv(dev);
7792         int err;
7793
7794         switch(cmd) {
7795         case SIOCGMIIPHY:
7796                 data->phy_id = bp->phy_addr;
7797
7798                 /* fallthru */
7799         case SIOCGMIIREG: {
7800                 u32 mii_regval;
7801
7802                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7803                         return -EOPNOTSUPP;
7804
7805                 if (!netif_running(dev))
7806                         return -EAGAIN;
7807
7808                 spin_lock_bh(&bp->phy_lock);
7809                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7810                 spin_unlock_bh(&bp->phy_lock);
7811
7812                 data->val_out = mii_regval;
7813
7814                 return err;
7815         }
7816
7817         case SIOCSMIIREG:
7818                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7819                         return -EOPNOTSUPP;
7820
7821                 if (!netif_running(dev))
7822                         return -EAGAIN;
7823
7824                 spin_lock_bh(&bp->phy_lock);
7825                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7826                 spin_unlock_bh(&bp->phy_lock);
7827
7828                 return err;
7829
7830         default:
7831                 /* do nothing */
7832                 break;
7833         }
7834         return -EOPNOTSUPP;
7835 }
7836
7837 /* Called with rtnl_lock */
7838 static int
7839 bnx2_change_mac_addr(struct net_device *dev, void *p)
7840 {
7841         struct sockaddr *addr = p;
7842         struct bnx2 *bp = netdev_priv(dev);
7843
7844         if (!is_valid_ether_addr(addr->sa_data))
7845                 return -EADDRNOTAVAIL;
7846
7847         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7848         if (netif_running(dev))
7849                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7850
7851         return 0;
7852 }
7853
7854 /* Called with rtnl_lock */
7855 static int
7856 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7857 {
7858         struct bnx2 *bp = netdev_priv(dev);
7859
7860         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7861                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7862                 return -EINVAL;
7863
7864         dev->mtu = new_mtu;
7865         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7866                                      false);
7867 }
7868
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every interrupt handler once with its IRQ line
 * disabled, so netconsole & co. can make progress without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int vec;

        for (vec = 0; vec < bp->irq_nvecs; vec++) {
                struct bnx2_irq *irq = &bp->irq_tbl[vec];

                disable_irq(irq->vector);
                irq->handler(irq->vector, &bp->bnx2_napi[vec]);
                enable_irq(irq->vector);
        }
}
#endif
7885
/* Determine whether a 5709 port uses copper or SerDes media, first from
 * the dual-media bond id and otherwise from the strap value, and set
 * BNX2_PHY_FLAG_SERDES accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
        u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
        u32 strap;

        /* Bond id _C: copper (flag stays clear); _S: SerDes. */
        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
                return;
        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
                bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                return;
        }

        /* A software strap override takes precedence over the hardware
         * strap pins.
         */
        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
        else
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

        /* The strap values that mean SerDes differ between PCI
         * function 0 and the other functions.
         */
        if (PCI_FUNC(bp->pdev->devfn) == 0) {
                switch (strap) {
                case 0x4:
                case 0x5:
                case 0x6:
                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                        return;
                }
        } else {
                switch (strap) {
                case 0x1:
                case 0x2:
                case 0x4:
                        bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
                        return;
                }
        }
}
7923
/* Detect the bus type (PCI vs PCI-X), bus width and clock speed from
 * the chip's status registers and record them in bp->flags and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                /* Map the detected clock field to the nearest nominal
                 * bus speed in MHz (e.g. 95 MHz detect -> 100 MHz).
                 */
                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7975
7976 static void __devinit
7977 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7978 {
7979         int rc, i, j;
7980         u8 *data;
7981         unsigned int block_end, rosize, len;
7982
7983 #define BNX2_VPD_NVRAM_OFFSET   0x300
7984 #define BNX2_VPD_LEN            128
7985 #define BNX2_MAX_VER_SLEN       30
7986
7987         data = kmalloc(256, GFP_KERNEL);
7988         if (!data)
7989                 return;
7990
7991         rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7992                              BNX2_VPD_LEN);
7993         if (rc)
7994                 goto vpd_done;
7995
7996         for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7997                 data[i] = data[i + BNX2_VPD_LEN + 3];
7998                 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7999                 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8000                 data[i + 3] = data[i + BNX2_VPD_LEN];
8001         }
8002
8003         i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8004         if (i < 0)
8005                 goto vpd_done;
8006
8007         rosize = pci_vpd_lrdt_size(&data[i]);
8008         i += PCI_VPD_LRDT_TAG_SIZE;
8009         block_end = i + rosize;
8010
8011         if (block_end > BNX2_VPD_LEN)
8012                 goto vpd_done;
8013
8014         j = pci_vpd_find_info_keyword(data, i, rosize,
8015                                       PCI_VPD_RO_KEYWORD_MFR_ID);
8016         if (j < 0)
8017                 goto vpd_done;
8018
8019         len = pci_vpd_info_field_size(&data[j]);
8020
8021         j += PCI_VPD_INFO_FLD_HDR_SIZE;
8022         if (j + len > block_end || len != 4 ||
8023             memcmp(&data[j], "1028", 4))
8024                 goto vpd_done;
8025
8026         j = pci_vpd_find_info_keyword(data, i, rosize,
8027                                       PCI_VPD_RO_KEYWORD_VENDOR0);
8028         if (j < 0)
8029                 goto vpd_done;
8030
8031         len = pci_vpd_info_field_size(&data[j]);
8032
8033         j += PCI_VPD_INFO_FLD_HDR_SIZE;
8034         if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8035                 goto vpd_done;
8036
8037         memcpy(bp->fw_version, &data[j], len);
8038         bp->fw_version[len] = ' ';
8039
8040 vpd_done:
8041         kfree(data);
8042 }
8043
8044 static int __devinit
8045 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8046 {
8047         struct bnx2 *bp;
8048         int rc, i, j;
8049         u32 reg;
8050         u64 dma_mask, persist_dma_mask;
8051         int err;
8052
8053         SET_NETDEV_DEV(dev, &pdev->dev);
8054         bp = netdev_priv(dev);
8055
8056         bp->flags = 0;
8057         bp->phy_flags = 0;
8058
8059         bp->temp_stats_blk =
8060                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8061
8062         if (bp->temp_stats_blk == NULL) {
8063                 rc = -ENOMEM;
8064                 goto err_out;
8065         }
8066
8067         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8068         rc = pci_enable_device(pdev);
8069         if (rc) {
8070                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8071                 goto err_out;
8072         }
8073
8074         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8075                 dev_err(&pdev->dev,
8076                         "Cannot find PCI device base address, aborting\n");
8077                 rc = -ENODEV;
8078                 goto err_out_disable;
8079         }
8080
8081         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8082         if (rc) {
8083                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8084                 goto err_out_disable;
8085         }
8086
8087         pci_set_master(pdev);
8088
8089         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8090         if (bp->pm_cap == 0) {
8091                 dev_err(&pdev->dev,
8092                         "Cannot find power management capability, aborting\n");
8093                 rc = -EIO;
8094                 goto err_out_release;
8095         }
8096
8097         bp->dev = dev;
8098         bp->pdev = pdev;
8099
8100         spin_lock_init(&bp->phy_lock);
8101         spin_lock_init(&bp->indirect_lock);
8102 #ifdef BCM_CNIC
8103         mutex_init(&bp->cnic_lock);
8104 #endif
8105         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8106
8107         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8108                                                          TX_MAX_TSS_RINGS + 1));
8109         if (!bp->regview) {
8110                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8111                 rc = -ENOMEM;
8112                 goto err_out_release;
8113         }
8114
8115         bnx2_set_power_state(bp, PCI_D0);
8116
8117         /* Configure byte swap and enable write to the reg_window registers.
8118          * Rely on CPU to do target byte swapping on big endian systems
8119          * The chip's target access swapping will not swap all accesses
8120          */
8121         REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8122                    BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8123                    BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8124
8125         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
8126
8127         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8128                 if (!pci_is_pcie(pdev)) {
8129                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8130                         rc = -EIO;
8131                         goto err_out_unmap;
8132                 }
8133                 bp->flags |= BNX2_FLAG_PCIE;
8134                 if (CHIP_REV(bp) == CHIP_REV_Ax)
8135                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8136
8137                 /* AER (Advanced Error Reporting) hooks */
8138                 err = pci_enable_pcie_error_reporting(pdev);
8139                 if (!err)
8140                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8141
8142         } else {
8143                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8144                 if (bp->pcix_cap == 0) {
8145                         dev_err(&pdev->dev,
8146                                 "Cannot find PCIX capability, aborting\n");
8147                         rc = -EIO;
8148                         goto err_out_unmap;
8149                 }
8150                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8151         }
8152
8153         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8154                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8155                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8156         }
8157
8158         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8159                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8160                         bp->flags |= BNX2_FLAG_MSI_CAP;
8161         }
8162
8163         /* 5708 cannot support DMA addresses > 40-bit.  */
8164         if (CHIP_NUM(bp) == CHIP_NUM_5708)
8165                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8166         else
8167                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8168
8169         /* Configure DMA attributes. */
8170         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8171                 dev->features |= NETIF_F_HIGHDMA;
8172                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8173                 if (rc) {
8174                         dev_err(&pdev->dev,
8175                                 "pci_set_consistent_dma_mask failed, aborting\n");
8176                         goto err_out_unmap;
8177                 }
8178         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8179                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8180                 goto err_out_unmap;
8181         }
8182
8183         if (!(bp->flags & BNX2_FLAG_PCIE))
8184                 bnx2_get_pci_speed(bp);
8185
8186         /* 5706A0 may falsely detect SERR and PERR. */
8187         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8188                 reg = REG_RD(bp, PCI_COMMAND);
8189                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8190                 REG_WR(bp, PCI_COMMAND, reg);
8191         }
8192         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8193                 !(bp->flags & BNX2_FLAG_PCIX)) {
8194
8195                 dev_err(&pdev->dev,
8196                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8197                 goto err_out_unmap;
8198         }
8199
8200         bnx2_init_nvram(bp);
8201
8202         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8203
8204         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8205             BNX2_SHM_HDR_SIGNATURE_SIG) {
8206                 u32 off = PCI_FUNC(pdev->devfn) << 2;
8207
8208                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8209         } else
8210                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8211
8212         /* Get the permanent MAC address.  First we need to make sure the
8213          * firmware is actually running.
8214          */
8215         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8216
8217         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8218             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8219                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8220                 rc = -ENODEV;
8221                 goto err_out_unmap;
8222         }
8223
8224         bnx2_read_vpd_fw_ver(bp);
8225
8226         j = strlen(bp->fw_version);
8227         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8228         for (i = 0; i < 3 && j < 24; i++) {
8229                 u8 num, k, skip0;
8230
8231                 if (i == 0) {
8232                         bp->fw_version[j++] = 'b';
8233                         bp->fw_version[j++] = 'c';
8234                         bp->fw_version[j++] = ' ';
8235                 }
8236                 num = (u8) (reg >> (24 - (i * 8)));
8237                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8238                         if (num >= k || !skip0 || k == 1) {
8239                                 bp->fw_version[j++] = (num / k) + '0';
8240                                 skip0 = 0;
8241                         }
8242                 }
8243                 if (i != 2)
8244                         bp->fw_version[j++] = '.';
8245         }
8246         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8247         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8248                 bp->wol = 1;
8249
8250         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8251                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8252
8253                 for (i = 0; i < 30; i++) {
8254                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8255                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8256                                 break;
8257                         msleep(10);
8258                 }
8259         }
8260         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8261         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8262         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8263             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8264                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8265
8266                 if (j < 32)
8267                         bp->fw_version[j++] = ' ';
8268                 for (i = 0; i < 3 && j < 28; i++) {
8269                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8270                         reg = be32_to_cpu(reg);
8271                         memcpy(&bp->fw_version[j], &reg, 4);
8272                         j += 4;
8273                 }
8274         }
8275
8276         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8277         bp->mac_addr[0] = (u8) (reg >> 8);
8278         bp->mac_addr[1] = (u8) reg;
8279
8280         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8281         bp->mac_addr[2] = (u8) (reg >> 24);
8282         bp->mac_addr[3] = (u8) (reg >> 16);
8283         bp->mac_addr[4] = (u8) (reg >> 8);
8284         bp->mac_addr[5] = (u8) reg;
8285
8286         bp->tx_ring_size = MAX_TX_DESC_CNT;
8287         bnx2_set_rx_ring_size(bp, 255);
8288
8289         bp->tx_quick_cons_trip_int = 2;
8290         bp->tx_quick_cons_trip = 20;
8291         bp->tx_ticks_int = 18;
8292         bp->tx_ticks = 80;
8293
8294         bp->rx_quick_cons_trip_int = 2;
8295         bp->rx_quick_cons_trip = 12;
8296         bp->rx_ticks_int = 18;
8297         bp->rx_ticks = 18;
8298
8299         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8300
8301         bp->current_interval = BNX2_TIMER_INTERVAL;
8302
8303         bp->phy_addr = 1;
8304
8305         /* Disable WOL support if we are running on a SERDES chip. */
8306         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8307                 bnx2_get_5709_media(bp);
8308         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8309                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8310
8311         bp->phy_port = PORT_TP;
8312         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8313                 bp->phy_port = PORT_FIBRE;
8314                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8315                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8316                         bp->flags |= BNX2_FLAG_NO_WOL;
8317                         bp->wol = 0;
8318                 }
8319                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8320                         /* Don't do parallel detect on this board because of
8321                          * some board problems.  The link will not go down
8322                          * if we do parallel detect.
8323                          */
8324                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8325                             pdev->subsystem_device == 0x310c)
8326                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8327                 } else {
8328                         bp->phy_addr = 2;
8329                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8330                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8331                 }
8332         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8333                    CHIP_NUM(bp) == CHIP_NUM_5708)
8334                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8335         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8336                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8337                   CHIP_REV(bp) == CHIP_REV_Bx))
8338                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8339
8340         bnx2_init_fw_cap(bp);
8341
8342         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8343             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8344             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8345             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8346                 bp->flags |= BNX2_FLAG_NO_WOL;
8347                 bp->wol = 0;
8348         }
8349
8350         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8351                 bp->tx_quick_cons_trip_int =
8352                         bp->tx_quick_cons_trip;
8353                 bp->tx_ticks_int = bp->tx_ticks;
8354                 bp->rx_quick_cons_trip_int =
8355                         bp->rx_quick_cons_trip;
8356                 bp->rx_ticks_int = bp->rx_ticks;
8357                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8358                 bp->com_ticks_int = bp->com_ticks;
8359                 bp->cmd_ticks_int = bp->cmd_ticks;
8360         }
8361
8362         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8363          *
8364          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8365          * with byte enables disabled on the unused 32-bit word.  This is legal
8366          * but causes problems on the AMD 8132 which will eventually stop
8367          * responding after a while.
8368          *
8369          * AMD believes this incompatibility is unique to the 5706, and
8370          * prefers to locally disable MSI rather than globally disabling it.
8371          */
8372         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8373                 struct pci_dev *amd_8132 = NULL;
8374
8375                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8376                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8377                                                   amd_8132))) {
8378
8379                         if (amd_8132->revision >= 0x10 &&
8380                             amd_8132->revision <= 0x13) {
8381                                 disable_msi = 1;
8382                                 pci_dev_put(amd_8132);
8383                                 break;
8384                         }
8385                 }
8386         }
8387
8388         bnx2_set_default_link(bp);
8389         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8390
8391         init_timer(&bp->timer);
8392         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8393         bp->timer.data = (unsigned long) bp;
8394         bp->timer.function = bnx2_timer;
8395
8396 #ifdef BCM_CNIC
8397         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8398                 bp->cnic_eth_dev.max_iscsi_conn =
8399                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8400                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8401 #endif
8402         pci_save_state(pdev);
8403
8404         return 0;
8405
8406 err_out_unmap:
8407         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8408                 pci_disable_pcie_error_reporting(pdev);
8409                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8410         }
8411
8412         pci_iounmap(pdev, bp->regview);
8413         bp->regview = NULL;
8414
8415 err_out_release:
8416         pci_release_regions(pdev);
8417
8418 err_out_disable:
8419         pci_disable_device(pdev);
8420         pci_set_drvdata(pdev, NULL);
8421
8422 err_out:
8423         return rc;
8424 }
8425
8426 static char * __devinit
8427 bnx2_bus_string(struct bnx2 *bp, char *str)
8428 {
8429         char *s = str;
8430
8431         if (bp->flags & BNX2_FLAG_PCIE) {
8432                 s += sprintf(s, "PCI Express");
8433         } else {
8434                 s += sprintf(s, "PCI");
8435                 if (bp->flags & BNX2_FLAG_PCIX)
8436                         s += sprintf(s, "-X");
8437                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8438                         s += sprintf(s, " 32-bit");
8439                 else
8440                         s += sprintf(s, " 64-bit");
8441                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8442         }
8443         return str;
8444 }
8445
8446 static void
8447 bnx2_del_napi(struct bnx2 *bp)
8448 {
8449         int i;
8450
8451         for (i = 0; i < bp->irq_nvecs; i++)
8452                 netif_napi_del(&bp->bnx2_napi[i].napi);
8453 }
8454
8455 static void
8456 bnx2_init_napi(struct bnx2 *bp)
8457 {
8458         int i;
8459
8460         for (i = 0; i < bp->irq_nvecs; i++) {
8461                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8462                 int (*poll)(struct napi_struct *, int);
8463
8464                 if (i == 0)
8465                         poll = bnx2_poll;
8466                 else
8467                         poll = bnx2_poll_msix;
8468
8469                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8470                 bnapi->bp = bp;
8471         }
8472 }
8473
/* net_device callbacks for bnx2 interfaces; installed on the netdev in
 * bnx2_init_one().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8491
8492 static int __devinit
8493 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8494 {
8495         static int version_printed = 0;
8496         struct net_device *dev;
8497         struct bnx2 *bp;
8498         int rc;
8499         char str[40];
8500
8501         if (version_printed++ == 0)
8502                 pr_info("%s", version);
8503
8504         /* dev zeroed in init_etherdev */
8505         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8506         if (!dev)
8507                 return -ENOMEM;
8508
8509         rc = bnx2_init_board(pdev, dev);
8510         if (rc < 0)
8511                 goto err_free;
8512
8513         dev->netdev_ops = &bnx2_netdev_ops;
8514         dev->watchdog_timeo = TX_TIMEOUT;
8515         dev->ethtool_ops = &bnx2_ethtool_ops;
8516
8517         bp = netdev_priv(dev);
8518
8519         pci_set_drvdata(pdev, dev);
8520
8521         memcpy(dev->dev_addr, bp->mac_addr, 6);
8522         memcpy(dev->perm_addr, bp->mac_addr, 6);
8523
8524         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8525                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8526                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8527
8528         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8529                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8530
8531         dev->vlan_features = dev->hw_features;
8532         dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8533         dev->features |= dev->hw_features;
8534         dev->priv_flags |= IFF_UNICAST_FLT;
8535
8536         if ((rc = register_netdev(dev))) {
8537                 dev_err(&pdev->dev, "Cannot register net device\n");
8538                 goto error;
8539         }
8540
8541         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8542                     "node addr %pM\n", board_info[ent->driver_data].name,
8543                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8544                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8545                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8546                     pdev->irq, dev->dev_addr);
8547
8548         return 0;
8549
8550 error:
8551         iounmap(bp->regview);
8552         pci_release_regions(pdev);
8553         pci_disable_device(pdev);
8554         pci_set_drvdata(pdev, NULL);
8555 err_free:
8556         free_netdev(dev);
8557         return rc;
8558 }
8559
/* Tear down one NX2 device, reversing bnx2_init_one(): unregister the
 * netdev first so no new ndo callbacks can run, stop deferred work,
 * then release MMIO, the temp stats block, AER, firmware, and finally
 * the PCI device itself.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* The timer and reset task may still be pending; make sure both
	 * have fully finished before freeing anything they touch.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8588
/* Legacy PCI PM suspend hook: quiesce the NIC, shut the chip down, free
 * posted skbs, and drop into the PCI power state chosen for @state.
 * Returns 0 always.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop the reset task before stopping the data path so it cannot
	 * re-start the NIC underneath us.
	 */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8612
/* Legacy PCI PM resume hook: restore config space, bring the chip back
 * to D0, re-initialize the NIC, and restart the data path.  Mirrors
 * bnx2_suspend().  Returns 0 always.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8629
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Detach under RTNL so we cannot race with an ifup/ifdown. */
	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		/* Device is gone for good; recovery is impossible. */
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8664
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result;
	int err;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		/* Restore config space saved at probe/suspend time, then
		 * re-save so a subsequent reset starts from this state.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev)) {
			bnx2_set_power_state(bp, PCI_D0);
			bnx2_init_nic(bp, 1);
		}
		result = PCI_ERS_RESULT_RECOVERED;
	}
	rtnl_unlock();

	/* Nothing further unless AER was enabled at probe time. */
	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}
8708
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Restart the data path (if the interface was up) and re-attach,
	 * under RTNL to serialize against ifup/ifdown.
	 */
	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8728
/* AER/EEH error-recovery callbacks wired into the PCI core via
 * bnx2_pci_driver.err_handler.
 */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8734
/* PCI driver glue: matches devices listed in bnx2_pci_tbl and wires up
 * probe/remove, legacy power management, and AER error recovery.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8744
8745 static int __init bnx2_init(void)
8746 {
8747         return pci_register_driver(&bnx2_pci_driver);
8748 }
8749
/* Module exit point: detach from the PCI core, which invokes
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8754
/* Register the driver's load/unload entry points with the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8757
8758
8759